problem_id (string, 18-22 chars) | source (string, 1 distinct value) | task_type (string, 1 distinct value) | in_source_id (string, 13-58 chars) | prompt (string, 1.1k-25.4k chars) | golden_diff (string, 145-5.13k chars) | verification_info (string, 582-39.1k chars) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_20456 | rasdani/github-patches | git_diff | translate__pootle-5699 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Lost timeSince data for suggestions
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_misc/templatetags/locale.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django import template
10 from django.utils.formats import get_format
11 from django.utils.translation import trans_real
12
13 from pootle.core.utils import dateformat
14
15
16 register = template.Library()
17
18
19 @register.simple_tag
20 def locale_dir():
21 """Returns current locale's direction."""
22 return trans_real.get_language_bidi() and "rtl" or "ltr"
23
24
25 @register.filter(name='dateformat')
26 def do_dateformat(value, use_format='c'):
27 """Formats a `value` date using `format`.
28
29 :param value: a datetime object.
30 :param use_format: a format string accepted by
31 :func:`django.utils.formats.get_format` or
32 :func:`django.utils.dateformat.format`. If none is set, the current
33 locale's default format will be used.
34 """
35 try:
36 use_format = get_format(use_format)
37 except AttributeError:
38 pass
39
40 return dateformat.format(value, use_format)
41
42
43 @register.simple_tag
44 def locale_align():
45 """Returns current locale's default alignment."""
46 return trans_real.get_language_bidi() and "right" or "left"
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_misc/templatetags/locale.py b/pootle/apps/pootle_misc/templatetags/locale.py
--- a/pootle/apps/pootle_misc/templatetags/locale.py
+++ b/pootle/apps/pootle_misc/templatetags/locale.py
@@ -6,11 +6,14 @@
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
+import calendar
+
from django import template
from django.utils.formats import get_format
from django.utils.translation import trans_real
from pootle.core.utils import dateformat
+from pootle.local.dates import timesince
register = template.Library()
@@ -40,6 +43,11 @@
return dateformat.format(value, use_format)
[email protected](name='relative_datetime_format')
+def do_relative_datetime_format(value):
+ return timesince(calendar.timegm(value.timetuple()))
+
+
@register.simple_tag
def locale_align():
"""Returns current locale's default alignment."""
| {"golden_diff": "diff --git a/pootle/apps/pootle_misc/templatetags/locale.py b/pootle/apps/pootle_misc/templatetags/locale.py\n--- a/pootle/apps/pootle_misc/templatetags/locale.py\n+++ b/pootle/apps/pootle_misc/templatetags/locale.py\n@@ -6,11 +6,14 @@\n # or later license. See the LICENSE file for a copy of the license and the\n # AUTHORS file for copyright and authorship information.\n \n+import calendar\n+\n from django import template\n from django.utils.formats import get_format\n from django.utils.translation import trans_real\n \n from pootle.core.utils import dateformat\n+from pootle.local.dates import timesince\n \n \n register = template.Library()\n@@ -40,6 +43,11 @@\n return dateformat.format(value, use_format)\n \n \[email protected](name='relative_datetime_format')\n+def do_relative_datetime_format(value):\n+ return timesince(calendar.timegm(value.timetuple()))\n+\n+\n @register.simple_tag\n def locale_align():\n \"\"\"Returns current locale's default alignment.\"\"\"\n", "issue": "Lost timeSince data for suggestions\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import template\nfrom django.utils.formats import get_format\nfrom django.utils.translation import trans_real\n\nfrom pootle.core.utils import dateformat\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef locale_dir():\n \"\"\"Returns current locale's direction.\"\"\"\n return trans_real.get_language_bidi() and \"rtl\" or \"ltr\"\n\n\[email protected](name='dateformat')\ndef do_dateformat(value, use_format='c'):\n \"\"\"Formats a `value` date using `format`.\n\n :param value: a datetime object.\n :param use_format: a format string accepted by\n :func:`django.utils.formats.get_format` or\n :func:`django.utils.dateformat.format`. If none is set, the current\n locale's default format will be used.\n \"\"\"\n try:\n use_format = get_format(use_format)\n except AttributeError:\n pass\n\n return dateformat.format(value, use_format)\n\n\[email protected]_tag\ndef locale_align():\n \"\"\"Returns current locale's default alignment.\"\"\"\n return trans_real.get_language_bidi() and \"right\" or \"left\"\n", "path": "pootle/apps/pootle_misc/templatetags/locale.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport calendar\n\nfrom django import template\nfrom django.utils.formats import get_format\nfrom django.utils.translation import trans_real\n\nfrom pootle.core.utils import dateformat\nfrom pootle.local.dates import timesince\n\n\nregister = template.Library()\n\n\[email protected]_tag\ndef locale_dir():\n \"\"\"Returns current locale's direction.\"\"\"\n return trans_real.get_language_bidi() and \"rtl\" or \"ltr\"\n\n\[email protected](name='dateformat')\ndef do_dateformat(value, use_format='c'):\n \"\"\"Formats a `value` date using `format`.\n\n :param value: a datetime object.\n :param use_format: a format string accepted by\n :func:`django.utils.formats.get_format` or\n :func:`django.utils.dateformat.format`. 
If none is set, the current\n locale's default format will be used.\n \"\"\"\n try:\n use_format = get_format(use_format)\n except AttributeError:\n pass\n\n return dateformat.format(value, use_format)\n\n\[email protected](name='relative_datetime_format')\ndef do_relative_datetime_format(value):\n return timesince(calendar.timegm(value.timetuple()))\n\n\[email protected]_tag\ndef locale_align():\n \"\"\"Returns current locale's default alignment.\"\"\"\n return trans_real.get_language_bidi() and \"right\" or \"left\"\n", "path": "pootle/apps/pootle_misc/templatetags/locale.py"}]} | 671 | 243 |
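The essence of the fix above is converting a `datetime` into a Unix timestamp with `calendar.timegm()` before handing it to Pootle's `timesince()` helper. Below is a minimal, standalone sketch of just that conversion step (the surrounding Pootle template machinery is omitted and the sample date is invented for illustration):

```python
import calendar
from datetime import datetime, timezone


def to_unix_timestamp(value: datetime) -> int:
    # calendar.timegm interprets the struct_time as UTC, unlike time.mktime,
    # which would apply the local timezone offset.
    return calendar.timegm(value.timetuple())


# 2024-01-01 00:00:00 UTC -> 1704067200
print(to_unix_timestamp(datetime(2024, 1, 1, tzinfo=timezone.utc)))
```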
gh_patches_debug_1597 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix a few issues in Django example
The Django example has a few issues, fix them.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/examples/django/pages/views.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from django.http import HttpResponse
15
16 from opentelemetry import trace
17 from opentelemetry.sdk.trace import TracerProvider
18 from opentelemetry.sdk.trace.export import (
19 ConsoleSpanExporter,
20 SimpleExportSpanProcessor,
21 )
22
23 trace.set_tracer_provider(TracerProvider())
24 tracer = trace.get_tracer_provider().get_tracer(__name__)
25
26 trace.get_tracer_provider().add_span_processor(
27 SimpleExportSpanProcessor(ConsoleSpanExporter())
28 )
29
30
31 def home_page_view(request):
32 return HttpResponse("Hello, world")
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/examples/django/pages/views.py b/docs/examples/django/pages/views.py
--- a/docs/examples/django/pages/views.py
+++ b/docs/examples/django/pages/views.py
@@ -21,7 +21,6 @@
)
trace.set_tracer_provider(TracerProvider())
-tracer = trace.get_tracer_provider().get_tracer(__name__)
trace.get_tracer_provider().add_span_processor(
SimpleExportSpanProcessor(ConsoleSpanExporter())
| {"golden_diff": "diff --git a/docs/examples/django/pages/views.py b/docs/examples/django/pages/views.py\n--- a/docs/examples/django/pages/views.py\n+++ b/docs/examples/django/pages/views.py\n@@ -21,7 +21,6 @@\n )\n \n trace.set_tracer_provider(TracerProvider())\n-tracer = trace.get_tracer_provider().get_tracer(__name__)\n \n trace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n", "issue": "Fix a few issues in Django example\nThe Django example has a few issues, fix them.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom django.http import HttpResponse\n\nfrom opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleExportSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n)\n\n\ndef home_page_view(request):\n return HttpResponse(\"Hello, world\")\n", "path": "docs/examples/django/pages/views.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom django.http import HttpResponse\n\nfrom opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleExportSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\n\ntrace.get_tracer_provider().add_span_processor(\n SimpleExportSpanProcessor(ConsoleSpanExporter())\n)\n\n\ndef home_page_view(request):\n return HttpResponse(\"Hello, world\")\n", "path": "docs/examples/django/pages/views.py"}]} | 573 | 99 |
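The module-level `tracer` removed by this diff is never used by the view itself. If a span were needed inside the view, a common OpenTelemetry pattern is to obtain the tracer at the point of use; the snippet below is a hedged sketch of that pattern and is not part of the original example:

```python
from django.http import HttpResponse
from opentelemetry import trace


def home_page_view(request):
    # Acquire the tracer lazily, where it is actually needed.
    tracer = trace.get_tracer(__name__)
    with tracer.start_as_current_span("home_page_view"):
        return HttpResponse("Hello, world")
```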
gh_patches_debug_21112 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Checkov v2.3.261 fails with CKV_AWS_356 for KMS actions which must specify 'all resources'
**Describe the issue**
Checkov v2.3.261's CKV_AWS_356 check highlights IAM policies that are overly permissive, but it incorrectly flags actions in KMS policies that must apply to all resources and can instead be scoped with conditional access, per https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-services.html
This is similar to https://github.com/bridgecrewio/checkov/issues/5134, where certain actions like 'list' require all resources.
**Examples**
```
data "aws_iam_policy_document" "myKmsKey" {
actions = [
"kms:GenerateDataKey",
"kms:Decrypt"
]
resources = [
"*"
]
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
values = [
<SOME OTHER RESOURCE>.arn
]
}
}
}
```
**Version (please complete the following information):**
- Checkov Version 2.3.261
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py`
Content:
```
1 from typing import Dict, List, Any
2
3 from checkov.common.util.data_structures_utils import pickle_deepcopy
4
5
6 def convert_terraform_conf_to_iam_policy(conf: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:
7 """
8 converts terraform parsed configuration to iam policy document
9 """
10 result = pickle_deepcopy(conf)
11 if "statement" in result.keys():
12 result["Statement"] = result.pop("statement")
13 for statement in result["Statement"]:
14 if "actions" in statement:
15 statement["Action"] = statement.pop("actions")[0]
16 if "resources" in statement:
17 statement["Resource"] = statement.pop("resources")[0]
18 if "not_actions" in statement:
19 statement["NotAction"] = statement.pop("not_actions")[0]
20 if "not_resources" in statement:
21 statement["NotResource"] = statement.pop("not_resources")[0]
22 if "effect" in statement:
23 statement["Effect"] = statement.pop("effect")[0]
24 if "effect" not in statement and "Effect" not in statement:
25 statement["Effect"] = "Allow"
26 return result
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
--- a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
+++ b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import Dict, List, Any
from checkov.common.util.data_structures_utils import pickle_deepcopy
@@ -23,4 +25,13 @@
statement["Effect"] = statement.pop("effect")[0]
if "effect" not in statement and "Effect" not in statement:
statement["Effect"] = "Allow"
+ if "condition" in statement:
+ conditions = statement.pop("condition")
+ if conditions and isinstance(conditions, list):
+ statement["Condition"] = {}
+ for condition in conditions:
+ cond_operator = condition["test"][0]
+ cond_key = condition["variable"][0]
+ cond_value = condition["values"][0]
+ statement["Condition"].setdefault(cond_operator, {})[cond_key] = cond_value
return result
| {"golden_diff": "diff --git a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n--- a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n+++ b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py\n@@ -1,3 +1,5 @@\n+from __future__ import annotations\n+\n from typing import Dict, List, Any\n \n from checkov.common.util.data_structures_utils import pickle_deepcopy\n@@ -23,4 +25,13 @@\n statement[\"Effect\"] = statement.pop(\"effect\")[0]\n if \"effect\" not in statement and \"Effect\" not in statement:\n statement[\"Effect\"] = \"Allow\"\n+ if \"condition\" in statement:\n+ conditions = statement.pop(\"condition\")\n+ if conditions and isinstance(conditions, list):\n+ statement[\"Condition\"] = {}\n+ for condition in conditions:\n+ cond_operator = condition[\"test\"][0]\n+ cond_key = condition[\"variable\"][0]\n+ cond_value = condition[\"values\"][0]\n+ statement[\"Condition\"].setdefault(cond_operator, {})[cond_key] = cond_value\n return result\n", "issue": "Checkov v2.3.261 fails with CKV_AWS_356 for KMS actions which must specify 'all resources'\n**Describe the issue**\r\nCheckov v2.3.261 fails with CKV_AWS_356 highlights IAM policies which are overly permissive but is incorrectly identifying actions for KMS policies which need to be for all resources potentially scoped with conditional access per https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-services.html\r\n\r\nSimilar issue for https://github.com/bridgecrewio/checkov/issues/5134 where certain actions like 'list' require all resources.\r\n\r\n**Examples**\r\n```\r\ndata \"aws_iam_policy_document\" \"myKmsKey\" {\r\n actions = [\r\n \"kms:GenerateDataKey\",\r\n \"kms:Decrypt\"\r\n ]\r\n resources = [\r\n \"*\"\r\n ]\r\n\r\n condition {\r\n test = \"ArnEquals\"\r\n variable = \"aws:SourceArn\"\r\n values = [\r\n <SOME OTHER RESOURCE>.arn\r\n ]\r\n }\r\n }\r\n}\r\n```\r\n**Version (please complete the following information):**\r\n - Checkov Version 2.3.261\r\n\n", "before_files": [{"content": "from typing import Dict, List, Any\n\nfrom checkov.common.util.data_structures_utils import pickle_deepcopy\n\n\ndef convert_terraform_conf_to_iam_policy(conf: Dict[str, List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:\n \"\"\"\n converts terraform parsed configuration to iam policy document\n \"\"\"\n result = pickle_deepcopy(conf)\n if \"statement\" in result.keys():\n result[\"Statement\"] = result.pop(\"statement\")\n for statement in result[\"Statement\"]:\n if \"actions\" in statement:\n statement[\"Action\"] = statement.pop(\"actions\")[0]\n if \"resources\" in statement:\n statement[\"Resource\"] = statement.pop(\"resources\")[0]\n if \"not_actions\" in statement:\n statement[\"NotAction\"] = statement.pop(\"not_actions\")[0]\n if \"not_resources\" in statement:\n statement[\"NotResource\"] = statement.pop(\"not_resources\")[0]\n if \"effect\" in statement:\n statement[\"Effect\"] = statement.pop(\"effect\")[0]\n if \"effect\" not in statement and \"Effect\" not in statement:\n statement[\"Effect\"] = \"Allow\"\n return result\n", "path": "checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Dict, List, Any\n\nfrom checkov.common.util.data_structures_utils import pickle_deepcopy\n\n\ndef convert_terraform_conf_to_iam_policy(conf: Dict[str, 
List[Dict[str, Any]]]) -> Dict[str, List[Dict[str, Any]]]:\n \"\"\"\n converts terraform parsed configuration to iam policy document\n \"\"\"\n result = pickle_deepcopy(conf)\n if \"statement\" in result.keys():\n result[\"Statement\"] = result.pop(\"statement\")\n for statement in result[\"Statement\"]:\n if \"actions\" in statement:\n statement[\"Action\"] = statement.pop(\"actions\")[0]\n if \"resources\" in statement:\n statement[\"Resource\"] = statement.pop(\"resources\")[0]\n if \"not_actions\" in statement:\n statement[\"NotAction\"] = statement.pop(\"not_actions\")[0]\n if \"not_resources\" in statement:\n statement[\"NotResource\"] = statement.pop(\"not_resources\")[0]\n if \"effect\" in statement:\n statement[\"Effect\"] = statement.pop(\"effect\")[0]\n if \"effect\" not in statement and \"Effect\" not in statement:\n statement[\"Effect\"] = \"Allow\"\n if \"condition\" in statement:\n conditions = statement.pop(\"condition\")\n if conditions and isinstance(conditions, list):\n statement[\"Condition\"] = {}\n for condition in conditions:\n cond_operator = condition[\"test\"][0]\n cond_key = condition[\"variable\"][0]\n cond_value = condition[\"values\"][0]\n statement[\"Condition\"].setdefault(cond_operator, {})[cond_key] = cond_value\n return result\n", "path": "checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py"}]} | 818 | 270 |
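To make the converter change concrete, here is a small, self-contained sketch of the new `condition` handling applied to a parsed statement. The field layout mirrors Checkov's parsed Terraform output shown above; the SNS topic ARN is an invented placeholder:

```python
statement = {
    "actions": [["kms:GenerateDataKey", "kms:Decrypt"]],
    "resources": [["*"]],
    "condition": [
        {
            "test": ["ArnEquals"],
            "variable": ["aws:SourceArn"],
            "values": [["arn:aws:sns:us-east-1:123456789012:example-topic"]],
        }
    ],
}

conditions = statement.pop("condition")
statement["Condition"] = {}
for condition in conditions:
    cond_operator = condition["test"][0]
    cond_key = condition["variable"][0]
    cond_value = condition["values"][0]
    statement["Condition"].setdefault(cond_operator, {})[cond_key] = cond_value

# -> {'ArnEquals': {'aws:SourceArn': ['arn:aws:sns:us-east-1:123456789012:example-topic']}}
print(statement["Condition"])
```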
gh_patches_debug_8908 | rasdani/github-patches | git_diff | facebookresearch__ParlAI-371 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Slow loading of image features during training
For VQA tasks, it takes a massive amount of time to load the image features for training. The overhead is so large that, for the same model, it increases the training time by a factor of 50.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parlai/core/image_featurizers.py`
Content:
```
1 # All rights reserved.
2 # This source code is licensed under the BSD-style license found in the
3 # LICENSE file in the root directory of this source tree. An additional grant
4 # of patent rights can be found in the PATENTS file in the same directory.
5
6 import parlai.core.build_data as build_data
7
8 import os
9 import copy
10 import numpy as np
11 from PIL import Image
12
13 _greyscale = ' .,:;crsA23hHG#98&@'
14
15 class ImageLoader():
16 """Extract image feature using pretrained CNN network.
17 """
18 def __init__(self, opt):
19 self.opt = copy.deepcopy(opt)
20 self.netCNN = None
21
22 def init_cnn(self):
23 """Lazy initialization of preprocessor model in case we don't need any image preprocessing."""
24 try:
25 import torch
26 except ModuleNotFoundError:
27 raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')
28 from torch.autograd import Variable
29 import torchvision
30 import torchvision.transforms as transforms
31 import torch.nn as nn
32
33 opt = self.opt
34 self.image_size = opt['image_size']
35 self.crop_size = opt['image_cropsize']
36 self.datatype = opt['datatype']
37 self.image_mode = opt['image_mode']
38
39 opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
40 self.use_cuda = opt['cuda']
41
42 if self.use_cuda:
43 print('[ Using CUDA ]')
44 torch.cuda.set_device(opt['gpu'])
45
46 cnn_type, layer_num = self.image_mode_switcher()
47
48 # initialize the pretrained CNN using pytorch.
49 CNN = getattr(torchvision.models, cnn_type)
50
51 # cut off the additional layer.
52 self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])
53
54 # initialize the transform function using torch vision.
55 self.transform = transforms.Compose([
56 transforms.Scale(self.image_size),
57 transforms.CenterCrop(self.crop_size),
58 transforms.ToTensor(),
59 transforms.Normalize(mean=[0.485, 0.456, 0.406],
60 std=[0.229, 0.224, 0.225])
61 ])
62
63 # container for single image
64 self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)
65
66 if self.use_cuda:
67 self.cuda()
68 self.xs = self.xs.cuda()
69
70 # make self.xs variable.
71 self.xs = Variable(self.xs)
72
73 def cuda(self):
74 self.netCNN.cuda()
75
76 def save(self, feature, path):
77 np.save(path, feature)
78
79 def image_mode_switcher(self):
80 switcher = {
81 'resnet152': ['resnet152', -1],
82 'resnet101': ['resnet101', -1],
83 'resnet50': ['resnet50', -1],
84 'resnet34': ['resnet34', -1],
85 'resnet18': ['resnet18', -1],
86 'resnet152_spatial': ['resnet152', -2],
87 'resnet101_spatial': ['resnet101', -2],
88 'resnet50_spatial': ['resnet50', -2],
89 'resnet34_spatial': ['resnet34', -2],
90 'resnet18_spatial': ['resnet18', -2],
91 }
92
93 if self.image_mode not in switcher:
94 raise NotImplementedError('image preprocessing mode' +
95 '{} not supported yet'.format(self.image_mode))
96
97 return switcher.get(self.image_mode)
98
99 def extract(self, image, path):
100 # check whether initlize CNN network.
101 if not self.netCNN:
102 self.init_cnn()
103
104 self.xs.data.copy_(self.transform(image))
105 # extract the image feature
106 feature = self.netCNN(self.xs)
107 feature = feature.cpu().data.numpy()
108 # save the feature
109 self.save(feature, path)
110 return feature
111
112 def img_to_ascii(self, path):
113 im = Image.open(path)
114 im.thumbnail((60, 40), Image.BICUBIC)
115 im = im.convert('L')
116 asc = []
117 for y in range(0, im.size[1]):
118 for x in range(0, im.size[0]):
119 lum = 255 - im.getpixel((x, y))
120 asc.append(_greyscale[lum * len(_greyscale) // 256])
121 asc.append('\n')
122 return ''.join(asc)
123
124 def load(self, path):
125 opt = self.opt
126 mode = opt.get('image_mode', 'raw')
127 if mode is None or mode == 'none':
128 # don't need to load images
129 return None
130 elif mode == 'raw':
131 # raw just returns RGB values
132 return Image.open(path).convert('RGB')
133 elif mode == 'ascii':
134 # convert images to ascii ¯\_(ツ)_/¯
135 return self.img_to_ascii(path)
136 else:
137 # otherwise, looks for preprocessed version under 'mode' directory
138 prepath, imagefn = os.path.split(path)
139
140 dpath = os.path.join(prepath, mode)
141
142 if not os.path.exists(dpath):
143 build_data.make_dir(dpath)
144
145 imagefn = imagefn.split('.')[0]
146 imagefn = imagefn + '.npy'
147 new_path = os.path.join(prepath, mode, imagefn)
148
149 if not os.path.isfile(new_path):
150 return self.extract(Image.open(path).convert('RGB'), new_path)
151 else:
152 return np.load(new_path)
153
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py
--- a/parlai/core/image_featurizers.py
+++ b/parlai/core/image_featurizers.py
@@ -9,6 +9,7 @@
import copy
import numpy as np
from PIL import Image
+from functools import lru_cache
_greyscale = ' .,:;crsA23hHG#98&@'
@@ -121,6 +122,7 @@
asc.append('\n')
return ''.join(asc)
+ @lru_cache(maxsize=None)
def load(self, path):
opt = self.opt
mode = opt.get('image_mode', 'raw')
| {"golden_diff": "diff --git a/parlai/core/image_featurizers.py b/parlai/core/image_featurizers.py\n--- a/parlai/core/image_featurizers.py\n+++ b/parlai/core/image_featurizers.py\n@@ -9,6 +9,7 @@\n import copy\n import numpy as np\n from PIL import Image\n+from functools import lru_cache\n \n _greyscale = ' .,:;crsA23hHG#98&@'\n \n@@ -121,6 +122,7 @@\n asc.append('\\n')\n return ''.join(asc)\n \n+ @lru_cache(maxsize=None)\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n", "issue": "Slow loading of image features during training\nFor VQA tasks, it takes massive amount of time to load the image features for training. The overhead is so much that for the same model it increases the training time by a factor of 50.\n", "before_files": [{"content": "# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport parlai.core.build_data as build_data\n\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\n\n_greyscale = ' .,:;crsA23hHG#98&@'\n\nclass ImageLoader():\n \"\"\"Extract image feature using pretrained CNN network.\n \"\"\"\n def __init__(self, opt):\n self.opt = copy.deepcopy(opt)\n self.netCNN = None\n\n def init_cnn(self):\n \"\"\"Lazy initialization of preprocessor model in case we don't need any image preprocessing.\"\"\"\n try:\n import torch\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')\n from torch.autograd import Variable\n import torchvision\n import torchvision.transforms as transforms\n import torch.nn as nn\n\n opt = self.opt\n self.image_size = opt['image_size']\n self.crop_size = opt['image_cropsize']\n self.datatype = opt['datatype']\n self.image_mode = opt['image_mode']\n\n opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()\n self.use_cuda = opt['cuda']\n\n if self.use_cuda:\n print('[ Using CUDA ]')\n torch.cuda.set_device(opt['gpu'])\n\n cnn_type, layer_num = self.image_mode_switcher()\n\n # initialize the pretrained CNN using pytorch.\n CNN = getattr(torchvision.models, cnn_type)\n\n # cut off the additional layer.\n self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])\n\n # initialize the transform function using torch vision.\n self.transform = transforms.Compose([\n transforms.Scale(self.image_size),\n transforms.CenterCrop(self.crop_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # container for single image\n self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)\n\n if self.use_cuda:\n self.cuda()\n self.xs = self.xs.cuda()\n\n # make self.xs variable.\n self.xs = Variable(self.xs)\n\n def cuda(self):\n self.netCNN.cuda()\n\n def save(self, feature, path):\n np.save(path, feature)\n\n def image_mode_switcher(self):\n switcher = {\n 'resnet152': ['resnet152', -1],\n 'resnet101': ['resnet101', -1],\n 'resnet50': ['resnet50', -1],\n 'resnet34': ['resnet34', -1],\n 'resnet18': ['resnet18', -1],\n 'resnet152_spatial': ['resnet152', -2],\n 'resnet101_spatial': ['resnet101', -2],\n 'resnet50_spatial': ['resnet50', -2],\n 'resnet34_spatial': ['resnet34', -2],\n 'resnet18_spatial': ['resnet18', -2],\n }\n\n if self.image_mode not in switcher:\n raise NotImplementedError('image preprocessing mode' +\n '{} not supported 
yet'.format(self.image_mode))\n\n return switcher.get(self.image_mode)\n\n def extract(self, image, path):\n # check whether initlize CNN network.\n if not self.netCNN:\n self.init_cnn()\n\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n feature = feature.cpu().data.numpy()\n # save the feature\n self.save(feature, path)\n return feature\n\n def img_to_ascii(self, path):\n im = Image.open(path)\n im.thumbnail((60, 40), Image.BICUBIC)\n im = im.convert('L')\n asc = []\n for y in range(0, im.size[1]):\n for x in range(0, im.size[0]):\n lum = 255 - im.getpixel((x, y))\n asc.append(_greyscale[lum * len(_greyscale) // 256])\n asc.append('\\n')\n return ''.join(asc)\n\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n if mode is None or mode == 'none':\n # don't need to load images\n return None\n elif mode == 'raw':\n # raw just returns RGB values\n return Image.open(path).convert('RGB')\n elif mode == 'ascii':\n # convert images to ascii \u00af\\_(\u30c4)_/\u00af\n return self.img_to_ascii(path)\n else:\n # otherwise, looks for preprocessed version under 'mode' directory\n prepath, imagefn = os.path.split(path)\n\n dpath = os.path.join(prepath, mode)\n\n if not os.path.exists(dpath):\n build_data.make_dir(dpath)\n\n imagefn = imagefn.split('.')[0]\n imagefn = imagefn + '.npy'\n new_path = os.path.join(prepath, mode, imagefn)\n\n if not os.path.isfile(new_path):\n return self.extract(Image.open(path).convert('RGB'), new_path)\n else:\n return np.load(new_path)\n", "path": "parlai/core/image_featurizers.py"}], "after_files": [{"content": "# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree. 
An additional grant\n# of patent rights can be found in the PATENTS file in the same directory.\n\nimport parlai.core.build_data as build_data\n\nimport os\nimport copy\nimport numpy as np\nfrom PIL import Image\nfrom functools import lru_cache\n\n_greyscale = ' .,:;crsA23hHG#98&@'\n\nclass ImageLoader():\n \"\"\"Extract image feature using pretrained CNN network.\n \"\"\"\n def __init__(self, opt):\n self.opt = copy.deepcopy(opt)\n self.netCNN = None\n\n def init_cnn(self):\n \"\"\"Lazy initialization of preprocessor model in case we don't need any image preprocessing.\"\"\"\n try:\n import torch\n except ModuleNotFoundError:\n raise ModuleNotFoundError('Need to install Pytorch: go to pytorch.org')\n from torch.autograd import Variable\n import torchvision\n import torchvision.transforms as transforms\n import torch.nn as nn\n\n opt = self.opt\n self.image_size = opt['image_size']\n self.crop_size = opt['image_cropsize']\n self.datatype = opt['datatype']\n self.image_mode = opt['image_mode']\n\n opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()\n self.use_cuda = opt['cuda']\n\n if self.use_cuda:\n print('[ Using CUDA ]')\n torch.cuda.set_device(opt['gpu'])\n\n cnn_type, layer_num = self.image_mode_switcher()\n\n # initialize the pretrained CNN using pytorch.\n CNN = getattr(torchvision.models, cnn_type)\n\n # cut off the additional layer.\n self.netCNN = nn.Sequential(*list(CNN(pretrained=True).children())[:layer_num])\n\n # initialize the transform function using torch vision.\n self.transform = transforms.Compose([\n transforms.Scale(self.image_size),\n transforms.CenterCrop(self.crop_size),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n\n # container for single image\n self.xs = torch.FloatTensor(1, 3, self.crop_size, self.crop_size).fill_(0)\n\n if self.use_cuda:\n self.cuda()\n self.xs = self.xs.cuda()\n\n # make self.xs variable.\n self.xs = Variable(self.xs)\n\n def cuda(self):\n self.netCNN.cuda()\n\n def save(self, feature, path):\n np.save(path, feature)\n\n def image_mode_switcher(self):\n switcher = {\n 'resnet152': ['resnet152', -1],\n 'resnet101': ['resnet101', -1],\n 'resnet50': ['resnet50', -1],\n 'resnet34': ['resnet34', -1],\n 'resnet18': ['resnet18', -1],\n 'resnet152_spatial': ['resnet152', -2],\n 'resnet101_spatial': ['resnet101', -2],\n 'resnet50_spatial': ['resnet50', -2],\n 'resnet34_spatial': ['resnet34', -2],\n 'resnet18_spatial': ['resnet18', -2],\n }\n\n if self.image_mode not in switcher:\n raise NotImplementedError('image preprocessing mode' +\n '{} not supported yet'.format(self.image_mode))\n\n return switcher.get(self.image_mode)\n\n def extract(self, image, path):\n # check whether initlize CNN network.\n if not self.netCNN:\n self.init_cnn()\n\n self.xs.data.copy_(self.transform(image))\n # extract the image feature\n feature = self.netCNN(self.xs)\n feature = feature.cpu().data.numpy()\n # save the feature\n self.save(feature, path)\n return feature\n\n def img_to_ascii(self, path):\n im = Image.open(path)\n im.thumbnail((60, 40), Image.BICUBIC)\n im = im.convert('L')\n asc = []\n for y in range(0, im.size[1]):\n for x in range(0, im.size[0]):\n lum = 255 - im.getpixel((x, y))\n asc.append(_greyscale[lum * len(_greyscale) // 256])\n asc.append('\\n')\n return ''.join(asc)\n\n @lru_cache(maxsize=None)\n def load(self, path):\n opt = self.opt\n mode = opt.get('image_mode', 'raw')\n if mode is None or mode == 'none':\n # don't need to load images\n return None\n elif mode == 
'raw':\n # raw just returns RGB values\n return Image.open(path).convert('RGB')\n elif mode == 'ascii':\n # convert images to ascii \u00af\\_(\u30c4)_/\u00af\n return self.img_to_ascii(path)\n else:\n # otherwise, looks for preprocessed version under 'mode' directory\n prepath, imagefn = os.path.split(path)\n\n dpath = os.path.join(prepath, mode)\n\n if not os.path.exists(dpath):\n build_data.make_dir(dpath)\n\n imagefn = imagefn.split('.')[0]\n imagefn = imagefn + '.npy'\n new_path = os.path.join(prepath, mode, imagefn)\n\n if not os.path.isfile(new_path):\n return self.extract(Image.open(path).convert('RGB'), new_path)\n else:\n return np.load(new_path)\n", "path": "parlai/core/image_featurizers.py"}]} | 1,926 | 169 |
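The patch memoizes `ImageLoader.load` with `functools.lru_cache`, so repeated lookups of the same image path skip the disk round trip. The toy sketch below shows the resulting behaviour (it is not the actual ParlAI class); note that decorating an instance method this way includes `self` in the cache key and keeps the instance alive for as long as the cache does:

```python
from functools import lru_cache


class Loader:
    def __init__(self):
        self.disk_reads = 0

    @lru_cache(maxsize=None)
    def load(self, path):
        self.disk_reads += 1  # stands in for an expensive np.load / CNN forward pass
        return f"features for {path}"


loader = Loader()
loader.load("img_001.jpg")
loader.load("img_001.jpg")  # the second call is served from the cache
print(loader.disk_reads)    # -> 1
```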
gh_patches_debug_35693 | rasdani/github-patches | git_diff | falconry__falcon-1987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update asgi look examples to use aioredis 2
A new major version of aioredis was released, and it has some API changes.
The changelog is here: https://github.com/aio-libs/aioredis-py/blob/master/CHANGELOG.md
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/asgilook/asgilook/config.py`
Content:
```
1 import os
2 import pathlib
3 import uuid
4
5 import aioredis
6
7
8 class Config:
9 DEFAULT_CONFIG_PATH = '/tmp/asgilook'
10 DEFAULT_MIN_THUMB_SIZE = 64
11 DEFAULT_REDIS_HOST = 'redis://localhost'
12 DEFAULT_REDIS_POOL = aioredis.create_redis_pool
13 DEFAULT_UUID_GENERATOR = uuid.uuid4
14
15 def __init__(self):
16 self.storage_path = pathlib.Path(
17 os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)
18 )
19 self.storage_path.mkdir(parents=True, exist_ok=True)
20
21 self.create_redis_pool = Config.DEFAULT_REDIS_POOL
22 self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE
23 self.redis_host = self.DEFAULT_REDIS_HOST
24 self.uuid_generator = Config.DEFAULT_UUID_GENERATOR
25
```
Path: `examples/asgilook/asgilook/cache.py`
Content:
```
1 import msgpack
2
3
4 class RedisCache:
5 PREFIX = 'asgilook:'
6 INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})
7 CACHE_HEADER = 'X-ASGILook-Cache'
8 TTL = 3600
9
10 def __init__(self, config):
11 self._config = config
12
13 # NOTE(vytas): To be initialized upon application startup (see the
14 # method below).
15 self._redis = None
16
17 async def _serialize_response(self, resp):
18 data = await resp.render_body()
19 return msgpack.packb([resp.content_type, data], use_bin_type=True)
20
21 def _deserialize_response(self, resp, data):
22 resp.content_type, resp.data = msgpack.unpackb(data, raw=False)
23 resp.complete = True
24 resp.context.cached = True
25
26 async def process_startup(self, scope, event):
27 if self._redis is None:
28 self._redis = await self._config.create_redis_pool(self._config.redis_host)
29
30 async def process_request(self, req, resp):
31 resp.context.cached = False
32
33 if req.method in self.INVALIDATE_ON:
34 return
35
36 key = f'{self.PREFIX}/{req.path}'
37 data = await self._redis.get(key)
38 if data is not None:
39 self._deserialize_response(resp, data)
40 resp.set_header(self.CACHE_HEADER, 'Hit')
41 else:
42 resp.set_header(self.CACHE_HEADER, 'Miss')
43
44 async def process_response(self, req, resp, resource, req_succeeded):
45 if not req_succeeded:
46 return
47
48 key = f'{self.PREFIX}/{req.path}'
49
50 if req.method in self.INVALIDATE_ON:
51 await self._redis.delete(key)
52 elif not resp.context.cached:
53 data = await self._serialize_response(resp)
54 await self._redis.set(key, data, expire=self.TTL)
55
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/asgilook/asgilook/cache.py b/examples/asgilook/asgilook/cache.py
--- a/examples/asgilook/asgilook/cache.py
+++ b/examples/asgilook/asgilook/cache.py
@@ -9,10 +9,7 @@
def __init__(self, config):
self._config = config
-
- # NOTE(vytas): To be initialized upon application startup (see the
- # method below).
- self._redis = None
+ self._redis = self._config.redis_from_url(self._config.redis_host)
async def _serialize_response(self, resp):
data = await resp.render_body()
@@ -24,8 +21,10 @@
resp.context.cached = True
async def process_startup(self, scope, event):
- if self._redis is None:
- self._redis = await self._config.create_redis_pool(self._config.redis_host)
+ await self._redis.ping()
+
+ async def process_shutdown(self, scope, event):
+ await self._redis.close()
async def process_request(self, req, resp):
resp.context.cached = False
@@ -51,4 +50,4 @@
await self._redis.delete(key)
elif not resp.context.cached:
data = await self._serialize_response(resp)
- await self._redis.set(key, data, expire=self.TTL)
+ await self._redis.set(key, data, ex=self.TTL)
diff --git a/examples/asgilook/asgilook/config.py b/examples/asgilook/asgilook/config.py
--- a/examples/asgilook/asgilook/config.py
+++ b/examples/asgilook/asgilook/config.py
@@ -9,7 +9,7 @@
DEFAULT_CONFIG_PATH = '/tmp/asgilook'
DEFAULT_MIN_THUMB_SIZE = 64
DEFAULT_REDIS_HOST = 'redis://localhost'
- DEFAULT_REDIS_POOL = aioredis.create_redis_pool
+ DEFAULT_REDIS_FROM_URL = aioredis.from_url
DEFAULT_UUID_GENERATOR = uuid.uuid4
def __init__(self):
@@ -18,7 +18,7 @@
)
self.storage_path.mkdir(parents=True, exist_ok=True)
- self.create_redis_pool = Config.DEFAULT_REDIS_POOL
+ self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL
self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE
self.redis_host = self.DEFAULT_REDIS_HOST
self.uuid_generator = Config.DEFAULT_UUID_GENERATOR
| {"golden_diff": "diff --git a/examples/asgilook/asgilook/cache.py b/examples/asgilook/asgilook/cache.py\n--- a/examples/asgilook/asgilook/cache.py\n+++ b/examples/asgilook/asgilook/cache.py\n@@ -9,10 +9,7 @@\n \n def __init__(self, config):\n self._config = config\n-\n- # NOTE(vytas): To be initialized upon application startup (see the\n- # method below).\n- self._redis = None\n+ self._redis = self._config.redis_from_url(self._config.redis_host)\n \n async def _serialize_response(self, resp):\n data = await resp.render_body()\n@@ -24,8 +21,10 @@\n resp.context.cached = True\n \n async def process_startup(self, scope, event):\n- if self._redis is None:\n- self._redis = await self._config.create_redis_pool(self._config.redis_host)\n+ await self._redis.ping()\n+\n+ async def process_shutdown(self, scope, event):\n+ await self._redis.close()\n \n async def process_request(self, req, resp):\n resp.context.cached = False\n@@ -51,4 +50,4 @@\n await self._redis.delete(key)\n elif not resp.context.cached:\n data = await self._serialize_response(resp)\n- await self._redis.set(key, data, expire=self.TTL)\n+ await self._redis.set(key, data, ex=self.TTL)\ndiff --git a/examples/asgilook/asgilook/config.py b/examples/asgilook/asgilook/config.py\n--- a/examples/asgilook/asgilook/config.py\n+++ b/examples/asgilook/asgilook/config.py\n@@ -9,7 +9,7 @@\n DEFAULT_CONFIG_PATH = '/tmp/asgilook'\n DEFAULT_MIN_THUMB_SIZE = 64\n DEFAULT_REDIS_HOST = 'redis://localhost'\n- DEFAULT_REDIS_POOL = aioredis.create_redis_pool\n+ DEFAULT_REDIS_FROM_URL = aioredis.from_url\n DEFAULT_UUID_GENERATOR = uuid.uuid4\n \n def __init__(self):\n@@ -18,7 +18,7 @@\n )\n self.storage_path.mkdir(parents=True, exist_ok=True)\n \n- self.create_redis_pool = Config.DEFAULT_REDIS_POOL\n+ self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL\n self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE\n self.redis_host = self.DEFAULT_REDIS_HOST\n self.uuid_generator = Config.DEFAULT_UUID_GENERATOR\n", "issue": "Update asgi look examples to use aioredis 2\nA new major vesion of aioredis was released, and it has some api changes.\r\nThis is the changelog https://github.com/aio-libs/aioredis-py/blob/master/CHANGELOG.md\n", "before_files": [{"content": "import os\nimport pathlib\nimport uuid\n\nimport aioredis\n\n\nclass Config:\n DEFAULT_CONFIG_PATH = '/tmp/asgilook'\n DEFAULT_MIN_THUMB_SIZE = 64\n DEFAULT_REDIS_HOST = 'redis://localhost'\n DEFAULT_REDIS_POOL = aioredis.create_redis_pool\n DEFAULT_UUID_GENERATOR = uuid.uuid4\n\n def __init__(self):\n self.storage_path = pathlib.Path(\n os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)\n )\n self.storage_path.mkdir(parents=True, exist_ok=True)\n\n self.create_redis_pool = Config.DEFAULT_REDIS_POOL\n self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE\n self.redis_host = self.DEFAULT_REDIS_HOST\n self.uuid_generator = Config.DEFAULT_UUID_GENERATOR\n", "path": "examples/asgilook/asgilook/config.py"}, {"content": "import msgpack\n\n\nclass RedisCache:\n PREFIX = 'asgilook:'\n INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})\n CACHE_HEADER = 'X-ASGILook-Cache'\n TTL = 3600\n\n def __init__(self, config):\n self._config = config\n\n # NOTE(vytas): To be initialized upon application startup (see the\n # method below).\n self._redis = None\n\n async def _serialize_response(self, resp):\n data = await resp.render_body()\n return msgpack.packb([resp.content_type, data], use_bin_type=True)\n\n def _deserialize_response(self, resp, data):\n resp.content_type, resp.data = msgpack.unpackb(data, 
raw=False)\n resp.complete = True\n resp.context.cached = True\n\n async def process_startup(self, scope, event):\n if self._redis is None:\n self._redis = await self._config.create_redis_pool(self._config.redis_host)\n\n async def process_request(self, req, resp):\n resp.context.cached = False\n\n if req.method in self.INVALIDATE_ON:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n data = await self._redis.get(key)\n if data is not None:\n self._deserialize_response(resp, data)\n resp.set_header(self.CACHE_HEADER, 'Hit')\n else:\n resp.set_header(self.CACHE_HEADER, 'Miss')\n\n async def process_response(self, req, resp, resource, req_succeeded):\n if not req_succeeded:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n\n if req.method in self.INVALIDATE_ON:\n await self._redis.delete(key)\n elif not resp.context.cached:\n data = await self._serialize_response(resp)\n await self._redis.set(key, data, expire=self.TTL)\n", "path": "examples/asgilook/asgilook/cache.py"}], "after_files": [{"content": "import os\nimport pathlib\nimport uuid\n\nimport aioredis\n\n\nclass Config:\n DEFAULT_CONFIG_PATH = '/tmp/asgilook'\n DEFAULT_MIN_THUMB_SIZE = 64\n DEFAULT_REDIS_HOST = 'redis://localhost'\n DEFAULT_REDIS_FROM_URL = aioredis.from_url\n DEFAULT_UUID_GENERATOR = uuid.uuid4\n\n def __init__(self):\n self.storage_path = pathlib.Path(\n os.environ.get('ASGI_LOOK_STORAGE_PATH', self.DEFAULT_CONFIG_PATH)\n )\n self.storage_path.mkdir(parents=True, exist_ok=True)\n\n self.redis_from_url = Config.DEFAULT_REDIS_FROM_URL\n self.min_thumb_size = self.DEFAULT_MIN_THUMB_SIZE\n self.redis_host = self.DEFAULT_REDIS_HOST\n self.uuid_generator = Config.DEFAULT_UUID_GENERATOR\n", "path": "examples/asgilook/asgilook/config.py"}, {"content": "import msgpack\n\n\nclass RedisCache:\n PREFIX = 'asgilook:'\n INVALIDATE_ON = frozenset({'DELETE', 'POST', 'PUT'})\n CACHE_HEADER = 'X-ASGILook-Cache'\n TTL = 3600\n\n def __init__(self, config):\n self._config = config\n self._redis = self._config.redis_from_url(self._config.redis_host)\n\n async def _serialize_response(self, resp):\n data = await resp.render_body()\n return msgpack.packb([resp.content_type, data], use_bin_type=True)\n\n def _deserialize_response(self, resp, data):\n resp.content_type, resp.data = msgpack.unpackb(data, raw=False)\n resp.complete = True\n resp.context.cached = True\n\n async def process_startup(self, scope, event):\n await self._redis.ping()\n\n async def process_shutdown(self, scope, event):\n await self._redis.close()\n\n async def process_request(self, req, resp):\n resp.context.cached = False\n\n if req.method in self.INVALIDATE_ON:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n data = await self._redis.get(key)\n if data is not None:\n self._deserialize_response(resp, data)\n resp.set_header(self.CACHE_HEADER, 'Hit')\n else:\n resp.set_header(self.CACHE_HEADER, 'Miss')\n\n async def process_response(self, req, resp, resource, req_succeeded):\n if not req_succeeded:\n return\n\n key = f'{self.PREFIX}/{req.path}'\n\n if req.method in self.INVALIDATE_ON:\n await self._redis.delete(key)\n elif not resp.context.cached:\n data = await self._serialize_response(resp)\n await self._redis.set(key, data, ex=self.TTL)\n", "path": "examples/asgilook/asgilook/cache.py"}]} | 1,071 | 552 |
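For reference, the aioredis 1.x to 2.x differences this patch follows are roughly the ones sketched below. The `from_url`/`ping`/`close` calls and the `ex=` keyword come straight from the diff above; everything else is an assumed, minimal usage example rather than project code:

```python
import asyncio

import aioredis


async def demo():
    # aioredis 1.x (old style):
    #   redis = await aioredis.create_redis_pool("redis://localhost")
    #   await redis.set("key", b"value", expire=3600)

    # aioredis 2.x: from_url returns a client immediately; connections
    # are established lazily on the first command.
    redis = aioredis.from_url("redis://localhost")
    await redis.ping()                          # fail fast if Redis is unreachable
    await redis.set("key", b"value", ex=3600)   # 'expire=' became 'ex='
    await redis.close()


asyncio.run(demo())
```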
gh_patches_debug_783 | rasdani/github-patches | git_diff | modin-project__modin-3440 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove inheritance of Modin DMatrix from xgb.DMatrix
Inheriting Modin's DMatrix from xgb.DMatrix does not provide any benefits, and incorrect documentation is shown to the user by the `help(modin.experimental.xgboost.DMatrix)` command.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/experimental/xgboost/xgboost.py`
Content:
```
1 # Licensed to Modin Development Team under one or more contributor license agreements.
2 # See the NOTICE file distributed with this work for additional information regarding
3 # copyright ownership. The Modin Development Team licenses this file to you under the
4 # Apache License, Version 2.0 (the "License"); you may not use this file except in
5 # compliance with the License. You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software distributed under
10 # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific language
12 # governing permissions and limitations under the License.
13
14 """Module holds public interfaces for work Modin XGBoost."""
15
16 import logging
17 from typing import Dict, Optional
18
19 import xgboost as xgb
20
21 from modin.config import Engine
22 from modin.distributed.dataframe.pandas import unwrap_partitions
23 import modin.pandas as pd
24
25 LOGGER = logging.getLogger("[modin.xgboost]")
26
27
28 class DMatrix(xgb.DMatrix):
29 """
30 DMatrix holds references to partitions of Modin DataFrame.
31
32 On init stage unwrapping partitions of Modin DataFrame is started.
33
34 Parameters
35 ----------
36 data : modin.pandas.DataFrame
37 Data source of DMatrix.
38 label : modin.pandas.DataFrame or modin.pandas.Series
39 Labels used for training.
40
41 Notes
42 -----
43 Currently DMatrix supports only `data` and `label` parameters.
44 """
45
46 def __init__(self, data, label):
47 assert isinstance(
48 data, pd.DataFrame
49 ), f"Type of `data` is {type(data)}, but expected {pd.DataFrame}."
50 assert isinstance(
51 label, (pd.DataFrame, pd.Series)
52 ), f"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}."
53
54 self.data = unwrap_partitions(data, axis=0, get_ip=True)
55 self.label = unwrap_partitions(label, axis=0)
56
57 self.metadata = (
58 data.index,
59 data.columns,
60 data._query_compiler._modin_frame._row_lengths,
61 )
62
63 def __iter__(self):
64 """
65 Return unwrapped `self.data` and `self.label`.
66
67 Yields
68 ------
69 list
70 List of `self.data` with pairs of references to IP of row partition
71 and row partition [(IP_ref0, partition_ref0), ..].
72 list
73 List of `self.label` with references to row partitions
74 [partition_ref0, ..].
75 """
76 yield self.data
77 yield self.label
78
79
80 class Booster(xgb.Booster):
81 """
82 A Modin Booster of XGBoost.
83
84 Booster is the model of XGBoost, that contains low level routines for
85 training, prediction and evaluation.
86
87 Parameters
88 ----------
89 params : dict, optional
90 Parameters for boosters.
91 cache : list, default: empty
92 List of cache items.
93 model_file : string/os.PathLike/xgb.Booster/bytearray, optional
94 Path to the model file if it's string or PathLike or xgb.Booster.
95 """
96
97 def __init__(self, params=None, cache=(), model_file=None): # noqa: MD01
98 super(Booster, self).__init__(params=params, cache=cache, model_file=model_file)
99
100 def predict(
101 self,
102 data: DMatrix,
103 **kwargs,
104 ):
105 """
106 Run distributed prediction with a trained booster.
107
108 During execution it runs ``xgb.predict`` on each worker for subset of `data`
109 and creates Modin DataFrame with prediction results.
110
111 Parameters
112 ----------
113 data : modin.experimental.xgboost.DMatrix
114 Input data used for prediction.
115 **kwargs : dict
116 Other parameters are the same as for ``xgboost.Booster.predict``.
117
118 Returns
119 -------
120 modin.pandas.DataFrame
121 Modin DataFrame with prediction results.
122 """
123 LOGGER.info("Prediction started")
124
125 if Engine.get() == "Ray":
126 from .xgboost_ray import _predict
127 else:
128 raise ValueError("Current version supports only Ray engine.")
129
130 assert isinstance(
131 data, DMatrix
132 ), f"Type of `data` is {type(data)}, but expected {DMatrix}."
133
134 result = _predict(self.copy(), data, **kwargs)
135 LOGGER.info("Prediction finished")
136
137 return result
138
139
140 def train(
141 params: Dict,
142 dtrain: DMatrix,
143 *args,
144 evals=(),
145 num_actors: Optional[int] = None,
146 evals_result: Optional[Dict] = None,
147 **kwargs,
148 ):
149 """
150 Run distributed training of XGBoost model.
151
152 During work it evenly distributes `dtrain` between workers according
153 to IP addresses partitions (in case of not even distribution of `dtrain`
154 over nodes, some partitions will be re-distributed between nodes),
155 runs xgb.train on each worker for subset of `dtrain` and reduces training results
156 of each worker using Rabit Context.
157
158 Parameters
159 ----------
160 params : dict
161 Booster params.
162 dtrain : modin.experimental.xgboost.DMatrix
163 Data to be trained against.
164 *args : iterable
165 Other parameters for `xgboost.train`.
166 evals : list of pairs (modin.experimental.xgboost.DMatrix, str), default: empty
167 List of validation sets for which metrics will evaluated during training.
168 Validation metrics will help us track the performance of the model.
169 num_actors : int, optional
170 Number of actors for training. If unspecified, this value will be
171 computed automatically.
172 evals_result : dict, optional
173 Dict to store evaluation results in.
174 **kwargs : dict
175 Other parameters are the same as `xgboost.train`.
176
177 Returns
178 -------
179 modin.experimental.xgboost.Booster
180 A trained booster.
181 """
182 LOGGER.info("Training started")
183
184 if Engine.get() == "Ray":
185 from .xgboost_ray import _train
186 else:
187 raise ValueError("Current version supports only Ray engine.")
188
189 assert isinstance(
190 dtrain, DMatrix
191 ), f"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}."
192 result = _train(dtrain, params, *args, num_actors=num_actors, evals=evals, **kwargs)
193 if isinstance(evals_result, dict):
194 evals_result.update(result["history"])
195
196 LOGGER.info("Training finished")
197 return Booster(model_file=result["booster"])
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/modin/experimental/xgboost/xgboost.py b/modin/experimental/xgboost/xgboost.py
--- a/modin/experimental/xgboost/xgboost.py
+++ b/modin/experimental/xgboost/xgboost.py
@@ -25,7 +25,7 @@
LOGGER = logging.getLogger("[modin.xgboost]")
-class DMatrix(xgb.DMatrix):
+class DMatrix:
"""
DMatrix holds references to partitions of Modin DataFrame.
| {"golden_diff": "diff --git a/modin/experimental/xgboost/xgboost.py b/modin/experimental/xgboost/xgboost.py\n--- a/modin/experimental/xgboost/xgboost.py\n+++ b/modin/experimental/xgboost/xgboost.py\n@@ -25,7 +25,7 @@\n LOGGER = logging.getLogger(\"[modin.xgboost]\")\n \n \n-class DMatrix(xgb.DMatrix):\n+class DMatrix:\n \"\"\"\n DMatrix holds references to partitions of Modin DataFrame.\n", "issue": "Remove inheritance of Modin DMatrix from xgb.DMatrix\nInheritance of Modin DMatrix from xgb.DMatrix doesn't include any benefits. Wrong documentation is provided to user using `help(modin.experimtenal.xgboost.DMatrix)` command.\n", "before_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module holds public interfaces for work Modin XGBoost.\"\"\"\n\nimport logging\nfrom typing import Dict, Optional\n\nimport xgboost as xgb\n\nfrom modin.config import Engine\nfrom modin.distributed.dataframe.pandas import unwrap_partitions\nimport modin.pandas as pd\n\nLOGGER = logging.getLogger(\"[modin.xgboost]\")\n\n\nclass DMatrix(xgb.DMatrix):\n \"\"\"\n DMatrix holds references to partitions of Modin DataFrame.\n\n On init stage unwrapping partitions of Modin DataFrame is started.\n\n Parameters\n ----------\n data : modin.pandas.DataFrame\n Data source of DMatrix.\n label : modin.pandas.DataFrame or modin.pandas.Series\n Labels used for training.\n\n Notes\n -----\n Currently DMatrix supports only `data` and `label` parameters.\n \"\"\"\n\n def __init__(self, data, label):\n assert isinstance(\n data, pd.DataFrame\n ), f\"Type of `data` is {type(data)}, but expected {pd.DataFrame}.\"\n assert isinstance(\n label, (pd.DataFrame, pd.Series)\n ), f\"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}.\"\n\n self.data = unwrap_partitions(data, axis=0, get_ip=True)\n self.label = unwrap_partitions(label, axis=0)\n\n self.metadata = (\n data.index,\n data.columns,\n data._query_compiler._modin_frame._row_lengths,\n )\n\n def __iter__(self):\n \"\"\"\n Return unwrapped `self.data` and `self.label`.\n\n Yields\n ------\n list\n List of `self.data` with pairs of references to IP of row partition\n and row partition [(IP_ref0, partition_ref0), ..].\n list\n List of `self.label` with references to row partitions\n [partition_ref0, ..].\n \"\"\"\n yield self.data\n yield self.label\n\n\nclass Booster(xgb.Booster):\n \"\"\"\n A Modin Booster of XGBoost.\n\n Booster is the model of XGBoost, that contains low level routines for\n training, prediction and evaluation.\n\n Parameters\n ----------\n params : dict, optional\n Parameters for boosters.\n cache : list, default: empty\n List of cache items.\n model_file : string/os.PathLike/xgb.Booster/bytearray, optional\n Path to the model file if it's string or PathLike or xgb.Booster.\n 
\"\"\"\n\n def __init__(self, params=None, cache=(), model_file=None): # noqa: MD01\n super(Booster, self).__init__(params=params, cache=cache, model_file=model_file)\n\n def predict(\n self,\n data: DMatrix,\n **kwargs,\n ):\n \"\"\"\n Run distributed prediction with a trained booster.\n\n During execution it runs ``xgb.predict`` on each worker for subset of `data`\n and creates Modin DataFrame with prediction results.\n\n Parameters\n ----------\n data : modin.experimental.xgboost.DMatrix\n Input data used for prediction.\n **kwargs : dict\n Other parameters are the same as for ``xgboost.Booster.predict``.\n\n Returns\n -------\n modin.pandas.DataFrame\n Modin DataFrame with prediction results.\n \"\"\"\n LOGGER.info(\"Prediction started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _predict\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n data, DMatrix\n ), f\"Type of `data` is {type(data)}, but expected {DMatrix}.\"\n\n result = _predict(self.copy(), data, **kwargs)\n LOGGER.info(\"Prediction finished\")\n\n return result\n\n\ndef train(\n params: Dict,\n dtrain: DMatrix,\n *args,\n evals=(),\n num_actors: Optional[int] = None,\n evals_result: Optional[Dict] = None,\n **kwargs,\n):\n \"\"\"\n Run distributed training of XGBoost model.\n\n During work it evenly distributes `dtrain` between workers according\n to IP addresses partitions (in case of not even distribution of `dtrain`\n over nodes, some partitions will be re-distributed between nodes),\n runs xgb.train on each worker for subset of `dtrain` and reduces training results\n of each worker using Rabit Context.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : modin.experimental.xgboost.DMatrix\n Data to be trained against.\n *args : iterable\n Other parameters for `xgboost.train`.\n evals : list of pairs (modin.experimental.xgboost.DMatrix, str), default: empty\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n num_actors : int, optional\n Number of actors for training. If unspecified, this value will be\n computed automatically.\n evals_result : dict, optional\n Dict to store evaluation results in.\n **kwargs : dict\n Other parameters are the same as `xgboost.train`.\n\n Returns\n -------\n modin.experimental.xgboost.Booster\n A trained booster.\n \"\"\"\n LOGGER.info(\"Training started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _train\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n dtrain, DMatrix\n ), f\"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}.\"\n result = _train(dtrain, params, *args, num_actors=num_actors, evals=evals, **kwargs)\n if isinstance(evals_result, dict):\n evals_result.update(result[\"history\"])\n\n LOGGER.info(\"Training finished\")\n return Booster(model_file=result[\"booster\"])\n", "path": "modin/experimental/xgboost/xgboost.py"}], "after_files": [{"content": "# Licensed to Modin Development Team under one or more contributor license agreements.\n# See the NOTICE file distributed with this work for additional information regarding\n# copyright ownership. The Modin Development Team licenses this file to you under the\n# Apache License, Version 2.0 (the \"License\"); you may not use this file except in\n# compliance with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software distributed under\n# the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific language\n# governing permissions and limitations under the License.\n\n\"\"\"Module holds public interfaces for work Modin XGBoost.\"\"\"\n\nimport logging\nfrom typing import Dict, Optional\n\nimport xgboost as xgb\n\nfrom modin.config import Engine\nfrom modin.distributed.dataframe.pandas import unwrap_partitions\nimport modin.pandas as pd\n\nLOGGER = logging.getLogger(\"[modin.xgboost]\")\n\n\nclass DMatrix:\n \"\"\"\n DMatrix holds references to partitions of Modin DataFrame.\n\n On init stage unwrapping partitions of Modin DataFrame is started.\n\n Parameters\n ----------\n data : modin.pandas.DataFrame\n Data source of DMatrix.\n label : modin.pandas.DataFrame or modin.pandas.Series\n Labels used for training.\n\n Notes\n -----\n Currently DMatrix supports only `data` and `label` parameters.\n \"\"\"\n\n def __init__(self, data, label):\n assert isinstance(\n data, pd.DataFrame\n ), f\"Type of `data` is {type(data)}, but expected {pd.DataFrame}.\"\n assert isinstance(\n label, (pd.DataFrame, pd.Series)\n ), f\"Type of `data` is {type(label)}, but expected {pd.DataFrame} or {pd.Series}.\"\n\n self.data = unwrap_partitions(data, axis=0, get_ip=True)\n self.label = unwrap_partitions(label, axis=0)\n\n self.metadata = (\n data.index,\n data.columns,\n data._query_compiler._modin_frame._row_lengths,\n )\n\n def __iter__(self):\n \"\"\"\n Return unwrapped `self.data` and `self.label`.\n\n Yields\n ------\n list\n List of `self.data` with pairs of references to IP of row partition\n and row partition [(IP_ref0, partition_ref0), ..].\n list\n List of `self.label` with references to row partitions\n [partition_ref0, ..].\n \"\"\"\n yield self.data\n yield self.label\n\n\nclass Booster(xgb.Booster):\n \"\"\"\n A Modin Booster of XGBoost.\n\n Booster is the model of XGBoost, that contains low level routines for\n training, prediction and evaluation.\n\n Parameters\n ----------\n params : dict, optional\n Parameters for boosters.\n cache : list, default: empty\n List of cache items.\n model_file : string/os.PathLike/xgb.Booster/bytearray, optional\n Path to the model file if it's string or PathLike or xgb.Booster.\n \"\"\"\n\n def __init__(self, params=None, cache=(), model_file=None): # noqa: MD01\n super(Booster, self).__init__(params=params, cache=cache, model_file=model_file)\n\n def predict(\n self,\n data: DMatrix,\n **kwargs,\n ):\n \"\"\"\n Run distributed prediction with a trained booster.\n\n During execution it runs ``xgb.predict`` on each worker for subset of `data`\n and creates Modin DataFrame with prediction results.\n\n Parameters\n ----------\n data : modin.experimental.xgboost.DMatrix\n Input data used for prediction.\n **kwargs : dict\n Other parameters are the same as for ``xgboost.Booster.predict``.\n\n Returns\n -------\n modin.pandas.DataFrame\n Modin DataFrame with prediction results.\n \"\"\"\n LOGGER.info(\"Prediction started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _predict\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n data, DMatrix\n ), f\"Type of `data` is {type(data)}, but expected {DMatrix}.\"\n\n result = _predict(self.copy(), data, **kwargs)\n 
LOGGER.info(\"Prediction finished\")\n\n return result\n\n\ndef train(\n params: Dict,\n dtrain: DMatrix,\n *args,\n evals=(),\n num_actors: Optional[int] = None,\n evals_result: Optional[Dict] = None,\n **kwargs,\n):\n \"\"\"\n Run distributed training of XGBoost model.\n\n During work it evenly distributes `dtrain` between workers according\n to IP addresses partitions (in case of not even distribution of `dtrain`\n over nodes, some partitions will be re-distributed between nodes),\n runs xgb.train on each worker for subset of `dtrain` and reduces training results\n of each worker using Rabit Context.\n\n Parameters\n ----------\n params : dict\n Booster params.\n dtrain : modin.experimental.xgboost.DMatrix\n Data to be trained against.\n *args : iterable\n Other parameters for `xgboost.train`.\n evals : list of pairs (modin.experimental.xgboost.DMatrix, str), default: empty\n List of validation sets for which metrics will evaluated during training.\n Validation metrics will help us track the performance of the model.\n num_actors : int, optional\n Number of actors for training. If unspecified, this value will be\n computed automatically.\n evals_result : dict, optional\n Dict to store evaluation results in.\n **kwargs : dict\n Other parameters are the same as `xgboost.train`.\n\n Returns\n -------\n modin.experimental.xgboost.Booster\n A trained booster.\n \"\"\"\n LOGGER.info(\"Training started\")\n\n if Engine.get() == \"Ray\":\n from .xgboost_ray import _train\n else:\n raise ValueError(\"Current version supports only Ray engine.\")\n\n assert isinstance(\n dtrain, DMatrix\n ), f\"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}.\"\n result = _train(dtrain, params, *args, num_actors=num_actors, evals=evals, **kwargs)\n if isinstance(evals_result, dict):\n evals_result.update(result[\"history\"])\n\n LOGGER.info(\"Training finished\")\n return Booster(model_file=result[\"booster\"])\n", "path": "modin/experimental/xgboost/xgboost.py"}]} | 2,268 | 107 |
gh_patches_debug_8028 | rasdani/github-patches | git_diff | e-valuation__EvaP-848 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Username case sensitivity
Usernames are case sensitive. The importer makes all usernames lowercase, but accounts created automatically when logging in with Kerberos authentication can have uppercase letters.
This can lead to two users having the same username, and then the system crashes on login.
Automatically created accounts should also get lowercase usernames, even if the user enters the name differently.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `evap/evaluation/forms.py`
Content:
```
1 from django import forms
2 from django.contrib.auth import authenticate
3 from django.utils.translation import ugettext_lazy as _
4 from django.views.decorators.debug import sensitive_variables
5
6 from evap.evaluation.models import UserProfile
7
8
9 class LoginUsernameForm(forms.Form):
10 """Form encapsulating the login with username and password, for example from an Active Directory.
11 """
12
13 username = forms.CharField(label=_("Username"), max_length=254)
14 password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
15
16 def __init__(self, request=None, *args, **kwargs):
17 """
18 If request is passed in, the form will validate that cookies are
19 enabled. Note that the request (a HttpRequest object) must have set a
20 cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
21 running this validation.
22 """
23 self.request = request
24 self.user_cache = None
25 super().__init__(*args, **kwargs)
26
27 @sensitive_variables('password')
28 def clean_password(self):
29 username = self.cleaned_data.get('username')
30 password = self.cleaned_data.get('password')
31
32 if username and password:
33 self.user_cache = authenticate(username=username, password=password)
34 if self.user_cache is None:
35 raise forms.ValidationError(_("Please enter a correct username and password."))
36 self.check_for_test_cookie()
37 return password
38
39 def check_for_test_cookie(self):
40 if self.request and not self.request.session.test_cookie_worked():
41 raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))
42
43 def get_user_id(self):
44 if self.user_cache:
45 return self.user_cache.id
46 return None
47
48 def get_user(self):
49 return self.user_cache
50
51
52 class NewKeyForm(forms.Form):
53 email = forms.EmailField(label=_("Email address"))
54
55 def __init__(self, *args, **kwargs):
56 self.user_cache = None
57
58 super().__init__(*args, **kwargs)
59
60 def clean_email(self):
61 email = self.cleaned_data.get('email')
62
63 if not UserProfile.email_needs_login_key(email):
64 raise forms.ValidationError(_("HPI users cannot request login keys. Please login using your domain credentials."))
65
66 try:
67 user = UserProfile.objects.get(email__iexact=email)
68 self.user_cache = user
69 except UserProfile.DoesNotExist:
70 raise forms.ValidationError(_("No user with this email address was found. Please make sure to enter the email address already known to the university office."))
71
72 return email
73
74 def get_user(self):
75 return self.user_cache
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/evap/evaluation/forms.py b/evap/evaluation/forms.py
--- a/evap/evaluation/forms.py
+++ b/evap/evaluation/forms.py
@@ -29,6 +29,9 @@
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
+ # django-auth-kerberos might create a new userprofile. make sure it gets a lowercase username.
+ username = username.lower()
+
if username and password:
self.user_cache = authenticate(username=username, password=password)
if self.user_cache is None:
| {"golden_diff": "diff --git a/evap/evaluation/forms.py b/evap/evaluation/forms.py\n--- a/evap/evaluation/forms.py\n+++ b/evap/evaluation/forms.py\n@@ -29,6 +29,9 @@\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n \n+ # django-auth-kerberos might create a new userprofile. make sure it gets a lowercase username.\n+ username = username.lower()\n+\n if username and password:\n self.user_cache = authenticate(username=username, password=password)\n if self.user_cache is None:\n", "issue": "Username case sensitivity\nUsernames are case sensitive. The importer makes all usernames lowercase, but automatically created accounts when logging in with Kerberos authentification can have uppercase letters.\nThis can lead to two users having the same username and then the system crashed on login.\n\nAutomatically created accounts should also get lowercase usernames, even if the user enters the name differently.\n\n", "before_files": [{"content": "from django import forms\nfrom django.contrib.auth import authenticate\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.debug import sensitive_variables\n\nfrom evap.evaluation.models import UserProfile\n\n\nclass LoginUsernameForm(forms.Form):\n \"\"\"Form encapsulating the login with username and password, for example from an Active Directory.\n \"\"\"\n\n username = forms.CharField(label=_(\"Username\"), max_length=254)\n password = forms.CharField(label=_(\"Password\"), widget=forms.PasswordInput)\n\n def __init__(self, request=None, *args, **kwargs):\n \"\"\"\n If request is passed in, the form will validate that cookies are\n enabled. Note that the request (a HttpRequest object) must have set a\n cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before\n running this validation.\n \"\"\"\n self.request = request\n self.user_cache = None\n super().__init__(*args, **kwargs)\n\n @sensitive_variables('password')\n def clean_password(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n\n if username and password:\n self.user_cache = authenticate(username=username, password=password)\n if self.user_cache is None:\n raise forms.ValidationError(_(\"Please enter a correct username and password.\"))\n self.check_for_test_cookie()\n return password\n\n def check_for_test_cookie(self):\n if self.request and not self.request.session.test_cookie_worked():\n raise forms.ValidationError(_(\"Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in.\"))\n\n def get_user_id(self):\n if self.user_cache:\n return self.user_cache.id\n return None\n\n def get_user(self):\n return self.user_cache\n\n\nclass NewKeyForm(forms.Form):\n email = forms.EmailField(label=_(\"Email address\"))\n\n def __init__(self, *args, **kwargs):\n self.user_cache = None\n\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n\n if not UserProfile.email_needs_login_key(email):\n raise forms.ValidationError(_(\"HPI users cannot request login keys. Please login using your domain credentials.\"))\n\n try:\n user = UserProfile.objects.get(email__iexact=email)\n self.user_cache = user\n except UserProfile.DoesNotExist:\n raise forms.ValidationError(_(\"No user with this email address was found. 
Please make sure to enter the email address already known to the university office.\"))\n\n return email\n\n def get_user(self):\n return self.user_cache\n", "path": "evap/evaluation/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.contrib.auth import authenticate\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.debug import sensitive_variables\n\nfrom evap.evaluation.models import UserProfile\n\n\nclass LoginUsernameForm(forms.Form):\n \"\"\"Form encapsulating the login with username and password, for example from an Active Directory.\n \"\"\"\n\n username = forms.CharField(label=_(\"Username\"), max_length=254)\n password = forms.CharField(label=_(\"Password\"), widget=forms.PasswordInput)\n\n def __init__(self, request=None, *args, **kwargs):\n \"\"\"\n If request is passed in, the form will validate that cookies are\n enabled. Note that the request (a HttpRequest object) must have set a\n cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before\n running this validation.\n \"\"\"\n self.request = request\n self.user_cache = None\n super().__init__(*args, **kwargs)\n\n @sensitive_variables('password')\n def clean_password(self):\n username = self.cleaned_data.get('username')\n password = self.cleaned_data.get('password')\n\n # django-auth-kerberos might create a new userprofile. make sure it gets a lowercase username.\n username = username.lower()\n\n if username and password:\n self.user_cache = authenticate(username=username, password=password)\n if self.user_cache is None:\n raise forms.ValidationError(_(\"Please enter a correct username and password.\"))\n self.check_for_test_cookie()\n return password\n\n def check_for_test_cookie(self):\n if self.request and not self.request.session.test_cookie_worked():\n raise forms.ValidationError(_(\"Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in.\"))\n\n def get_user_id(self):\n if self.user_cache:\n return self.user_cache.id\n return None\n\n def get_user(self):\n return self.user_cache\n\n\nclass NewKeyForm(forms.Form):\n email = forms.EmailField(label=_(\"Email address\"))\n\n def __init__(self, *args, **kwargs):\n self.user_cache = None\n\n super().__init__(*args, **kwargs)\n\n def clean_email(self):\n email = self.cleaned_data.get('email')\n\n if not UserProfile.email_needs_login_key(email):\n raise forms.ValidationError(_(\"HPI users cannot request login keys. Please login using your domain credentials.\"))\n\n try:\n user = UserProfile.objects.get(email__iexact=email)\n self.user_cache = user\n except UserProfile.DoesNotExist:\n raise forms.ValidationError(_(\"No user with this email address was found. Please make sure to enter the email address already known to the university office.\"))\n\n return email\n\n def get_user(self):\n return self.user_cache\n", "path": "evap/evaluation/forms.py"}]} | 1,023 | 130 |
gh_patches_debug_211 | rasdani/github-patches | git_diff | mdn__kuma-6489 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Can't browse users in django admin now that tags are gone
https://sentry.prod.mozaws.net/operations/mdn-prod/issues/7273070/
```
Resolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None 
(?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...
File "redirect_urls/middleware.py", line 14, in __call__
resolver_match = self.resolver.resolve(request.path_info)
File "newrelic/hooks/framework_django.py", line 600, in wrapper
return _wrapped(*args, **kwargs)
File "newrelic/hooks/framework_django.py", line 588, in _wrapped
result = wrapped(path)
File "newrelic/hooks/framework_django.py", line 575, in wrapper
return wrapped(*args, **kwargs)
File "django/urls/resolvers.py", line 394, in resolve
raise Resolver404({'tried': tried, 'path': new_path})
FieldError: Cannot resolve keyword 'tags' into field. Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url
(18 additional frame(s) were not displayed)
...
File "django/db/models/sql/query.py", line 1268, in _add_q
child_clause, needed_inner = self._add_q(
File "django/db/models/sql/query.py", line 1273, in _add_q
child_clause, needed_inner = self.build_filter(
File "django/db/models/sql/query.py", line 1154, in build_filter
lookups, parts, reffed_expression = self.solve_lookup_type(arg)
File "django/db/models/sql/query.py", line 1034, in solve_lookup_type
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())
File "django/db/models/sql/query.py", line 1351, in names_to_path
raise FieldError("Cannot resolve keyword '%s' into field. "
FieldError: Cannot resolve keyword 'tags' into field. Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/users/admin.py`
Content:
```
1 from django.contrib import admin
2 from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
3 from django.utils.html import format_html
4
5 from kuma.core.urlresolvers import reverse
6 from kuma.core.utils import urlparams
7
8 from .models import User, UserBan
9
10
11 @admin.register(UserBan)
12 class UserBanAdmin(admin.ModelAdmin):
13 fields = ("user", "by", "reason", "is_active")
14 list_display = ("user", "by", "reason", "is_active")
15 list_editable = ("is_active",)
16 list_filter = ("is_active",)
17 raw_id_fields = ("user", "by")
18 search_fields = ("user__username", "reason", "by__username")
19
20
21 @admin.register(User)
22 class UserAdmin(BaseUserAdmin):
23 """
24 Extends the admin view of users to show date_joined field
25 add a filter on the field too
26 """
27
28 list_display = (
29 "username",
30 "fullname",
31 "email",
32 "revisions",
33 "date_joined",
34 "is_staff",
35 "is_active",
36 )
37 list_filter = ("is_staff", "is_superuser", "is_active", "date_joined", "groups")
38 ordering = ("-date_joined",)
39 search_fields = (
40 "username",
41 "title",
42 "fullname",
43 "organization",
44 "location",
45 "email",
46 "tags__name",
47 )
48
49 def revisions(self, obj):
50 """HTML link to user's revisions with count"""
51 link = urlparams(reverse("dashboards.revisions"), user=obj.username)
52 count = obj.created_revisions.count()
53 return format_html('<a href="{}"><strong>{}</strong></a>', link, count)
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/users/admin.py b/kuma/users/admin.py
--- a/kuma/users/admin.py
+++ b/kuma/users/admin.py
@@ -43,7 +43,6 @@
"organization",
"location",
"email",
- "tags__name",
)
def revisions(self, obj):
| {"golden_diff": "diff --git a/kuma/users/admin.py b/kuma/users/admin.py\n--- a/kuma/users/admin.py\n+++ b/kuma/users/admin.py\n@@ -43,7 +43,6 @@\n \"organization\",\n \"location\",\n \"email\",\n- \"tags__name\",\n )\n \n def revisions(self, obj):\n", "issue": "Can't browse users in django admin now that tags are gone\nhttps://sentry.prod.mozaws.net/operations/mdn-prod/issues/7273070/\n\n```\nResolver404: {'tried': [[<RegexURLPattern None ^media/(?:redesign/)?css/(?P<doc>.*)-min.css$>], [<RegexURLPattern None ^media/(?:redesign/)?js/(?P<doc>.*)-min.js$>], [<RegexURLPattern None ^media/(?:redesign/)?img(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?css(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?js(?P<suffix>.*)$>], [<RegexURLPattern None ^media/(?:redesign/)?fonts(?P<suffix>.*)$>], [<RegexURLPattern None ^media/uploads/demos/(?:.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)//(?P<three>.*)$>], [<RegexURLPattern None (?i)^(?P<one>.*)//(?P<two>.*)$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_1_canvas_rect.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_2_canvas_moveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_3_canvas_lineto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_4_canvas_arc.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_5_canvas_quadraticcurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/2_6_canvas_beziercurveto.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_1_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_2_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_3_canvas_drawimage.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/3_4_canvas_gallery.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_1_canvas_fillstyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_2_canvas_strokestyle.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_3_canvas_globalalpha.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_4_canvas_rgba.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_5_canvas_linewidth.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_6_canvas_linecap.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_7_canvas_linejoin.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_8_canvas_miterlimit.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_9_canvas_lineargradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_10_canvas_radialgradient.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/4_11_canvas_createpattern.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_1_canvas_savestate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_2_canvas_translate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_3_canvas_rotate.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/5_4_canvas_scale.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_1_canvas_composite.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/6_2_canvas_clipping.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/globalCompositeOperation.html$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/backdrop.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/bg_gallery.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_1.jpg$>], 
[<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_2.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_3.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_4.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_5.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_6.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_7.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/gallery_8.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/picture_frame.png$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/rhino.jpg$>], [<RegexURLPattern None (?i)^samples/canvas-tutorial/images/wallpaper.png$>], [<RegexURLPattern None (?i)^samples/domref/mozGetAsFile.html$>], [<RegexURLPattern None (?i)^samples/raycaster/input.js$>], [<RegexURLPattern None (?i)^samples/raycaster/Level.js$>], [<RegexURL...\n File \"redirect_urls/middleware.py\", line 14, in __call__\n resolver_match = self.resolver.resolve(request.path_info)\n File \"newrelic/hooks/framework_django.py\", line 600, in wrapper\n return _wrapped(*args, **kwargs)\n File \"newrelic/hooks/framework_django.py\", line 588, in _wrapped\n result = wrapped(path)\n File \"newrelic/hooks/framework_django.py\", line 575, in wrapper\n return wrapped(*args, **kwargs)\n File \"django/urls/resolvers.py\", line 394, in resolve\n raise Resolver404({'tried': tried, 'path': new_path})\n\nFieldError: Cannot resolve keyword 'tags' into field. Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url\n(18 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 1268, in _add_q\n child_clause, needed_inner = self._add_q(\n File \"django/db/models/sql/query.py\", line 1273, in _add_q\n child_clause, needed_inner = self.build_filter(\n File \"django/db/models/sql/query.py\", line 1154, in build_filter\n lookups, parts, reffed_expression = self.solve_lookup_type(arg)\n File \"django/db/models/sql/query.py\", line 1034, in solve_lookup_type\n _, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta())\n File \"django/db/models/sql/query.py\", line 1351, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. \"\n\nFieldError: Cannot resolve keyword 'tags' into field. 
Choices are: auth_token, bans, bans_issued, bio, created_attachment_revisions, created_revisions, created_toolbars, date_joined, discourse_url, documentattachment, documentdeletionlog, documentspam_reviewed, documentspamattempt, email, emailaddress, facebook_url, first_name, flag, fullname, github_url, groups, homepage, id, irc_nickname, is_active, is_github_url_public, is_newsletter_subscribed, is_staff, is_superuser, key, last_login, last_name, linkedin_url, locale, location, logentry, mozillians_url, organization, password, revisionakismetsubmission, socialaccount, stackoverflow_url, stripe_customer_id, timezone, title, twitter_url, user_permissions, username, watch, website_url\n```\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.utils.html import format_html\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import urlparams\n\nfrom .models import User, UserBan\n\n\[email protected](UserBan)\nclass UserBanAdmin(admin.ModelAdmin):\n fields = (\"user\", \"by\", \"reason\", \"is_active\")\n list_display = (\"user\", \"by\", \"reason\", \"is_active\")\n list_editable = (\"is_active\",)\n list_filter = (\"is_active\",)\n raw_id_fields = (\"user\", \"by\")\n search_fields = (\"user__username\", \"reason\", \"by__username\")\n\n\[email protected](User)\nclass UserAdmin(BaseUserAdmin):\n \"\"\"\n Extends the admin view of users to show date_joined field\n add a filter on the field too\n \"\"\"\n\n list_display = (\n \"username\",\n \"fullname\",\n \"email\",\n \"revisions\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n )\n list_filter = (\"is_staff\", \"is_superuser\", \"is_active\", \"date_joined\", \"groups\")\n ordering = (\"-date_joined\",)\n search_fields = (\n \"username\",\n \"title\",\n \"fullname\",\n \"organization\",\n \"location\",\n \"email\",\n \"tags__name\",\n )\n\n def revisions(self, obj):\n \"\"\"HTML link to user's revisions with count\"\"\"\n link = urlparams(reverse(\"dashboards.revisions\"), user=obj.username)\n count = obj.created_revisions.count()\n return format_html('<a href=\"{}\"><strong>{}</strong></a>', link, count)\n", "path": "kuma/users/admin.py"}], "after_files": [{"content": "from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as BaseUserAdmin\nfrom django.utils.html import format_html\n\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import urlparams\n\nfrom .models import User, UserBan\n\n\[email protected](UserBan)\nclass UserBanAdmin(admin.ModelAdmin):\n fields = (\"user\", \"by\", \"reason\", \"is_active\")\n list_display = (\"user\", \"by\", \"reason\", \"is_active\")\n list_editable = (\"is_active\",)\n list_filter = (\"is_active\",)\n raw_id_fields = (\"user\", \"by\")\n search_fields = (\"user__username\", \"reason\", \"by__username\")\n\n\[email protected](User)\nclass UserAdmin(BaseUserAdmin):\n \"\"\"\n Extends the admin view of users to show date_joined field\n add a filter on the field too\n \"\"\"\n\n list_display = (\n \"username\",\n \"fullname\",\n \"email\",\n \"revisions\",\n \"date_joined\",\n \"is_staff\",\n \"is_active\",\n )\n list_filter = (\"is_staff\", \"is_superuser\", \"is_active\", \"date_joined\", \"groups\")\n ordering = (\"-date_joined\",)\n search_fields = (\n \"username\",\n \"title\",\n \"fullname\",\n \"organization\",\n \"location\",\n \"email\",\n )\n\n def revisions(self, obj):\n \"\"\"HTML link to user's revisions with count\"\"\"\n link = 
urlparams(reverse(\"dashboards.revisions\"), user=obj.username)\n count = obj.created_revisions.count()\n return format_html('<a href=\"{}\"><strong>{}</strong></a>', link, count)\n", "path": "kuma/users/admin.py"}]} | 2,709 | 73 |
gh_patches_debug_6806 | rasdani/github-patches | git_diff | Pylons__pyramid-2674 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
reify docstring doesn't render properly
The testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify
Unfortunately this section is essential for understanding the example.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyramid/decorator.py`
Content:
```
1 from functools import update_wrapper
2
3
4 class reify(object):
5 """ Use as a class method decorator. It operates almost exactly like the
6 Python ``@property`` decorator, but it puts the result of the method it
7 decorates into the instance dict after the first call, effectively
8 replacing the function it decorates with an instance variable. It is, in
9 Python parlance, a non-data descriptor. An example:
10
11 .. testsetup::
12
13 from pyramid.decorator import reify
14
15 class Foo(object):
16 @reify
17 def jammy(self):
18 print('jammy called')
19 return 1
20
21 And usage of Foo:
22
23 .. doctest::
24
25 >>> f = Foo()
26 >>> v = f.jammy
27 jammy called
28 >>> print(v)
29 1
30 >>> f.jammy
31 1
32 >>> # jammy func not called the second time; it replaced itself with 1
33 >>> # Note: reassignment is possible
34 >>> f.jammy = 2
35 >>> f.jammy
36 2
37 """
38 def __init__(self, wrapped):
39 self.wrapped = wrapped
40 update_wrapper(self, wrapped)
41
42 def __get__(self, inst, objtype=None):
43 if inst is None:
44 return self
45 val = self.wrapped(inst)
46 setattr(inst, self.wrapped.__name__, val)
47 return val
48
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyramid/decorator.py b/pyramid/decorator.py
--- a/pyramid/decorator.py
+++ b/pyramid/decorator.py
@@ -8,6 +8,16 @@
replacing the function it decorates with an instance variable. It is, in
Python parlance, a non-data descriptor. An example:
+ .. code-block:: python
+
+ from pyramid.decorator import reify
+
+ class Foo(object):
+ @reify
+ def jammy(self):
+ print('jammy called')
+ return 1
+
.. testsetup::
from pyramid.decorator import reify
| {"golden_diff": "diff --git a/pyramid/decorator.py b/pyramid/decorator.py\n--- a/pyramid/decorator.py\n+++ b/pyramid/decorator.py\n@@ -8,6 +8,16 @@\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n \n+ .. code-block:: python\n+\n+ from pyramid.decorator import reify\n+\n+ class Foo(object):\n+ @reify\n+ def jammy(self):\n+ print('jammy called')\n+ return 1\n+\n .. testsetup::\n \n from pyramid.decorator import reify\n", "issue": "reify docstring doesn't render properly\nThe testsetup section is omitted from the rendered docs at http://pyramid.readthedocs.io/en/latest/api/decorator.html#pyramid.decorator.reify\n\nUnfortunately this section is essential for understanding the example.\n\n", "before_files": [{"content": "from functools import update_wrapper\n\n\nclass reify(object):\n \"\"\" Use as a class method decorator. It operates almost exactly like the\n Python ``@property`` decorator, but it puts the result of the method it\n decorates into the instance dict after the first call, effectively\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n\n .. testsetup::\n\n from pyramid.decorator import reify\n\n class Foo(object):\n @reify\n def jammy(self):\n print('jammy called')\n return 1\n\n And usage of Foo:\n\n .. doctest::\n\n >>> f = Foo()\n >>> v = f.jammy\n jammy called\n >>> print(v)\n 1\n >>> f.jammy\n 1\n >>> # jammy func not called the second time; it replaced itself with 1\n >>> # Note: reassignment is possible\n >>> f.jammy = 2\n >>> f.jammy\n 2\n \"\"\"\n def __init__(self, wrapped):\n self.wrapped = wrapped\n update_wrapper(self, wrapped)\n\n def __get__(self, inst, objtype=None):\n if inst is None:\n return self\n val = self.wrapped(inst)\n setattr(inst, self.wrapped.__name__, val)\n return val\n\n", "path": "pyramid/decorator.py"}], "after_files": [{"content": "from functools import update_wrapper\n\n\nclass reify(object):\n \"\"\" Use as a class method decorator. It operates almost exactly like the\n Python ``@property`` decorator, but it puts the result of the method it\n decorates into the instance dict after the first call, effectively\n replacing the function it decorates with an instance variable. It is, in\n Python parlance, a non-data descriptor. An example:\n\n .. code-block:: python\n\n from pyramid.decorator import reify\n\n class Foo(object):\n @reify\n def jammy(self):\n print('jammy called')\n return 1\n\n .. testsetup::\n\n from pyramid.decorator import reify\n\n class Foo(object):\n @reify\n def jammy(self):\n print('jammy called')\n return 1\n\n And usage of Foo:\n\n .. doctest::\n\n >>> f = Foo()\n >>> v = f.jammy\n jammy called\n >>> print(v)\n 1\n >>> f.jammy\n 1\n >>> # jammy func not called the second time; it replaced itself with 1\n >>> # Note: reassignment is possible\n >>> f.jammy = 2\n >>> f.jammy\n 2\n \"\"\"\n def __init__(self, wrapped):\n self.wrapped = wrapped\n update_wrapper(self, wrapped)\n\n def __get__(self, inst, objtype=None):\n if inst is None:\n return self\n val = self.wrapped(inst)\n setattr(inst, self.wrapped.__name__, val)\n return val\n\n", "path": "pyramid/decorator.py"}]} | 719 | 149 |
gh_patches_debug_5736 | rasdani/github-patches | git_diff | google__fuzzbench-630 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make issues: debug is broken, run is impossible to control-C out of
The `make debug-*` targets are broken. `make debug-libfuzzer-zlib_zlib_uncompress_fuzzer` fails with this error:
```
docker run \
--cpus=1 \
--cap-add SYS_NICE \
--cap-add SYS_PTRACE \
-e FUZZ_OUTSIDE_EXPERIMENT=1 \
-e FORCE_LOCAL=1 \
-e TRIAL_ID=1 \
-e FUZZER=libfuzzer \
-e BENCHMARK=zlib_zlib_uncompress_fuzzer \
-e FUZZ_TARGET=zlib_uncompress_fuzzer \
-entrypoint "/bin/bash" \
-it gcr.io/fuzzbench/runners/libfuzzer/zlib_zlib_uncompress_fuzzer
docker: invalid reference format.
See 'docker run --help'.
make: *** [docker/generated.mk:26568: debug-libfuzzer-zlib_zlib_uncompress_fuzzer] Error 125
```
`make run-libfuzzer-zlib_zlib_uncompress_fuzzer` runs forever and cannot be interrupted with Ctrl-C.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/generate_makefile.py`
Content:
```
1 # Copyright 2020 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 """Simple generator for local Makefile rules."""
15
16 import os
17
18 from common import yaml_utils
19 from common import benchmark_utils
20 from common import fuzzer_utils
21 from experiment.build import docker_images
22
23 BASE_TAG = "gcr.io/fuzzbench"
24 BENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR
25
26
27 def _print_benchmark_fuzz_target(benchmarks):
28 """Prints benchmark variables from benchmark.yaml files."""
29 for benchmark in benchmarks:
30 benchmark_vars = yaml_utils.read(
31 os.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))
32 print(benchmark + '-fuzz-target=' + benchmark_vars['fuzz_target'])
33 print()
34
35
36 def _print_makefile_run_template(image):
37 fuzzer, benchmark = image['tag'].split('/')[1:]
38
39 for run_type in ('run', 'debug', 'test-run'):
40 print(('{run_type}-{fuzzer}-{benchmark}: ' +
41 '.{fuzzer}-{benchmark}-runner').format(run_type=run_type,
42 benchmark=benchmark,
43 fuzzer=fuzzer))
44
45 print('\
46 \tdocker run \\\n\
47 \t--cpus=1 \\\n\
48 \t--cap-add SYS_NICE \\\n\
49 \t--cap-add SYS_PTRACE \\\n\
50 \t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\n\
51 \t-e FORCE_LOCAL=1 \\\n\
52 \t-e TRIAL_ID=1 \\\n\
53 \t-e FUZZER={fuzzer} \\\n\
54 \t-e BENCHMARK={benchmark} \\\n\
55 \t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\
56 '.format(fuzzer=fuzzer, benchmark=benchmark))
57
58 if run_type == 'test-run':
59 print('\t-e MAX_TOTAL_TIME=20 \\\n\t-e SNAPSHOT_PERIOD=10 \\')
60 if run_type == 'debug':
61 print('\t--entrypoint "/bin/bash" \\\n\t-it ', end='')
62 else:
63 print('\t', end='')
64
65 print(os.path.join(BASE_TAG, image['tag']))
66 print()
67
68
69 # TODO(tanq16): Add unit test.
70 def _print_rules_for_image(name, image):
71 """Print makefile section for given image to stdout."""
72 if not ('base' in name or 'dispatcher' in name):
73 print('.', end='')
74 print(name + ':', end='')
75 if 'depends_on' in image:
76 for dep in image['depends_on']:
77 if 'base' in dep:
78 print(' ' + dep, end='')
79 else:
80 print(' .' + dep, end='')
81 print()
82 print('\tdocker build \\')
83 print('\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\')
84 print('\t--build-arg BUILDKIT_INLINE_CACHE=1 \\')
85 print('\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) + ' \\')
86 if 'build_arg' in image:
87 for arg in image['build_arg']:
88 print('\t--build-arg ' + arg + ' \\')
89 if 'dockerfile' in image:
90 print('\t--file ' + image['dockerfile'] + ' \\')
91 print('\t' + image['context'])
92 print()
93
94 # Print run, debug, test-run rules if image is a runner.
95 if 'runner' in name and not ('intermediate' in name or 'base' in name):
96 _print_makefile_run_template(image)
97
98
99 def main():
100 """Generates Makefile with docker image build rules."""
101 fuzzers = fuzzer_utils.get_fuzzer_names()
102 benchmarks = benchmark_utils.get_all_benchmarks()
103 buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)
104
105 print('export DOCKER_BUILDKIT := 1')
106
107 # Print oss-fuzz benchmarks property variables.
108 _print_benchmark_fuzz_target(benchmarks)
109
110 for name, image in buildable_images.items():
111 _print_rules_for_image(name, image)
112
113 # Print build targets for all fuzzer-benchmark pairs (including coverage).
114 fuzzers.append('coverage')
115 for fuzzer in fuzzers:
116 image_type = "runner"
117 if 'coverage' in fuzzer:
118 image_type = "builder"
119 for benchmark in benchmarks:
120 print(('build-{fuzzer}-{benchmark}: ' +
121 '.{fuzzer}-{benchmark}-{image_type}\n').format(
122 fuzzer=fuzzer,
123 benchmark=benchmark,
124 image_type=image_type))
125 print()
126
127 # Print fuzzer-all benchmarks build targets.
128 for fuzzer in fuzzers:
129 all_build_targets = ' '.join([
130 'build-{0}-{1}'.format(fuzzer, benchmark)
131 for benchmark in benchmarks
132 ])
133 print('build-{fuzzer}-all: {all_targets}'.format(
134 fuzzer=fuzzer, all_targets=all_build_targets))
135
136 # Print all targets build target.
137 all_build_targets = ' '.join(
138 ['build-{0}-all'.format(name) for name in fuzzers])
139 print('build-all: {all_targets}'.format(all_targets=all_build_targets))
140
141
142 if __name__ == '__main__':
143 main()
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/generate_makefile.py b/docker/generate_makefile.py
--- a/docker/generate_makefile.py
+++ b/docker/generate_makefile.py
@@ -59,6 +59,8 @@
print('\t-e MAX_TOTAL_TIME=20 \\\n\t-e SNAPSHOT_PERIOD=10 \\')
if run_type == 'debug':
print('\t--entrypoint "/bin/bash" \\\n\t-it ', end='')
+ elif run_type == 'run':
+ print('\t-it ', end='')
else:
print('\t', end='')
| {"golden_diff": "diff --git a/docker/generate_makefile.py b/docker/generate_makefile.py\n--- a/docker/generate_makefile.py\n+++ b/docker/generate_makefile.py\n@@ -59,6 +59,8 @@\n print('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\n if run_type == 'debug':\n print('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n+ elif run_type == 'run':\n+ print('\\t-it ', end='')\n else:\n print('\\t', end='')\n", "issue": "Make issues: debug is broken, run is impossible to control-C out of\nmake debug- is broken. `make debug-libfuzzer-zlib_zlib_uncompress_fuzzer` fails with this error:\r\n```\r\ndocker run \\ \r\n--cpus=1 \\ \r\n--cap-add SYS_NICE \\ \r\n--cap-add SYS_PTRACE \\ \r\n-e FUZZ_OUTSIDE_EXPERIMENT=1 \\ \r\n-e FORCE_LOCAL=1 \\ \r\n-e TRIAL_ID=1 \\ \r\n-e FUZZER=libfuzzer \\ \r\n-e BENCHMARK=zlib_zlib_uncompress_fuzzer \\ \r\n-e FUZZ_TARGET=zlib_uncompress_fuzzer \\ \r\n-entrypoint \"/bin/bash\" \\ \r\n-it gcr.io/fuzzbench/runners/libfuzzer/zlib_zlib_uncompress_fuzzer \r\ndocker: invalid reference format. \r\nSee 'docker run --help'. \r\nmake: *** [docker/generated.mk:26568: debug-libfuzzer-zlib_zlib_uncompress_fuzzer] Error 125\r\n```\r\n\r\nmake `run-libfuzzer-zlib_zlib_uncompress_fuzzer` runs forever and cannot be Ctrl-Ced out of. \n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Simple generator for local Makefile rules.\"\"\"\n\nimport os\n\nfrom common import yaml_utils\nfrom common import benchmark_utils\nfrom common import fuzzer_utils\nfrom experiment.build import docker_images\n\nBASE_TAG = \"gcr.io/fuzzbench\"\nBENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR\n\n\ndef _print_benchmark_fuzz_target(benchmarks):\n \"\"\"Prints benchmark variables from benchmark.yaml files.\"\"\"\n for benchmark in benchmarks:\n benchmark_vars = yaml_utils.read(\n os.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))\n print(benchmark + '-fuzz-target=' + benchmark_vars['fuzz_target'])\n print()\n\n\ndef _print_makefile_run_template(image):\n fuzzer, benchmark = image['tag'].split('/')[1:]\n\n for run_type in ('run', 'debug', 'test-run'):\n print(('{run_type}-{fuzzer}-{benchmark}: ' +\n '.{fuzzer}-{benchmark}-runner').format(run_type=run_type,\n benchmark=benchmark,\n fuzzer=fuzzer))\n\n print('\\\n\\tdocker run \\\\\\n\\\n\\t--cpus=1 \\\\\\n\\\n\\t--cap-add SYS_NICE \\\\\\n\\\n\\t--cap-add SYS_PTRACE \\\\\\n\\\n\\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\\n\\\n\\t-e FORCE_LOCAL=1 \\\\\\n\\\n\\t-e TRIAL_ID=1 \\\\\\n\\\n\\t-e FUZZER={fuzzer} \\\\\\n\\\n\\t-e BENCHMARK={benchmark} \\\\\\n\\\n\\t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\\\n'.format(fuzzer=fuzzer, benchmark=benchmark))\n\n if run_type == 'test-run':\n print('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\n if run_type == 'debug':\n print('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n else:\n print('\\t', end='')\n\n print(os.path.join(BASE_TAG, image['tag']))\n print()\n\n\n# TODO(tanq16): Add unit test.\ndef 
_print_rules_for_image(name, image):\n \"\"\"Print makefile section for given image to stdout.\"\"\"\n if not ('base' in name or 'dispatcher' in name):\n print('.', end='')\n print(name + ':', end='')\n if 'depends_on' in image:\n for dep in image['depends_on']:\n if 'base' in dep:\n print(' ' + dep, end='')\n else:\n print(' .' + dep, end='')\n print()\n print('\\tdocker build \\\\')\n print('\\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n print('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\')\n print('\\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n if 'build_arg' in image:\n for arg in image['build_arg']:\n print('\\t--build-arg ' + arg + ' \\\\')\n if 'dockerfile' in image:\n print('\\t--file ' + image['dockerfile'] + ' \\\\')\n print('\\t' + image['context'])\n print()\n\n # Print run, debug, test-run rules if image is a runner.\n if 'runner' in name and not ('intermediate' in name or 'base' in name):\n _print_makefile_run_template(image)\n\n\ndef main():\n \"\"\"Generates Makefile with docker image build rules.\"\"\"\n fuzzers = fuzzer_utils.get_fuzzer_names()\n benchmarks = benchmark_utils.get_all_benchmarks()\n buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)\n\n print('export DOCKER_BUILDKIT := 1')\n\n # Print oss-fuzz benchmarks property variables.\n _print_benchmark_fuzz_target(benchmarks)\n\n for name, image in buildable_images.items():\n _print_rules_for_image(name, image)\n\n # Print build targets for all fuzzer-benchmark pairs (including coverage).\n fuzzers.append('coverage')\n for fuzzer in fuzzers:\n image_type = \"runner\"\n if 'coverage' in fuzzer:\n image_type = \"builder\"\n for benchmark in benchmarks:\n print(('build-{fuzzer}-{benchmark}: ' +\n '.{fuzzer}-{benchmark}-{image_type}\\n').format(\n fuzzer=fuzzer,\n benchmark=benchmark,\n image_type=image_type))\n print()\n\n # Print fuzzer-all benchmarks build targets.\n for fuzzer in fuzzers:\n all_build_targets = ' '.join([\n 'build-{0}-{1}'.format(fuzzer, benchmark)\n for benchmark in benchmarks\n ])\n print('build-{fuzzer}-all: {all_targets}'.format(\n fuzzer=fuzzer, all_targets=all_build_targets))\n\n # Print all targets build target.\n all_build_targets = ' '.join(\n ['build-{0}-all'.format(name) for name in fuzzers])\n print('build-all: {all_targets}'.format(all_targets=all_build_targets))\n\n\nif __name__ == '__main__':\n main()\n", "path": "docker/generate_makefile.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Simple generator for local Makefile rules.\"\"\"\n\nimport os\n\nfrom common import yaml_utils\nfrom common import benchmark_utils\nfrom common import fuzzer_utils\nfrom experiment.build import docker_images\n\nBASE_TAG = \"gcr.io/fuzzbench\"\nBENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR\n\n\ndef _print_benchmark_fuzz_target(benchmarks):\n \"\"\"Prints benchmark variables from benchmark.yaml files.\"\"\"\n for benchmark in benchmarks:\n benchmark_vars = yaml_utils.read(\n 
os.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))\n print(benchmark + '-fuzz-target=' + benchmark_vars['fuzz_target'])\n print()\n\n\ndef _print_makefile_run_template(image):\n fuzzer, benchmark = image['tag'].split('/')[1:]\n\n for run_type in ('run', 'debug', 'test-run'):\n print(('{run_type}-{fuzzer}-{benchmark}: ' +\n '.{fuzzer}-{benchmark}-runner').format(run_type=run_type,\n benchmark=benchmark,\n fuzzer=fuzzer))\n\n print('\\\n\\tdocker run \\\\\\n\\\n\\t--cpus=1 \\\\\\n\\\n\\t--cap-add SYS_NICE \\\\\\n\\\n\\t--cap-add SYS_PTRACE \\\\\\n\\\n\\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\\\\n\\\n\\t-e FORCE_LOCAL=1 \\\\\\n\\\n\\t-e TRIAL_ID=1 \\\\\\n\\\n\\t-e FUZZER={fuzzer} \\\\\\n\\\n\\t-e BENCHMARK={benchmark} \\\\\\n\\\n\\t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\\\\\n'.format(fuzzer=fuzzer, benchmark=benchmark))\n\n if run_type == 'test-run':\n print('\\t-e MAX_TOTAL_TIME=20 \\\\\\n\\t-e SNAPSHOT_PERIOD=10 \\\\')\n if run_type == 'debug':\n print('\\t--entrypoint \"/bin/bash\" \\\\\\n\\t-it ', end='')\n elif run_type == 'run':\n print('\\t-it ', end='')\n else:\n print('\\t', end='')\n\n print(os.path.join(BASE_TAG, image['tag']))\n print()\n\n\n# TODO(tanq16): Add unit test.\ndef _print_rules_for_image(name, image):\n \"\"\"Print makefile section for given image to stdout.\"\"\"\n if not ('base' in name or 'dispatcher' in name):\n print('.', end='')\n print(name + ':', end='')\n if 'depends_on' in image:\n for dep in image['depends_on']:\n if 'base' in dep:\n print(' ' + dep, end='')\n else:\n print(' .' + dep, end='')\n print()\n print('\\tdocker build \\\\')\n print('\\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n print('\\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\\')\n print('\\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) + ' \\\\')\n if 'build_arg' in image:\n for arg in image['build_arg']:\n print('\\t--build-arg ' + arg + ' \\\\')\n if 'dockerfile' in image:\n print('\\t--file ' + image['dockerfile'] + ' \\\\')\n print('\\t' + image['context'])\n print()\n\n # Print run, debug, test-run rules if image is a runner.\n if 'runner' in name and not ('intermediate' in name or 'base' in name):\n _print_makefile_run_template(image)\n\n\ndef main():\n \"\"\"Generates Makefile with docker image build rules.\"\"\"\n fuzzers = fuzzer_utils.get_fuzzer_names()\n benchmarks = benchmark_utils.get_all_benchmarks()\n buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)\n\n print('export DOCKER_BUILDKIT := 1')\n\n # Print oss-fuzz benchmarks property variables.\n _print_benchmark_fuzz_target(benchmarks)\n\n for name, image in buildable_images.items():\n _print_rules_for_image(name, image)\n\n # Print build targets for all fuzzer-benchmark pairs (including coverage).\n fuzzers.append('coverage')\n for fuzzer in fuzzers:\n image_type = \"runner\"\n if 'coverage' in fuzzer:\n image_type = \"builder\"\n for benchmark in benchmarks:\n print(('build-{fuzzer}-{benchmark}: ' +\n '.{fuzzer}-{benchmark}-{image_type}\\n').format(\n fuzzer=fuzzer,\n benchmark=benchmark,\n image_type=image_type))\n print()\n\n # Print fuzzer-all benchmarks build targets.\n for fuzzer in fuzzers:\n all_build_targets = ' '.join([\n 'build-{0}-{1}'.format(fuzzer, benchmark)\n for benchmark in benchmarks\n ])\n print('build-{fuzzer}-all: {all_targets}'.format(\n fuzzer=fuzzer, all_targets=all_build_targets))\n\n # Print all targets build target.\n all_build_targets = ' '.join(\n ['build-{0}-all'.format(name) for name in fuzzers])\n print('build-all: 
{all_targets}'.format(all_targets=all_build_targets))\n\n\nif __name__ == '__main__':\n main()\n", "path": "docker/generate_makefile.py"}]} | 2,098 | 127 |
gh_patches_debug_2197 | rasdani/github-patches | git_diff | microsoft__torchgeo-1755 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SustainBenchCropYield download doesn't work
### Description
Downloading the SustainBenchCropYield dataset doesn't work as expected
### Steps to reproduce
```
ds = SustainBenchCropYield("data/", download=True)
```
This downloads a file called `soybeans` then fails unzipping `soybeans.zip`. Works if you rename to .zip and unzip manually.
### Version
0.6.0.dev0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/datasets/sustainbench_crop_yield.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """SustainBench Crop Yield dataset."""
5
6 import os
7 from typing import Any, Callable, Optional
8
9 import matplotlib.pyplot as plt
10 import numpy as np
11 import torch
12 from matplotlib.figure import Figure
13 from torch import Tensor
14
15 from .geo import NonGeoDataset
16 from .utils import DatasetNotFoundError, download_url, extract_archive
17
18
19 class SustainBenchCropYield(NonGeoDataset):
20 """SustainBench Crop Yield Dataset.
21
22 This dataset contains MODIS band histograms and soybean yield
23 estimates for selected counties in the USA, Argentina and Brazil.
24 The dataset is part of the
25 `SustainBench <https://sustainlab-group.github.io/sustainbench/docs/datasets/sdg2/crop_yield.html>`_
26 datasets for tackling the UN Sustainable Development Goals (SDGs).
27
28 Dataset Format:
29
30 * .npz files of stacked samples
31
32 Dataset Features:
33
34 * input histogram of 7 surface reflectance and 2 surface temperature
35 bands from MODIS pixel values in 32 ranges across 32 timesteps
36 resulting in 32x32x9 input images
37 * regression target value of soybean yield in metric tonnes per
38 harvested hectare
39
40 If you use this dataset in your research, please cite:
41
42 * https://doi.org/10.1145/3209811.3212707
43 * https://doi.org/10.1609/aaai.v31i1.11172
44
45 .. versionadded:: 0.5
46 """ # noqa: E501
47
48 valid_countries = ["usa", "brazil", "argentina"]
49
50 md5 = "c2794e59512c897d9bea77b112848122"
51
52 url = "https://drive.google.com/file/d/1odwkI1hiE5rMZ4VfM0hOXzlFR4NbhrfU/view?usp=share_link" # noqa: E501
53
54 dir = "soybeans"
55
56 valid_splits = ["train", "dev", "test"]
57
58 def __init__(
59 self,
60 root: str = "data",
61 split: str = "train",
62 countries: list[str] = ["usa"],
63 transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,
64 download: bool = False,
65 checksum: bool = False,
66 ) -> None:
67 """Initialize a new Dataset instance.
68
69 Args:
70 root: root directory where dataset can be found
71 split: one of "train", "dev", or "test"
72 countries: which countries to include in the dataset
73 transforms: a function/transform that takes an input sample
74 and returns a transformed version
75 download: if True, download dataset and store it in the root directory
76 checksum: if True, check the MD5 after downloading files (may be slow)
77
78 Raises:
79 AssertionError: if ``countries`` contains invalid countries or if ``split``
80 is invalid
81 DatasetNotFoundError: If dataset is not found and *download* is False.
82 """
83 assert set(countries).issubset(
84 self.valid_countries
85 ), f"Please choose a subset of these valid countried: {self.valid_countries}."
86 self.countries = countries
87
88 assert (
89 split in self.valid_splits
90 ), f"Pleas choose one of these valid data splits {self.valid_splits}."
91 self.split = split
92
93 self.root = root
94 self.transforms = transforms
95 self.download = download
96 self.checksum = checksum
97
98 self._verify()
99 self.collection = self.retrieve_collection()
100
101 def __len__(self) -> int:
102 """Return the number of data points in the dataset.
103
104 Returns:
105 length of the dataset
106 """
107 return len(self.collection)
108
109 def __getitem__(self, index: int) -> dict[str, Tensor]:
110 """Return an index within the dataset.
111
112 Args:
113 index: index to return
114
115 Returns:
116 data and label at that index
117 """
118 input_file_path, sample_idx = self.collection[index]
119
120 sample: dict[str, Tensor] = {
121 "image": self._load_image(input_file_path, sample_idx)
122 }
123 sample.update(self._load_features(input_file_path, sample_idx))
124
125 if self.transforms is not None:
126 sample = self.transforms(sample)
127
128 return sample
129
130 def _load_image(self, path: str, sample_idx: int) -> Tensor:
131 """Load input image.
132
133 Args:
134 path: path to input npz collection
135 sample_idx: what sample to index from the npz collection
136
137 Returns:
138 input image as tensor
139 """
140 arr = np.load(path)["data"][sample_idx]
141 # return [channel, height, width]
142 return torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)
143
144 def _load_features(self, path: str, sample_idx: int) -> dict[str, Tensor]:
145 """Load features value.
146
147 Args:
148 path: path to image npz collection
149 sample_idx: what sample to index from the npz collection
150
151 Returns:
152 target regression value
153 """
154 target_file_path = path.replace("_hists", "_yields")
155 target = np.load(target_file_path)["data"][sample_idx]
156
157 years_file_path = path.replace("_hists", "_years")
158 year = int(np.load(years_file_path)["data"][sample_idx])
159
160 ndvi_file_path = path.replace("_hists", "_ndvi")
161 ndvi = np.load(ndvi_file_path)["data"][sample_idx]
162
163 features = {
164 "label": torch.tensor(target).to(torch.float32),
165 "year": torch.tensor(year),
166 "ndvi": torch.from_numpy(ndvi).to(dtype=torch.float32),
167 }
168 return features
169
170 def retrieve_collection(self) -> list[tuple[str, int]]:
171 """Retrieve the collection.
172
173 Returns:
174 path and index to dataset samples
175 """
176 collection = []
177 for country in self.countries:
178 file_path = os.path.join(
179 self.root, self.dir, country, f"{self.split}_hists.npz"
180 )
181 npz_file = np.load(file_path)
182 num_data_points = npz_file["data"].shape[0]
183 for idx in range(num_data_points):
184 collection.append((file_path, idx))
185
186 return collection
187
188 def _verify(self) -> None:
189 """Verify the integrity of the dataset."""
190 # Check if the extracted files already exist
191 pathname = os.path.join(self.root, self.dir)
192 if os.path.exists(pathname):
193 return
194
195 # Check if the zip files have already been downloaded
196 pathname = os.path.join(self.root, self.dir) + ".zip"
197 if os.path.exists(pathname):
198 self._extract()
199 return
200
201 # Check if the user requested to download the dataset
202 if not self.download:
203 raise DatasetNotFoundError(self)
204
205 # Download the dataset
206 self._download()
207 self._extract()
208
209 def _download(self) -> None:
210 """Download the dataset and extract it."""
211 download_url(
212 self.url,
213 self.root,
214 filename=self.dir,
215 md5=self.md5 if self.checksum else None,
216 )
217 self._extract()
218
219 def _extract(self) -> None:
220 """Extract the dataset."""
221 zipfile_path = os.path.join(self.root, self.dir) + ".zip"
222 extract_archive(zipfile_path, self.root)
223
224 def plot(
225 self,
226 sample: dict[str, Any],
227 band_idx: int = 0,
228 show_titles: bool = True,
229 suptitle: Optional[str] = None,
230 ) -> Figure:
231 """Plot a sample from the dataset.
232
233 Args:
234 sample: a sample return by :meth:`__getitem__`
235 band_idx: which of the nine histograms to index
236 show_titles: flag indicating whether to show titles above each panel
237 suptitle: optional suptitle to use for figure
238
239 Returns:
240 a matplotlib Figure with the rendered sample
241
242 """
243 image, label = sample["image"], sample["label"].item()
244
245 showing_predictions = "prediction" in sample
246 if showing_predictions:
247 prediction = sample["prediction"].item()
248
249 fig, ax = plt.subplots(1, 1, figsize=(10, 10))
250
251 ax.imshow(image.permute(1, 2, 0)[:, :, band_idx])
252 ax.axis("off")
253
254 if show_titles:
255 title = f"Label: {label:.3f}"
256 if showing_predictions:
257 title += f"\nPrediction: {prediction:.3f}"
258 ax.set_title(title)
259
260 if suptitle is not None:
261 plt.suptitle(suptitle)
262
263 return fig
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchgeo/datasets/sustainbench_crop_yield.py b/torchgeo/datasets/sustainbench_crop_yield.py
--- a/torchgeo/datasets/sustainbench_crop_yield.py
+++ b/torchgeo/datasets/sustainbench_crop_yield.py
@@ -211,7 +211,7 @@
download_url(
self.url,
self.root,
- filename=self.dir,
+ filename=self.dir + ".zip",
md5=self.md5 if self.checksum else None,
)
self._extract()
| {"golden_diff": "diff --git a/torchgeo/datasets/sustainbench_crop_yield.py b/torchgeo/datasets/sustainbench_crop_yield.py\n--- a/torchgeo/datasets/sustainbench_crop_yield.py\n+++ b/torchgeo/datasets/sustainbench_crop_yield.py\n@@ -211,7 +211,7 @@\n download_url(\n self.url,\n self.root,\n- filename=self.dir,\n+ filename=self.dir + \".zip\",\n md5=self.md5 if self.checksum else None,\n )\n self._extract()\n", "issue": "SustainBenchCropYield download doesn't work\n### Description\n\nDownloading the SustainBenchCropYield dataset doesn't work as expected\n\n### Steps to reproduce\n\n```\r\nds = SustainBenchCropYield(\"data/\", download=True)\r\n```\r\n\r\nThis downloads a file called `soybeans` then fails unzipping `soybeans.zip`. Works if you rename to .zip and unzip manually.\n\n### Version\n\n0.6.0.dev0\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"SustainBench Crop Yield dataset.\"\"\"\n\nimport os\nfrom typing import Any, Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass SustainBenchCropYield(NonGeoDataset):\n \"\"\"SustainBench Crop Yield Dataset.\n\n This dataset contains MODIS band histograms and soybean yield\n estimates for selected counties in the USA, Argentina and Brazil.\n The dataset is part of the\n `SustainBench <https://sustainlab-group.github.io/sustainbench/docs/datasets/sdg2/crop_yield.html>`_\n datasets for tackling the UN Sustainable Development Goals (SDGs).\n\n Dataset Format:\n\n * .npz files of stacked samples\n\n Dataset Features:\n\n * input histogram of 7 surface reflectance and 2 surface temperature\n bands from MODIS pixel values in 32 ranges across 32 timesteps\n resulting in 32x32x9 input images\n * regression target value of soybean yield in metric tonnes per\n harvested hectare\n\n If you use this dataset in your research, please cite:\n\n * https://doi.org/10.1145/3209811.3212707\n * https://doi.org/10.1609/aaai.v31i1.11172\n\n .. 
versionadded:: 0.5\n \"\"\" # noqa: E501\n\n valid_countries = [\"usa\", \"brazil\", \"argentina\"]\n\n md5 = \"c2794e59512c897d9bea77b112848122\"\n\n url = \"https://drive.google.com/file/d/1odwkI1hiE5rMZ4VfM0hOXzlFR4NbhrfU/view?usp=share_link\" # noqa: E501\n\n dir = \"soybeans\"\n\n valid_splits = [\"train\", \"dev\", \"test\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n countries: list[str] = [\"usa\"],\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\", \"dev\", or \"test\"\n countries: which countries to include in the dataset\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 after downloading files (may be slow)\n\n Raises:\n AssertionError: if ``countries`` contains invalid countries or if ``split``\n is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert set(countries).issubset(\n self.valid_countries\n ), f\"Please choose a subset of these valid countried: {self.valid_countries}.\"\n self.countries = countries\n\n assert (\n split in self.valid_splits\n ), f\"Pleas choose one of these valid data splits {self.valid_splits}.\"\n self.split = split\n\n self.root = root\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n self.collection = self.retrieve_collection()\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.collection)\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n input_file_path, sample_idx = self.collection[index]\n\n sample: dict[str, Tensor] = {\n \"image\": self._load_image(input_file_path, sample_idx)\n }\n sample.update(self._load_features(input_file_path, sample_idx))\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def _load_image(self, path: str, sample_idx: int) -> Tensor:\n \"\"\"Load input image.\n\n Args:\n path: path to input npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n input image as tensor\n \"\"\"\n arr = np.load(path)[\"data\"][sample_idx]\n # return [channel, height, width]\n return torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)\n\n def _load_features(self, path: str, sample_idx: int) -> dict[str, Tensor]:\n \"\"\"Load features value.\n\n Args:\n path: path to image npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n target regression value\n \"\"\"\n target_file_path = path.replace(\"_hists\", \"_yields\")\n target = np.load(target_file_path)[\"data\"][sample_idx]\n\n years_file_path = path.replace(\"_hists\", \"_years\")\n year = int(np.load(years_file_path)[\"data\"][sample_idx])\n\n ndvi_file_path = path.replace(\"_hists\", \"_ndvi\")\n ndvi = np.load(ndvi_file_path)[\"data\"][sample_idx]\n\n features = {\n \"label\": torch.tensor(target).to(torch.float32),\n \"year\": torch.tensor(year),\n \"ndvi\": torch.from_numpy(ndvi).to(dtype=torch.float32),\n }\n return features\n\n def 
retrieve_collection(self) -> list[tuple[str, int]]:\n \"\"\"Retrieve the collection.\n\n Returns:\n path and index to dataset samples\n \"\"\"\n collection = []\n for country in self.countries:\n file_path = os.path.join(\n self.root, self.dir, country, f\"{self.split}_hists.npz\"\n )\n npz_file = np.load(file_path)\n num_data_points = npz_file[\"data\"].shape[0]\n for idx in range(num_data_points):\n collection.append((file_path, idx))\n\n return collection\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n zipfile_path = os.path.join(self.root, self.dir) + \".zip\"\n extract_archive(zipfile_path, self.root)\n\n def plot(\n self,\n sample: dict[str, Any],\n band_idx: int = 0,\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample return by :meth:`__getitem__`\n band_idx: which of the nine histograms to index\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional suptitle to use for figure\n\n Returns:\n a matplotlib Figure with the rendered sample\n\n \"\"\"\n image, label = sample[\"image\"], sample[\"label\"].item()\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = sample[\"prediction\"].item()\n\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n\n ax.imshow(image.permute(1, 2, 0)[:, :, band_idx])\n ax.axis(\"off\")\n\n if show_titles:\n title = f\"Label: {label:.3f}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction:.3f}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/sustainbench_crop_yield.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"SustainBench Crop Yield dataset.\"\"\"\n\nimport os\nfrom typing import Any, Callable, Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\n\nfrom .geo import NonGeoDataset\nfrom .utils import DatasetNotFoundError, download_url, extract_archive\n\n\nclass SustainBenchCropYield(NonGeoDataset):\n \"\"\"SustainBench Crop Yield Dataset.\n\n This dataset contains MODIS band histograms and soybean yield\n estimates for selected counties in the USA, Argentina and Brazil.\n The dataset is part of the\n `SustainBench <https://sustainlab-group.github.io/sustainbench/docs/datasets/sdg2/crop_yield.html>`_\n datasets for tackling the UN Sustainable Development Goals (SDGs).\n\n Dataset Format:\n\n * .npz files of stacked samples\n\n Dataset Features:\n\n * input histogram of 7 surface reflectance and 2 surface temperature\n bands from MODIS pixel values in 32 ranges across 32 timesteps\n resulting in 32x32x9 input images\n * regression target value of soybean yield in metric tonnes per\n harvested hectare\n\n If you use this dataset in your research, please cite:\n\n * https://doi.org/10.1145/3209811.3212707\n * https://doi.org/10.1609/aaai.v31i1.11172\n\n .. versionadded:: 0.5\n \"\"\" # noqa: E501\n\n valid_countries = [\"usa\", \"brazil\", \"argentina\"]\n\n md5 = \"c2794e59512c897d9bea77b112848122\"\n\n url = \"https://drive.google.com/file/d/1odwkI1hiE5rMZ4VfM0hOXzlFR4NbhrfU/view?usp=share_link\" # noqa: E501\n\n dir = \"soybeans\"\n\n valid_splits = [\"train\", \"dev\", \"test\"]\n\n def __init__(\n self,\n root: str = \"data\",\n split: str = \"train\",\n countries: list[str] = [\"usa\"],\n transforms: Optional[Callable[[dict[str, Any]], dict[str, Any]]] = None,\n download: bool = False,\n checksum: bool = False,\n ) -> None:\n \"\"\"Initialize a new Dataset instance.\n\n Args:\n root: root directory where dataset can be found\n split: one of \"train\", \"dev\", or \"test\"\n countries: which countries to include in the dataset\n transforms: a function/transform that takes an input sample\n and returns a transformed version\n download: if True, download dataset and store it in the root directory\n checksum: if True, check the MD5 after downloading files (may be slow)\n\n Raises:\n AssertionError: if ``countries`` contains invalid countries or if ``split``\n is invalid\n DatasetNotFoundError: If dataset is not found and *download* is False.\n \"\"\"\n assert set(countries).issubset(\n self.valid_countries\n ), f\"Please choose a subset of these valid countried: {self.valid_countries}.\"\n self.countries = countries\n\n assert (\n split in self.valid_splits\n ), f\"Pleas choose one of these valid data splits {self.valid_splits}.\"\n self.split = split\n\n self.root = root\n self.transforms = transforms\n self.download = download\n self.checksum = checksum\n\n self._verify()\n self.collection = self.retrieve_collection()\n\n def __len__(self) -> int:\n \"\"\"Return the number of data points in the dataset.\n\n Returns:\n length of the dataset\n \"\"\"\n return len(self.collection)\n\n def __getitem__(self, index: int) -> dict[str, Tensor]:\n \"\"\"Return an index within the dataset.\n\n Args:\n index: index to return\n\n Returns:\n data and label at that index\n \"\"\"\n input_file_path, sample_idx = self.collection[index]\n\n sample: dict[str, Tensor] = {\n \"image\": self._load_image(input_file_path, sample_idx)\n }\n 
sample.update(self._load_features(input_file_path, sample_idx))\n\n if self.transforms is not None:\n sample = self.transforms(sample)\n\n return sample\n\n def _load_image(self, path: str, sample_idx: int) -> Tensor:\n \"\"\"Load input image.\n\n Args:\n path: path to input npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n input image as tensor\n \"\"\"\n arr = np.load(path)[\"data\"][sample_idx]\n # return [channel, height, width]\n return torch.from_numpy(arr).permute(2, 0, 1).to(torch.float32)\n\n def _load_features(self, path: str, sample_idx: int) -> dict[str, Tensor]:\n \"\"\"Load features value.\n\n Args:\n path: path to image npz collection\n sample_idx: what sample to index from the npz collection\n\n Returns:\n target regression value\n \"\"\"\n target_file_path = path.replace(\"_hists\", \"_yields\")\n target = np.load(target_file_path)[\"data\"][sample_idx]\n\n years_file_path = path.replace(\"_hists\", \"_years\")\n year = int(np.load(years_file_path)[\"data\"][sample_idx])\n\n ndvi_file_path = path.replace(\"_hists\", \"_ndvi\")\n ndvi = np.load(ndvi_file_path)[\"data\"][sample_idx]\n\n features = {\n \"label\": torch.tensor(target).to(torch.float32),\n \"year\": torch.tensor(year),\n \"ndvi\": torch.from_numpy(ndvi).to(dtype=torch.float32),\n }\n return features\n\n def retrieve_collection(self) -> list[tuple[str, int]]:\n \"\"\"Retrieve the collection.\n\n Returns:\n path and index to dataset samples\n \"\"\"\n collection = []\n for country in self.countries:\n file_path = os.path.join(\n self.root, self.dir, country, f\"{self.split}_hists.npz\"\n )\n npz_file = np.load(file_path)\n num_data_points = npz_file[\"data\"].shape[0]\n for idx in range(num_data_points):\n collection.append((file_path, idx))\n\n return collection\n\n def _verify(self) -> None:\n \"\"\"Verify the integrity of the dataset.\"\"\"\n # Check if the extracted files already exist\n pathname = os.path.join(self.root, self.dir)\n if os.path.exists(pathname):\n return\n\n # Check if the zip files have already been downloaded\n pathname = os.path.join(self.root, self.dir) + \".zip\"\n if os.path.exists(pathname):\n self._extract()\n return\n\n # Check if the user requested to download the dataset\n if not self.download:\n raise DatasetNotFoundError(self)\n\n # Download the dataset\n self._download()\n self._extract()\n\n def _download(self) -> None:\n \"\"\"Download the dataset and extract it.\"\"\"\n download_url(\n self.url,\n self.root,\n filename=self.dir + \".zip\",\n md5=self.md5 if self.checksum else None,\n )\n self._extract()\n\n def _extract(self) -> None:\n \"\"\"Extract the dataset.\"\"\"\n zipfile_path = os.path.join(self.root, self.dir) + \".zip\"\n extract_archive(zipfile_path, self.root)\n\n def plot(\n self,\n sample: dict[str, Any],\n band_idx: int = 0,\n show_titles: bool = True,\n suptitle: Optional[str] = None,\n ) -> Figure:\n \"\"\"Plot a sample from the dataset.\n\n Args:\n sample: a sample return by :meth:`__getitem__`\n band_idx: which of the nine histograms to index\n show_titles: flag indicating whether to show titles above each panel\n suptitle: optional suptitle to use for figure\n\n Returns:\n a matplotlib Figure with the rendered sample\n\n \"\"\"\n image, label = sample[\"image\"], sample[\"label\"].item()\n\n showing_predictions = \"prediction\" in sample\n if showing_predictions:\n prediction = sample[\"prediction\"].item()\n\n fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n\n ax.imshow(image.permute(1, 2, 0)[:, :, band_idx])\n 
ax.axis(\"off\")\n\n if show_titles:\n title = f\"Label: {label:.3f}\"\n if showing_predictions:\n title += f\"\\nPrediction: {prediction:.3f}\"\n ax.set_title(title)\n\n if suptitle is not None:\n plt.suptitle(suptitle)\n\n return fig\n", "path": "torchgeo/datasets/sustainbench_crop_yield.py"}]} | 3,038 | 117 |
gh_patches_debug_27882 | rasdani/github-patches | git_diff | cornellius-gp__gpytorch-644 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Where is `fast_pred_var` moving?
I received the following warning when using `fast_pred_var`:
```
/cluster/nhunt/anaconda/envs/bayes_opt/lib/python3.7/site-packages/gpytorch/beta_features.py:17:
DeprecationWarning: `gpytorch.settings.fast_pred_var` has moved to `gpytorch.settings.fast_pred_var`.
```
It seems that I'm being warned that `fast_pred_var` has moved to its current location. Was there a typo in the warning about how we should be using this setting now?
```bash
$ pip list | grep gpytorch
gpytorch 0.2.1
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gpytorch/__init__.py`
Content:
```
1 #!/usr/bin/env python3
2 from .module import Module
3 from . import (
4 beta_features,
5 distributions,
6 kernels,
7 lazy,
8 likelihoods,
9 means,
10 mlls,
11 models,
12 priors,
13 settings,
14 utils,
15 variational,
16 )
17 from .functions import (
18 add_diag,
19 add_jitter,
20 dsmm,
21 inv_matmul,
22 inv_quad,
23 inv_quad_logdet,
24 logdet,
25 log_normal_cdf,
26 matmul,
27 normal_cdf,
28 root_decomposition,
29 root_inv_decomposition,
30 # Deprecated
31 inv_quad_log_det,
32 log_det,
33 )
34 from .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood
35 from .lazy import lazify, delazify
36
37
38 __version__ = "0.2.1"
39
40 # Old deprecated stuff
41 fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, "gpytorch.settings.fast_pred_var")
42
43 __all__ = [
44 # Submodules
45 "distributions",
46 "kernels",
47 "lazy",
48 "likelihoods",
49 "means",
50 "mlls",
51 "models",
52 "priors",
53 "utils",
54 "variational",
55 # Classes
56 "Module",
57 "ExactMarginalLogLikelihood",
58 "VariationalMarginalLogLikelihood",
59 # Functions
60 "add_diag",
61 "add_jitter",
62 "delazify",
63 "dsmm",
64 "inv_matmul",
65 "inv_quad",
66 "inv_quad_logdet",
67 "lazify",
68 "logdet",
69 "log_normal_cdf",
70 "matmul",
71 "normal_cdf",
72 "root_decomposition",
73 "root_inv_decomposition",
74 # Context managers
75 "beta_features",
76 "settings",
77 # Other
78 "__version__",
79 # Deprecated
80 "fast_pred_var",
81 "inv_quad_log_det",
82 "log_det",
83 ]
84
```
Path: `gpytorch/beta_features.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import warnings
4 from .settings import _feature_flag, _value_context
5 from .settings import fast_pred_var as _fast_pred_var
6 from .settings import fast_pred_samples as _fast_pred_samples
7
8
9 class _moved_beta_feature(object):
10 def __init__(self, new_cls, orig_name=None):
11 self.new_cls = new_cls
12 self.orig_name = orig_name if orig_name is not None else "gpytorch.settings.{}".format(new_cls.__name__)
13
14 def __call__(self, *args, **kwargs):
15 warnings.warn(
16 "`{}` has moved to `gpytorch.settings.{}`.".format(self.orig_name, self.new_cls.__name__),
17 DeprecationWarning
18 )
19 return self.new_cls(*args, **kwargs)
20
21 def __getattr__(self, name):
22 return getattr(self.new_cls, name)
23
24
25 fast_pred_var = _moved_beta_feature(_fast_pred_var)
26 fast_pred_samples = _moved_beta_feature(_fast_pred_samples)
27
28
29 class checkpoint_kernel(_value_context):
30 """
31 Should the kernel be computed in chunks with checkpointing or not? (Default, no)
32
33 If `split_size = 0`:
34 The kernel is computed explicitly. During training, the kernel matrix is
35 kept in memory for the backward pass. This is the fastest option but the
36 most memory intensive.
37 If `split_size > 0`:
38 The kernel is never fully computed or stored. Instead, the kernel is only
39 accessed through matrix multiplication. The matrix multiplication is
40 computed in `segments` chunks. This is slower, but requires significantly less memory.
41
42 Default: 0
43 """
44
45 _global_value = 0
46
47
48 class diagonal_correction(_feature_flag):
49 """
50 Add a diagonal correction to scalable inducing point methods
51 """
52
53 _state = True
54
55
56 class default_preconditioner(_feature_flag):
57 """
58 Add a diagonal correction to scalable inducing point methods
59 """
60
61 pass
62
63
64 __all__ = ["fast_pred_var", "fast_pred_samples", "diagonal_correction", "default_preconditioner"]
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gpytorch/__init__.py b/gpytorch/__init__.py
--- a/gpytorch/__init__.py
+++ b/gpytorch/__init__.py
@@ -37,9 +37,6 @@
__version__ = "0.2.1"
-# Old deprecated stuff
-fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, "gpytorch.settings.fast_pred_var")
-
__all__ = [
# Submodules
"distributions",
@@ -77,7 +74,6 @@
# Other
"__version__",
# Deprecated
- "fast_pred_var",
"inv_quad_log_det",
"log_det",
]
diff --git a/gpytorch/beta_features.py b/gpytorch/beta_features.py
--- a/gpytorch/beta_features.py
+++ b/gpytorch/beta_features.py
@@ -2,8 +2,6 @@
import warnings
from .settings import _feature_flag, _value_context
-from .settings import fast_pred_var as _fast_pred_var
-from .settings import fast_pred_samples as _fast_pred_samples
class _moved_beta_feature(object):
@@ -22,10 +20,6 @@
return getattr(self.new_cls, name)
-fast_pred_var = _moved_beta_feature(_fast_pred_var)
-fast_pred_samples = _moved_beta_feature(_fast_pred_samples)
-
-
class checkpoint_kernel(_value_context):
"""
Should the kernel be computed in chunks with checkpointing or not? (Default, no)
@@ -61,4 +55,4 @@
pass
-__all__ = ["fast_pred_var", "fast_pred_samples", "diagonal_correction", "default_preconditioner"]
+__all__ = ["checkpoint_kernel", "diagonal_correction", "default_preconditioner"]
| {"golden_diff": "diff --git a/gpytorch/__init__.py b/gpytorch/__init__.py\n--- a/gpytorch/__init__.py\n+++ b/gpytorch/__init__.py\n@@ -37,9 +37,6 @@\n \n __version__ = \"0.2.1\"\n \n-# Old deprecated stuff\n-fast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, \"gpytorch.settings.fast_pred_var\")\n-\n __all__ = [\n # Submodules\n \"distributions\",\n@@ -77,7 +74,6 @@\n # Other\n \"__version__\",\n # Deprecated\n- \"fast_pred_var\",\n \"inv_quad_log_det\",\n \"log_det\",\n ]\ndiff --git a/gpytorch/beta_features.py b/gpytorch/beta_features.py\n--- a/gpytorch/beta_features.py\n+++ b/gpytorch/beta_features.py\n@@ -2,8 +2,6 @@\n \n import warnings\n from .settings import _feature_flag, _value_context\n-from .settings import fast_pred_var as _fast_pred_var\n-from .settings import fast_pred_samples as _fast_pred_samples\n \n \n class _moved_beta_feature(object):\n@@ -22,10 +20,6 @@\n return getattr(self.new_cls, name)\n \n \n-fast_pred_var = _moved_beta_feature(_fast_pred_var)\n-fast_pred_samples = _moved_beta_feature(_fast_pred_samples)\n-\n-\n class checkpoint_kernel(_value_context):\n \"\"\"\n Should the kernel be computed in chunks with checkpointing or not? (Default, no)\n@@ -61,4 +55,4 @@\n pass\n \n \n-__all__ = [\"fast_pred_var\", \"fast_pred_samples\", \"diagonal_correction\", \"default_preconditioner\"]\n+__all__ = [\"checkpoint_kernel\", \"diagonal_correction\", \"default_preconditioner\"]\n", "issue": "Where is `fast_pred_var` moving?\nI received the following warning when using `fast_pred_var`:\r\n\r\n```\r\n/cluster/nhunt/anaconda/envs/bayes_opt/lib/python3.7/site-packages/gpytorch/beta_features.py:17:\r\nDeprecationWarning: `gpytorch.settings.fast_pred_var` has moved to `gpytorch.settings.fast_pred_var`.\r\n```\r\n\r\nIt seems that I'm being warned that `fast_pred_var` has moved to its current location. Was there a typo in the warning about how we should be using this setting now?\r\n\r\n```bash\r\n$ pip list | grep gpytorch\r\ngpytorch 0.2.1\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\nfrom .module import Module\nfrom . 
import (\n beta_features,\n distributions,\n kernels,\n lazy,\n likelihoods,\n means,\n mlls,\n models,\n priors,\n settings,\n utils,\n variational,\n)\nfrom .functions import (\n add_diag,\n add_jitter,\n dsmm,\n inv_matmul,\n inv_quad,\n inv_quad_logdet,\n logdet,\n log_normal_cdf,\n matmul,\n normal_cdf,\n root_decomposition,\n root_inv_decomposition,\n # Deprecated\n inv_quad_log_det,\n log_det,\n)\nfrom .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood\nfrom .lazy import lazify, delazify\n\n\n__version__ = \"0.2.1\"\n\n# Old deprecated stuff\nfast_pred_var = beta_features._moved_beta_feature(settings.fast_pred_var, \"gpytorch.settings.fast_pred_var\")\n\n__all__ = [\n # Submodules\n \"distributions\",\n \"kernels\",\n \"lazy\",\n \"likelihoods\",\n \"means\",\n \"mlls\",\n \"models\",\n \"priors\",\n \"utils\",\n \"variational\",\n # Classes\n \"Module\",\n \"ExactMarginalLogLikelihood\",\n \"VariationalMarginalLogLikelihood\",\n # Functions\n \"add_diag\",\n \"add_jitter\",\n \"delazify\",\n \"dsmm\",\n \"inv_matmul\",\n \"inv_quad\",\n \"inv_quad_logdet\",\n \"lazify\",\n \"logdet\",\n \"log_normal_cdf\",\n \"matmul\",\n \"normal_cdf\",\n \"root_decomposition\",\n \"root_inv_decomposition\",\n # Context managers\n \"beta_features\",\n \"settings\",\n # Other\n \"__version__\",\n # Deprecated\n \"fast_pred_var\",\n \"inv_quad_log_det\",\n \"log_det\",\n]\n", "path": "gpytorch/__init__.py"}, {"content": "#!/usr/bin/env python3\n\nimport warnings\nfrom .settings import _feature_flag, _value_context\nfrom .settings import fast_pred_var as _fast_pred_var\nfrom .settings import fast_pred_samples as _fast_pred_samples\n\n\nclass _moved_beta_feature(object):\n def __init__(self, new_cls, orig_name=None):\n self.new_cls = new_cls\n self.orig_name = orig_name if orig_name is not None else \"gpytorch.settings.{}\".format(new_cls.__name__)\n\n def __call__(self, *args, **kwargs):\n warnings.warn(\n \"`{}` has moved to `gpytorch.settings.{}`.\".format(self.orig_name, self.new_cls.__name__),\n DeprecationWarning\n )\n return self.new_cls(*args, **kwargs)\n\n def __getattr__(self, name):\n return getattr(self.new_cls, name)\n\n\nfast_pred_var = _moved_beta_feature(_fast_pred_var)\nfast_pred_samples = _moved_beta_feature(_fast_pred_samples)\n\n\nclass checkpoint_kernel(_value_context):\n \"\"\"\n Should the kernel be computed in chunks with checkpointing or not? (Default, no)\n\n If `split_size = 0`:\n The kernel is computed explicitly. During training, the kernel matrix is\n kept in memory for the backward pass. This is the fastest option but the\n most memory intensive.\n If `split_size > 0`:\n The kernel is never fully computed or stored. Instead, the kernel is only\n accessed through matrix multiplication. The matrix multiplication is\n computed in `segments` chunks. This is slower, but requires significantly less memory.\n\n Default: 0\n \"\"\"\n\n _global_value = 0\n\n\nclass diagonal_correction(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n _state = True\n\n\nclass default_preconditioner(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n pass\n\n\n__all__ = [\"fast_pred_var\", \"fast_pred_samples\", \"diagonal_correction\", \"default_preconditioner\"]\n", "path": "gpytorch/beta_features.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nfrom .module import Module\nfrom . 
import (\n beta_features,\n distributions,\n kernels,\n lazy,\n likelihoods,\n means,\n mlls,\n models,\n priors,\n settings,\n utils,\n variational,\n)\nfrom .functions import (\n add_diag,\n add_jitter,\n dsmm,\n inv_matmul,\n inv_quad,\n inv_quad_logdet,\n logdet,\n log_normal_cdf,\n matmul,\n normal_cdf,\n root_decomposition,\n root_inv_decomposition,\n # Deprecated\n inv_quad_log_det,\n log_det,\n)\nfrom .mlls import ExactMarginalLogLikelihood, VariationalMarginalLogLikelihood\nfrom .lazy import lazify, delazify\n\n\n__version__ = \"0.2.1\"\n\n__all__ = [\n # Submodules\n \"distributions\",\n \"kernels\",\n \"lazy\",\n \"likelihoods\",\n \"means\",\n \"mlls\",\n \"models\",\n \"priors\",\n \"utils\",\n \"variational\",\n # Classes\n \"Module\",\n \"ExactMarginalLogLikelihood\",\n \"VariationalMarginalLogLikelihood\",\n # Functions\n \"add_diag\",\n \"add_jitter\",\n \"delazify\",\n \"dsmm\",\n \"inv_matmul\",\n \"inv_quad\",\n \"inv_quad_logdet\",\n \"lazify\",\n \"logdet\",\n \"log_normal_cdf\",\n \"matmul\",\n \"normal_cdf\",\n \"root_decomposition\",\n \"root_inv_decomposition\",\n # Context managers\n \"beta_features\",\n \"settings\",\n # Other\n \"__version__\",\n # Deprecated\n \"inv_quad_log_det\",\n \"log_det\",\n]\n", "path": "gpytorch/__init__.py"}, {"content": "#!/usr/bin/env python3\n\nimport warnings\nfrom .settings import _feature_flag, _value_context\n\n\nclass _moved_beta_feature(object):\n def __init__(self, new_cls, orig_name=None):\n self.new_cls = new_cls\n self.orig_name = orig_name if orig_name is not None else \"gpytorch.settings.{}\".format(new_cls.__name__)\n\n def __call__(self, *args, **kwargs):\n warnings.warn(\n \"`{}` has moved to `gpytorch.settings.{}`.\".format(self.orig_name, self.new_cls.__name__),\n DeprecationWarning\n )\n return self.new_cls(*args, **kwargs)\n\n def __getattr__(self, name):\n return getattr(self.new_cls, name)\n\n\nclass checkpoint_kernel(_value_context):\n \"\"\"\n Should the kernel be computed in chunks with checkpointing or not? (Default, no)\n\n If `split_size = 0`:\n The kernel is computed explicitly. During training, the kernel matrix is\n kept in memory for the backward pass. This is the fastest option but the\n most memory intensive.\n If `split_size > 0`:\n The kernel is never fully computed or stored. Instead, the kernel is only\n accessed through matrix multiplication. The matrix multiplication is\n computed in `segments` chunks. This is slower, but requires significantly less memory.\n\n Default: 0\n \"\"\"\n\n _global_value = 0\n\n\nclass diagonal_correction(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n _state = True\n\n\nclass default_preconditioner(_feature_flag):\n \"\"\"\n Add a diagonal correction to scalable inducing point methods\n \"\"\"\n\n pass\n\n\n__all__ = [\"checkpoint_kernel\", \"diagonal_correction\", \"default_preconditioner\"]\n", "path": "gpytorch/beta_features.py"}]} | 1,598 | 397 |
gh_patches_debug_2264 | rasdani/github-patches | git_diff | bookwyrm-social__bookwyrm-1410 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to post review in Safari on iPadOS
**Describe the bug**
When trying to post a long-ish review in Safari on iPadOS (desktop mode, content blockers disabled), the post button shows the spinner introduced with #1388, but the posting never concludes.
**To Reproduce**
Steps to reproduce the behavior:
1. Get an iPad pro 13.9” running iPadOS 14.7.1
2. Open your bookwyrm.social account in Safari, ensuring it runs in desktop mode and content blockers are disabled
3. Write a review of at least 2700 chars
4. Try to post it
**Expected behavior**
After some spinning, the review appears on your feed.
**Instance**
bookwyrm.social
**Extra context**
[Book I’m trying to review](https://bookwyrm.social/book/214201).
---
**Device Info:**
- Device: iPad pro 2nd gen 13.9”
- OS: iPadOS 14.7.1
- Browser: Safari
- Version N/A
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/settings.py`
Content:
```
1 """ bookwyrm settings and configuration """
2 import os
3 from environs import Env
4
5 import requests
6 from django.utils.translation import gettext_lazy as _
7
8
9 env = Env()
10 DOMAIN = env("DOMAIN")
11 VERSION = "0.0.1"
12
13 PAGE_LENGTH = env("PAGE_LENGTH", 15)
14 DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
15
16 JS_CACHE = "19447742"
17
18 # email
19 EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
20 EMAIL_HOST = env("EMAIL_HOST")
21 EMAIL_PORT = env("EMAIL_PORT", 587)
22 EMAIL_HOST_USER = env("EMAIL_HOST_USER")
23 EMAIL_HOST_PASSWORD = env("EMAIL_HOST_PASSWORD")
24 EMAIL_USE_TLS = env.bool("EMAIL_USE_TLS", True)
25 EMAIL_USE_SSL = env.bool("EMAIL_USE_SSL", False)
26 DEFAULT_FROM_EMAIL = "admin@{:s}".format(env("DOMAIN"))
27
28 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
29 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
30 LOCALE_PATHS = [
31 os.path.join(BASE_DIR, "locale"),
32 ]
33
34 DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
35
36 # Preview image
37 ENABLE_PREVIEW_IMAGES = env.bool("ENABLE_PREVIEW_IMAGES", False)
38 PREVIEW_BG_COLOR = env.str("PREVIEW_BG_COLOR", "use_dominant_color_light")
39 PREVIEW_TEXT_COLOR = env.str("PREVIEW_TEXT_COLOR", "#363636")
40 PREVIEW_IMG_WIDTH = env.int("PREVIEW_IMG_WIDTH", 1200)
41 PREVIEW_IMG_HEIGHT = env.int("PREVIEW_IMG_HEIGHT", 630)
42 PREVIEW_DEFAULT_COVER_COLOR = env.str("PREVIEW_DEFAULT_COVER_COLOR", "#002549")
43
44 # Quick-start development settings - unsuitable for production
45 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
46
47 # SECURITY WARNING: keep the secret key used in production secret!
48 SECRET_KEY = env("SECRET_KEY")
49
50 # SECURITY WARNING: don't run with debug turned on in production!
51 DEBUG = env.bool("DEBUG", True)
52 USE_HTTPS = env.bool("USE_HTTPS", False)
53
54 ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", ["*"])
55
56 # Application definition
57
58 INSTALLED_APPS = [
59 "django.contrib.admin",
60 "django.contrib.auth",
61 "django.contrib.contenttypes",
62 "django.contrib.sessions",
63 "django.contrib.messages",
64 "django.contrib.staticfiles",
65 "django.contrib.humanize",
66 "django_rename_app",
67 "bookwyrm",
68 "celery",
69 "imagekit",
70 "storages",
71 ]
72
73 MIDDLEWARE = [
74 "django.middleware.security.SecurityMiddleware",
75 "django.contrib.sessions.middleware.SessionMiddleware",
76 "django.middleware.locale.LocaleMiddleware",
77 "django.middleware.common.CommonMiddleware",
78 "django.middleware.csrf.CsrfViewMiddleware",
79 "django.contrib.auth.middleware.AuthenticationMiddleware",
80 "bookwyrm.timezone_middleware.TimezoneMiddleware",
81 "django.contrib.messages.middleware.MessageMiddleware",
82 "django.middleware.clickjacking.XFrameOptionsMiddleware",
83 ]
84
85 ROOT_URLCONF = "bookwyrm.urls"
86
87 TEMPLATES = [
88 {
89 "BACKEND": "django.template.backends.django.DjangoTemplates",
90 "DIRS": ["templates"],
91 "APP_DIRS": True,
92 "OPTIONS": {
93 "context_processors": [
94 "django.template.context_processors.debug",
95 "django.template.context_processors.request",
96 "django.contrib.auth.context_processors.auth",
97 "django.contrib.messages.context_processors.messages",
98 "bookwyrm.context_processors.site_settings",
99 ],
100 },
101 },
102 ]
103
104
105 WSGI_APPLICATION = "bookwyrm.wsgi.application"
106
107 # redis/activity streams settings
108 REDIS_ACTIVITY_HOST = env("REDIS_ACTIVITY_HOST", "localhost")
109 REDIS_ACTIVITY_PORT = env("REDIS_ACTIVITY_PORT", 6379)
110 REDIS_ACTIVITY_PASSWORD = env("REDIS_ACTIVITY_PASSWORD", None)
111
112 MAX_STREAM_LENGTH = int(env("MAX_STREAM_LENGTH", 200))
113
114 STREAMS = [
115 {"key": "home", "name": _("Home Timeline"), "shortname": _("Home")},
116 {"key": "books", "name": _("Books Timeline"), "shortname": _("Books")},
117 ]
118
119 # Database
120 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
121
122 DATABASES = {
123 "default": {
124 "ENGINE": "django.db.backends.postgresql_psycopg2",
125 "NAME": env("POSTGRES_DB", "fedireads"),
126 "USER": env("POSTGRES_USER", "fedireads"),
127 "PASSWORD": env("POSTGRES_PASSWORD", "fedireads"),
128 "HOST": env("POSTGRES_HOST", ""),
129 "PORT": env("POSTGRES_PORT", 5432),
130 },
131 }
132
133
134 LOGIN_URL = "/login/"
135 AUTH_USER_MODEL = "bookwyrm.User"
136
137 # Password validation
138 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
139
140 # pylint: disable=line-too-long
141 AUTH_PASSWORD_VALIDATORS = [
142 {
143 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
144 },
145 {
146 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
147 },
148 {
149 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
150 },
151 {
152 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
153 },
154 ]
155
156
157 # Internationalization
158 # https://docs.djangoproject.com/en/3.2/topics/i18n/
159
160 LANGUAGE_CODE = "en-us"
161 LANGUAGES = [
162 ("en-us", _("English")),
163 ("de-de", _("German")),
164 ("es", _("Spanish")),
165 ("fr-fr", _("French")),
166 ("zh-hans", _("Simplified Chinese")),
167 ("zh-hant", _("Traditional Chinese")),
168 ]
169
170
171 TIME_ZONE = "UTC"
172
173 USE_I18N = True
174
175 USE_L10N = True
176
177 USE_TZ = True
178
179
180 USER_AGENT = "%s (BookWyrm/%s; +https://%s/)" % (
181 requests.utils.default_user_agent(),
182 VERSION,
183 DOMAIN,
184 )
185
186 # Imagekit generated thumbnails
187 ENABLE_THUMBNAIL_GENERATION = env.bool("ENABLE_THUMBNAIL_GENERATION", False)
188 IMAGEKIT_CACHEFILE_DIR = "thumbnails"
189
190 # Static files (CSS, JavaScript, Images)
191 # https://docs.djangoproject.com/en/3.2/howto/static-files/
192
193 PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
194
195 # Storage
196
197 PROTOCOL = "http"
198 if USE_HTTPS:
199 PROTOCOL = "https"
200
201 USE_S3 = env.bool("USE_S3", False)
202
203 if USE_S3:
204 # AWS settings
205 AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
206 AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
207 AWS_STORAGE_BUCKET_NAME = env("AWS_STORAGE_BUCKET_NAME")
208 AWS_S3_CUSTOM_DOMAIN = env("AWS_S3_CUSTOM_DOMAIN")
209 AWS_S3_REGION_NAME = env("AWS_S3_REGION_NAME", "")
210 AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
211 AWS_DEFAULT_ACL = "public-read"
212 AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
213 # S3 Static settings
214 STATIC_LOCATION = "static"
215 STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)
216 STATICFILES_STORAGE = "bookwyrm.storage_backends.StaticStorage"
217 # S3 Media settings
218 MEDIA_LOCATION = "images"
219 MEDIA_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)
220 MEDIA_FULL_URL = MEDIA_URL
221 DEFAULT_FILE_STORAGE = "bookwyrm.storage_backends.ImagesStorage"
222 # I don't know if it's used, but the site crashes without it
223 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
224 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
225 else:
226 STATIC_URL = "/static/"
227 STATIC_ROOT = os.path.join(BASE_DIR, env("STATIC_ROOT", "static"))
228 MEDIA_URL = "/images/"
229 MEDIA_FULL_URL = "%s://%s%s" % (PROTOCOL, DOMAIN, MEDIA_URL)
230 MEDIA_ROOT = os.path.join(BASE_DIR, env("MEDIA_ROOT", "images"))
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py
--- a/bookwyrm/settings.py
+++ b/bookwyrm/settings.py
@@ -13,7 +13,7 @@
PAGE_LENGTH = env("PAGE_LENGTH", 15)
DEFAULT_LANGUAGE = env("DEFAULT_LANGUAGE", "English")
-JS_CACHE = "19447742"
+JS_CACHE = "e5832a26"
# email
EMAIL_BACKEND = env("EMAIL_BACKEND", "django.core.mail.backends.smtp.EmailBackend")
| {"golden_diff": "diff --git a/bookwyrm/settings.py b/bookwyrm/settings.py\n--- a/bookwyrm/settings.py\n+++ b/bookwyrm/settings.py\n@@ -13,7 +13,7 @@\n PAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\n DEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n \n-JS_CACHE = \"19447742\"\n+JS_CACHE = \"e5832a26\"\n \n # email\n EMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\n", "issue": "Unable to post review in Safari on iPadOS\n**Describe the bug**\nWhen trying to post a long-ish review in Safari on iPadOS (desktop mode, content blockers disabled), the post button shows the spinner introduced with #1388, but the posting never concludes.\n\n**To Reproduce**\nSteps to reproduce the behavior:\n1. Get an iPad pro 13.9\u201d running iPadOS 14.7.1\n2. Open your bookwyrm.social account in Safari, ensuring it runs in desktop mode and content blockers are disabled\n3. Write a review of at least 2700 chars\n4. Try to post it\n\n**Expected behavior**\nAfter some spinning, the review appears on your feed.\n\n**Instance**\nbookwyrm.social\n\n**Extra context**\n[Book I\u2019m trying to review](https://bookwyrm.social/book/214201).\n\n---\n\n**Device Info:**\n - Device: iPad pro 2nd gen 13.9\u201d\n - OS: iPadOS 14.7.1\n - Browser: Safari\n - Version N/A\n", "before_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"19447742\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = \"admin@{:s}\".format(env(\"DOMAIN\"))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n 
\"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.timezone_middleware.TimezoneMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nUSER_AGENT = \"%s (BookWyrm/%s; +https://%s/)\" % (\n requests.utils.default_user_agent(),\n VERSION,\n DOMAIN,\n)\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n 
PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = \"%s://%s%s\" % (PROTOCOL, DOMAIN, MEDIA_URL)\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}], "after_files": [{"content": "\"\"\" bookwyrm settings and configuration \"\"\"\nimport os\nfrom environs import Env\n\nimport requests\nfrom django.utils.translation import gettext_lazy as _\n\n\nenv = Env()\nDOMAIN = env(\"DOMAIN\")\nVERSION = \"0.0.1\"\n\nPAGE_LENGTH = env(\"PAGE_LENGTH\", 15)\nDEFAULT_LANGUAGE = env(\"DEFAULT_LANGUAGE\", \"English\")\n\nJS_CACHE = \"e5832a26\"\n\n# email\nEMAIL_BACKEND = env(\"EMAIL_BACKEND\", \"django.core.mail.backends.smtp.EmailBackend\")\nEMAIL_HOST = env(\"EMAIL_HOST\")\nEMAIL_PORT = env(\"EMAIL_PORT\", 587)\nEMAIL_HOST_USER = env(\"EMAIL_HOST_USER\")\nEMAIL_HOST_PASSWORD = env(\"EMAIL_HOST_PASSWORD\")\nEMAIL_USE_TLS = env.bool(\"EMAIL_USE_TLS\", True)\nEMAIL_USE_SSL = env.bool(\"EMAIL_USE_SSL\", False)\nDEFAULT_FROM_EMAIL = \"admin@{:s}\".format(env(\"DOMAIN\"))\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nLOCALE_PATHS = [\n os.path.join(BASE_DIR, \"locale\"),\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Preview image\nENABLE_PREVIEW_IMAGES = env.bool(\"ENABLE_PREVIEW_IMAGES\", False)\nPREVIEW_BG_COLOR = env.str(\"PREVIEW_BG_COLOR\", \"use_dominant_color_light\")\nPREVIEW_TEXT_COLOR = env.str(\"PREVIEW_TEXT_COLOR\", \"#363636\")\nPREVIEW_IMG_WIDTH = env.int(\"PREVIEW_IMG_WIDTH\", 1200)\nPREVIEW_IMG_HEIGHT = env.int(\"PREVIEW_IMG_HEIGHT\", 630)\nPREVIEW_DEFAULT_COVER_COLOR = env.str(\"PREVIEW_DEFAULT_COVER_COLOR\", \"#002549\")\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = env(\"SECRET_KEY\")\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = env.bool(\"DEBUG\", True)\nUSE_HTTPS = env.bool(\"USE_HTTPS\", False)\n\nALLOWED_HOSTS = env.list(\"ALLOWED_HOSTS\", [\"*\"])\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n 
\"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"django_rename_app\",\n \"bookwyrm\",\n \"celery\",\n \"imagekit\",\n \"storages\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"bookwyrm.timezone_middleware.TimezoneMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"bookwyrm.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\"templates\"],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"bookwyrm.context_processors.site_settings\",\n ],\n },\n },\n]\n\n\nWSGI_APPLICATION = \"bookwyrm.wsgi.application\"\n\n# redis/activity streams settings\nREDIS_ACTIVITY_HOST = env(\"REDIS_ACTIVITY_HOST\", \"localhost\")\nREDIS_ACTIVITY_PORT = env(\"REDIS_ACTIVITY_PORT\", 6379)\nREDIS_ACTIVITY_PASSWORD = env(\"REDIS_ACTIVITY_PASSWORD\", None)\n\nMAX_STREAM_LENGTH = int(env(\"MAX_STREAM_LENGTH\", 200))\n\nSTREAMS = [\n {\"key\": \"home\", \"name\": _(\"Home Timeline\"), \"shortname\": _(\"Home\")},\n {\"key\": \"books\", \"name\": _(\"Books Timeline\"), \"shortname\": _(\"Books\")},\n]\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql_psycopg2\",\n \"NAME\": env(\"POSTGRES_DB\", \"fedireads\"),\n \"USER\": env(\"POSTGRES_USER\", \"fedireads\"),\n \"PASSWORD\": env(\"POSTGRES_PASSWORD\", \"fedireads\"),\n \"HOST\": env(\"POSTGRES_HOST\", \"\"),\n \"PORT\": env(\"POSTGRES_PORT\", 5432),\n },\n}\n\n\nLOGIN_URL = \"/login/\"\nAUTH_USER_MODEL = \"bookwyrm.User\"\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\n# pylint: disable=line-too-long\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\nLANGUAGES = [\n (\"en-us\", _(\"English\")),\n (\"de-de\", _(\"German\")),\n (\"es\", _(\"Spanish\")),\n (\"fr-fr\", _(\"French\")),\n (\"zh-hans\", _(\"Simplified Chinese\")),\n (\"zh-hant\", _(\"Traditional Chinese\")),\n]\n\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\nUSER_AGENT = \"%s (BookWyrm/%s; +https://%s/)\" % (\n requests.utils.default_user_agent(),\n VERSION,\n DOMAIN,\n)\n\n# Imagekit generated thumbnails\nENABLE_THUMBNAIL_GENERATION = env.bool(\"ENABLE_THUMBNAIL_GENERATION\", False)\nIMAGEKIT_CACHEFILE_DIR = \"thumbnails\"\n\n# Static files (CSS, JavaScript, 
Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nPROJECT_DIR = os.path.dirname(os.path.abspath(__file__))\n\n# Storage\n\nPROTOCOL = \"http\"\nif USE_HTTPS:\n PROTOCOL = \"https\"\n\nUSE_S3 = env.bool(\"USE_S3\", False)\n\nif USE_S3:\n # AWS settings\n AWS_ACCESS_KEY_ID = env(\"AWS_ACCESS_KEY_ID\")\n AWS_SECRET_ACCESS_KEY = env(\"AWS_SECRET_ACCESS_KEY\")\n AWS_STORAGE_BUCKET_NAME = env(\"AWS_STORAGE_BUCKET_NAME\")\n AWS_S3_CUSTOM_DOMAIN = env(\"AWS_S3_CUSTOM_DOMAIN\")\n AWS_S3_REGION_NAME = env(\"AWS_S3_REGION_NAME\", \"\")\n AWS_S3_ENDPOINT_URL = env(\"AWS_S3_ENDPOINT_URL\")\n AWS_DEFAULT_ACL = \"public-read\"\n AWS_S3_OBJECT_PARAMETERS = {\"CacheControl\": \"max-age=86400\"}\n # S3 Static settings\n STATIC_LOCATION = \"static\"\n STATIC_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, STATIC_LOCATION)\n STATICFILES_STORAGE = \"bookwyrm.storage_backends.StaticStorage\"\n # S3 Media settings\n MEDIA_LOCATION = \"images\"\n MEDIA_URL = \"https://%s/%s/\" % (AWS_S3_CUSTOM_DOMAIN, MEDIA_LOCATION)\n MEDIA_FULL_URL = MEDIA_URL\n DEFAULT_FILE_STORAGE = \"bookwyrm.storage_backends.ImagesStorage\"\n # I don't know if it's used, but the site crashes without it\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\nelse:\n STATIC_URL = \"/static/\"\n STATIC_ROOT = os.path.join(BASE_DIR, env(\"STATIC_ROOT\", \"static\"))\n MEDIA_URL = \"/images/\"\n MEDIA_FULL_URL = \"%s://%s%s\" % (PROTOCOL, DOMAIN, MEDIA_URL)\n MEDIA_ROOT = os.path.join(BASE_DIR, env(\"MEDIA_ROOT\", \"images\"))\n", "path": "bookwyrm/settings.py"}]} | 2,869 | 117 |
gh_patches_debug_4276 | rasdani/github-patches | git_diff | awslabs__gluonts-68 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tutorial notebook Exception
I downloaded the notebook available at https://gluon-ts.mxnet.io/examples/forecasting/tutorial.html and ran it on my local laptop (Darwin Kernel Version 16.7.0, Anaconda3 Distro, Python 3.7.3, Jupyter 4.4.0, gluonts 0.1.1), and I get the following exception at:
```
from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
from gluonts.trainer import Trainer
```
```
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)
261 try:
--> 262 if issubclass(type_, val_type):
263 return validators
TypeError: issubclass() arg 1 must be a class
The above exception was the direct cause of the following exception:
RuntimeError Traceback (most recent call last)
<ipython-input-13-1fca1cb620ad> in <module>
----> 1 from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator
2 from gluonts.trainer import Trainer
~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/__init__.py in <module>
1 # Relative imports
----> 2 from ._estimator import SimpleFeedForwardEstimator
3
4 __all__ = ['SimpleFeedForwardEstimator']
~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/_estimator.py in <module>
7 # First-party imports
8 from gluonts.core.component import validated
----> 9 from gluonts.distribution import DistributionOutput, StudentTOutput
10 from gluonts.model.estimator import GluonEstimator
11 from gluonts.model.predictor import Predictor, RepresentableBlockPredictor
~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/__init__.py in <module>
14 # Relative imports
15 from . import bijection
---> 16 from .binned import Binned, BinnedOutput
17 from .distribution import Distribution
18 from .distribution_output import DistributionOutput
~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in <module>
166
167
--> 168 class BinnedOutput(DistributionOutput):
169 distr_cls: type = Binned
170
~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in BinnedOutput()
170
171 @validated()
--> 172 def __init__(self, bin_centers: List) -> None:
173 # cannot pass directly nd.array because it is not serializable
174 bc = mx.nd.array(bin_centers)
~/python/anaconda3/lib/python3.7/site-packages/gluonts/core/component.py in validator(ctor)
167 f'{ctor_clsnme}Model',
168 __config__=ConfigBase.Config,
--> 169 **ctor_fields,
170 )
171 else:
~/python/anaconda3/lib/python3.7/site-packages/pydantic/main.py in create_model(model_name, __config__, __base__, **field_definitions)
408 annotation=f_annotation,
409 class_validators=vg.get_validators(f_name),
--> 410 config=config,
411 )
412
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in infer(cls, name, value, annotation, class_validators, config)
105 required=required,
106 model_config=config,
--> 107 schema=schema,
108 )
109
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in __init__(self, name, type_, class_validators, default, required, model_config, alias, allow_none, schema)
85 self.shape: Shape = Shape.SINGLETON
86 self._schema: Schema = schema
---> 87 self.prepare()
88
89 @classmethod
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in prepare(self)
134
135 self._populate_sub_fields()
--> 136 self._populate_validators()
137
138 def schema(self, by_alias=True):
~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in _populate_validators(self)
264 *tuple(v.func for v in self.class_validators if not v.whole and v.pre),
265 *(get_validators() if get_validators else find_validators(self.type_,
--> 266 self.model_config.arbitrary_types_allowed)),
267 *tuple(v.func for v in self.class_validators if not v.whole and not v.pre),
268 )
~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)
263 return validators
264 except TypeError as e:
--> 265 raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e
266
267 if arbitrary_types_allowed:
RuntimeError: error checking inheritance of ~T (type: T)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/gluonts/distribution/binned.py`
Content:
```
1 # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License").
4 # You may not use this file except in compliance with the License.
5 # A copy of the License is located at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # or in the "license" file accompanying this file. This file is distributed
10 # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
11 # express or implied. See the License for the specific language governing
12 # permissions and limitations under the License.
13
14 # Standard library imports
15 from typing import Tuple, List
16
17 # Third-party imports
18 import mxnet as mx
19 from mxnet import gluon
20
21 # First-party imports
22 from gluonts.core.component import validated
23 from gluonts.model.common import Tensor
24
25 # Relative imports
26 from .distribution import Distribution, _sample_multiple, getF
27 from .distribution_output import DistributionOutput
28
29
30 class Binned(Distribution):
31 r"""
32 A binned distribution defined by a set of bins via
33 bin centers and bin probabilities.
34
35 Parameters
36 ----------
37 bin_probs
38 Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.
39 bin_centers
40 Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.
41 F
42 """
43
44 is_reparameterizable = False
45
46 def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:
47 self.bin_centers = bin_centers
48 self.bin_probs = bin_probs
49 self.F = F if F else getF(bin_probs)
50
51 self.bin_edges = Binned._compute_edges(self.F, bin_centers)
52
53 @staticmethod
54 def _compute_edges(F, bin_centers: Tensor) -> Tensor:
55 r"""
56 Computes the edges of the bins based on the centers. The first and last edge are set to :math:`10^{-10}` and
57 :math:`10^{10}`, repsectively.
58
59 Parameters
60 ----------
61 F
62 bin_centers
63 Tensor of shape `(*batch_shape, num_bins)`.
64
65 Returns
66 -------
67 Tensor
68 Tensor of shape (*gamma.shape, num_bins+1)
69 """
70
71 low = (
72 F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
73 - 1.0e10
74 )
75 high = (
76 F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))
77 + 1.0e10
78 )
79
80 means = (
81 bin_centers.slice_axis(axis=-1, begin=1, end=None)
82 + bin_centers.slice_axis(axis=-1, begin=0, end=-1)
83 ) / 2.0
84
85 return F.concat(low, means, high, dim=-1)
86
87 @property
88 def batch_shape(self) -> Tuple:
89 return self.bin_centers.shape[:-1]
90
91 @property
92 def event_shape(self) -> Tuple:
93 return ()
94
95 @property
96 def event_dim(self) -> int:
97 return 0
98
99 @property
100 def mean(self):
101 return (self.bin_probs * self.bin_centers).sum(axis=-1)
102
103 @property
104 def stddev(self):
105 Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)
106 return (Ex2 - self.mean.square()).sqrt()
107
108 def log_prob(self, x):
109 x = x.expand_dims(axis=-1)
110 # TODO: when mxnet has searchsorted replace this
111 left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)
112 right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)
113 mask = self.F.broadcast_lesser_equal(
114 left_edges, x
115 ) * self.F.broadcast_lesser(x, right_edges)
116 return (self.bin_probs.log() * mask).sum(axis=-1)
117
118 def sample(self, num_samples=None):
119 def s(bin_probs):
120 F = self.F
121 indices = F.sample_multinomial(bin_probs)
122 if num_samples is None:
123 return self.bin_centers.pick(indices, -1).reshape_like(
124 F.zeros_like(indices.astype('float32'))
125 )
126 else:
127 return F.repeat(
128 F.expand_dims(self.bin_centers, axis=0),
129 repeats=num_samples,
130 axis=0,
131 ).pick(indices, -1)
132
133 return _sample_multiple(s, self.bin_probs, num_samples=num_samples)
134
135
136 class BinnedArgs(gluon.HybridBlock):
137 def __init__(self, bin_centers: mx.nd.NDArray, **kwargs) -> None:
138 super().__init__(**kwargs)
139 with self.name_scope():
140 self.bin_centers = self.params.get_constant(
141 'bincenters', bin_centers
142 )
143 self.num_bins = bin_centers.shape[0]
144
145 # needs to be named self.proj for consistency with the ArgProj class and the inference tests
146 self.proj = gluon.nn.HybridSequential()
147 self.proj.add(
148 gluon.nn.Dense(
149 self.num_bins,
150 prefix='binproj',
151 flatten=False,
152 weight_initializer=mx.init.Xavier(),
153 )
154 )
155 self.proj.add(gluon.nn.HybridLambda('softmax'))
156
157 def hybrid_forward(
158 self, F, x: Tensor, bin_centers: Tensor, **kwargs
159 ) -> Tuple[Tensor, Tensor]:
160 ps = self.proj(x)
161 return (
162 ps.reshape(shape=(-2, -1, self.num_bins), reverse=1),
163 # For some reason hybridize does not work when returning constants directly
164 bin_centers + 0.0,
165 )
166
167
168 class BinnedOutput(DistributionOutput):
169 distr_cls: type = Binned
170
171 @validated()
172 def __init__(self, bin_centers: List) -> None:
173 # cannot pass directly nd.array because it is not serializable
174 bc = mx.nd.array(bin_centers)
175 assert len(bc.shape) == 1
176 self.bin_centers = bc
177
178 def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:
179 return BinnedArgs(self.bin_centers)
180
181 def distribution(self, args, scale=None) -> Binned:
182 probs, centers = args
183 F = getF(probs)
184
185 if scale is not None:
186 centers = F.broadcast_mul(centers, scale).expand_dims(axis=-2)
187 else:
188 centers = F.broadcast_mul(
189 centers, F.ones_like(probs.slice_axis(axis=-2, begin=0, end=1))
190 )
191 return Binned(probs, centers)
192
193 @property
194 def event_shape(self) -> Tuple:
195 return ()
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/gluonts/distribution/binned.py b/src/gluonts/distribution/binned.py
--- a/src/gluonts/distribution/binned.py
+++ b/src/gluonts/distribution/binned.py
@@ -169,7 +169,7 @@
distr_cls: type = Binned
@validated()
- def __init__(self, bin_centers: List) -> None:
+ def __init__(self, bin_centers: List[float]) -> None:
# cannot pass directly nd.array because it is not serializable
bc = mx.nd.array(bin_centers)
assert len(bc.shape) == 1
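
For readers unfamiliar with the failure mode: the traceback bottoms out in pydantic, not MXNet. gluonts' `@validated()` decorator builds a pydantic model from the constructor's type annotations (visible in the `component.py` frame above), and with the pydantic release shown in the traceback a bare `List` annotation leaves the validator lookup holding an unparameterized type variable (`~T`), hence the `RuntimeError`. The sketch below reproduces the distinction with `pydantic.create_model` directly; it is an illustration under that assumption, not gluonts code, and newer pydantic releases simply treat a bare `List` as `List[Any]`.

```python
from typing import List

import pydantic


def build_model(annotation):
    # Roughly what gluonts' @validated() does internally: turn the
    # constructor signature into a pydantic model used for validation.
    return pydantic.create_model("BinnedOutputModel", bin_centers=(annotation, ...))


try:
    build_model(List)  # bare generic: the old pydantic release trips over the TypeVar ~T
    print("bare List accepted (newer pydantic treats it as List[Any])")
except (RuntimeError, TypeError) as exc:
    print("bare List rejected:", exc)

build_model(List[float])  # parameterized annotation gives pydantic a concrete element type
print("List[float] accepted")
```

This is why the one-line patch above, changing `List` to `List[float]`, is enough to unblock the tutorial import.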
| {"golden_diff": "diff --git a/src/gluonts/distribution/binned.py b/src/gluonts/distribution/binned.py\n--- a/src/gluonts/distribution/binned.py\n+++ b/src/gluonts/distribution/binned.py\n@@ -169,7 +169,7 @@\n distr_cls: type = Binned\n \n @validated()\n- def __init__(self, bin_centers: List) -> None:\n+ def __init__(self, bin_centers: List[float]) -> None:\n # cannot pass directly nd.array because it is not serializable\n bc = mx.nd.array(bin_centers)\n assert len(bc.shape) == 1\n", "issue": "Tutorial notebook Exception\nI downloaded the notebook available on https://gluon-ts.mxnet.io/examples/forecasting/tutorial.html, and run it on my local laptop (Darwin Kernel Version 16.7.0, Anaconda3 Distro, Python 3.7.3, Jupyter 4.4.0, gluonts 0.1.1) and get the following exception at:\r\n\r\n```\r\nfrom gluonts.model.simple_feedforward import SimpleFeedForwardEstimator\r\nfrom gluonts.trainer import Trainer\r\n```\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)\r\n 261 try:\r\n--> 262 if issubclass(type_, val_type):\r\n 263 return validators\r\n\r\nTypeError: issubclass() arg 1 must be a class\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nRuntimeError Traceback (most recent call last)\r\n<ipython-input-13-1fca1cb620ad> in <module>\r\n----> 1 from gluonts.model.simple_feedforward import SimpleFeedForwardEstimator\r\n 2 from gluonts.trainer import Trainer\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/__init__.py in <module>\r\n 1 # Relative imports\r\n----> 2 from ._estimator import SimpleFeedForwardEstimator\r\n 3 \r\n 4 __all__ = ['SimpleFeedForwardEstimator']\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/model/simple_feedforward/_estimator.py in <module>\r\n 7 # First-party imports\r\n 8 from gluonts.core.component import validated\r\n----> 9 from gluonts.distribution import DistributionOutput, StudentTOutput\r\n 10 from gluonts.model.estimator import GluonEstimator\r\n 11 from gluonts.model.predictor import Predictor, RepresentableBlockPredictor\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/__init__.py in <module>\r\n 14 # Relative imports\r\n 15 from . 
import bijection\r\n---> 16 from .binned import Binned, BinnedOutput\r\n 17 from .distribution import Distribution\r\n 18 from .distribution_output import DistributionOutput\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in <module>\r\n 166 \r\n 167 \r\n--> 168 class BinnedOutput(DistributionOutput):\r\n 169 distr_cls: type = Binned\r\n 170 \r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/distribution/binned.py in BinnedOutput()\r\n 170 \r\n 171 @validated()\r\n--> 172 def __init__(self, bin_centers: List) -> None:\r\n 173 # cannot pass directly nd.array because it is not serializable\r\n 174 bc = mx.nd.array(bin_centers)\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/gluonts/core/component.py in validator(ctor)\r\n 167 f'{ctor_clsnme}Model',\r\n 168 __config__=ConfigBase.Config,\r\n--> 169 **ctor_fields,\r\n 170 )\r\n 171 else:\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/main.py in create_model(model_name, __config__, __base__, **field_definitions)\r\n 408 annotation=f_annotation,\r\n 409 class_validators=vg.get_validators(f_name),\r\n--> 410 config=config,\r\n 411 )\r\n 412 \r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in infer(cls, name, value, annotation, class_validators, config)\r\n 105 required=required,\r\n 106 model_config=config,\r\n--> 107 schema=schema,\r\n 108 )\r\n 109 \r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in __init__(self, name, type_, class_validators, default, required, model_config, alias, allow_none, schema)\r\n 85 self.shape: Shape = Shape.SINGLETON\r\n 86 self._schema: Schema = schema\r\n---> 87 self.prepare()\r\n 88 \r\n 89 @classmethod\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in prepare(self)\r\n 134 \r\n 135 self._populate_sub_fields()\r\n--> 136 self._populate_validators()\r\n 137 \r\n 138 def schema(self, by_alias=True):\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/fields.py in _populate_validators(self)\r\n 264 *tuple(v.func for v in self.class_validators if not v.whole and v.pre),\r\n 265 *(get_validators() if get_validators else find_validators(self.type_,\r\n--> 266 self.model_config.arbitrary_types_allowed)),\r\n 267 *tuple(v.func for v in self.class_validators if not v.whole and not v.pre),\r\n 268 )\r\n\r\n~/python/anaconda3/lib/python3.7/site-packages/pydantic/validators.py in find_validators(type_, arbitrary_types_allowed)\r\n 263 return validators\r\n 264 except TypeError as e:\r\n--> 265 raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') from e\r\n 266 \r\n 267 if arbitrary_types_allowed:\r\n\r\nRuntimeError: error checking inheritance of ~T (type: T)\r\n```\n", "before_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import Tuple, List\n\n# Third-party imports\nimport mxnet as mx\nfrom mxnet import gluon\n\n# First-party imports\nfrom gluonts.core.component import validated\nfrom gluonts.model.common import Tensor\n\n# Relative imports\nfrom .distribution import Distribution, _sample_multiple, getF\nfrom .distribution_output import DistributionOutput\n\n\nclass Binned(Distribution):\n r\"\"\"\n A binned distribution defined by a set of bins via\n bin centers and bin probabilities.\n\n Parameters\n ----------\n bin_probs\n Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.\n bin_centers\n Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:\n self.bin_centers = bin_centers\n self.bin_probs = bin_probs\n self.F = F if F else getF(bin_probs)\n\n self.bin_edges = Binned._compute_edges(self.F, bin_centers)\n\n @staticmethod\n def _compute_edges(F, bin_centers: Tensor) -> Tensor:\n r\"\"\"\n Computes the edges of the bins based on the centers. The first and last edge are set to :math:`10^{-10}` and\n :math:`10^{10}`, repsectively.\n\n Parameters\n ----------\n F\n bin_centers\n Tensor of shape `(*batch_shape, num_bins)`.\n\n Returns\n -------\n Tensor\n Tensor of shape (*gamma.shape, num_bins+1)\n \"\"\"\n\n low = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n - 1.0e10\n )\n high = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n + 1.0e10\n )\n\n means = (\n bin_centers.slice_axis(axis=-1, begin=1, end=None)\n + bin_centers.slice_axis(axis=-1, begin=0, end=-1)\n ) / 2.0\n\n return F.concat(low, means, high, dim=-1)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.bin_centers.shape[:-1]\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n @property\n def mean(self):\n return (self.bin_probs * self.bin_centers).sum(axis=-1)\n\n @property\n def stddev(self):\n Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)\n return (Ex2 - self.mean.square()).sqrt()\n\n def log_prob(self, x):\n x = x.expand_dims(axis=-1)\n # TODO: when mxnet has searchsorted replace this\n left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)\n right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)\n mask = self.F.broadcast_lesser_equal(\n left_edges, x\n ) * self.F.broadcast_lesser(x, right_edges)\n return (self.bin_probs.log() * mask).sum(axis=-1)\n\n def sample(self, num_samples=None):\n def s(bin_probs):\n F = self.F\n indices = F.sample_multinomial(bin_probs)\n if num_samples is None:\n return self.bin_centers.pick(indices, -1).reshape_like(\n F.zeros_like(indices.astype('float32'))\n )\n else:\n return F.repeat(\n F.expand_dims(self.bin_centers, axis=0),\n repeats=num_samples,\n axis=0,\n ).pick(indices, -1)\n\n return _sample_multiple(s, self.bin_probs, num_samples=num_samples)\n\n\nclass BinnedArgs(gluon.HybridBlock):\n def __init__(self, bin_centers: mx.nd.NDArray, **kwargs) -> None:\n super().__init__(**kwargs)\n with self.name_scope():\n self.bin_centers = self.params.get_constant(\n 'bincenters', bin_centers\n )\n self.num_bins = bin_centers.shape[0]\n\n # needs to be named self.proj for consistency with the ArgProj class and the inference tests\n self.proj = 
gluon.nn.HybridSequential()\n self.proj.add(\n gluon.nn.Dense(\n self.num_bins,\n prefix='binproj',\n flatten=False,\n weight_initializer=mx.init.Xavier(),\n )\n )\n self.proj.add(gluon.nn.HybridLambda('softmax'))\n\n def hybrid_forward(\n self, F, x: Tensor, bin_centers: Tensor, **kwargs\n ) -> Tuple[Tensor, Tensor]:\n ps = self.proj(x)\n return (\n ps.reshape(shape=(-2, -1, self.num_bins), reverse=1),\n # For some reason hybridize does not work when returning constants directly\n bin_centers + 0.0,\n )\n\n\nclass BinnedOutput(DistributionOutput):\n distr_cls: type = Binned\n\n @validated()\n def __init__(self, bin_centers: List) -> None:\n # cannot pass directly nd.array because it is not serializable\n bc = mx.nd.array(bin_centers)\n assert len(bc.shape) == 1\n self.bin_centers = bc\n\n def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:\n return BinnedArgs(self.bin_centers)\n\n def distribution(self, args, scale=None) -> Binned:\n probs, centers = args\n F = getF(probs)\n\n if scale is not None:\n centers = F.broadcast_mul(centers, scale).expand_dims(axis=-2)\n else:\n centers = F.broadcast_mul(\n centers, F.ones_like(probs.slice_axis(axis=-2, begin=0, end=1))\n )\n return Binned(probs, centers)\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n", "path": "src/gluonts/distribution/binned.py"}], "after_files": [{"content": "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\").\n# You may not use this file except in compliance with the License.\n# A copy of the License is located at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\n# Standard library imports\nfrom typing import Tuple, List\n\n# Third-party imports\nimport mxnet as mx\nfrom mxnet import gluon\n\n# First-party imports\nfrom gluonts.core.component import validated\nfrom gluonts.model.common import Tensor\n\n# Relative imports\nfrom .distribution import Distribution, _sample_multiple, getF\nfrom .distribution_output import DistributionOutput\n\n\nclass Binned(Distribution):\n r\"\"\"\n A binned distribution defined by a set of bins via\n bin centers and bin probabilities.\n\n Parameters\n ----------\n bin_probs\n Tensor containing the bin probabilities, of shape `(*batch_shape, num_bins)`.\n bin_centers\n Tensor containing the bin centers, of shape `(*batch_shape, num_bins)`.\n F\n \"\"\"\n\n is_reparameterizable = False\n\n def __init__(self, bin_probs: Tensor, bin_centers: Tensor, F=None) -> None:\n self.bin_centers = bin_centers\n self.bin_probs = bin_probs\n self.F = F if F else getF(bin_probs)\n\n self.bin_edges = Binned._compute_edges(self.F, bin_centers)\n\n @staticmethod\n def _compute_edges(F, bin_centers: Tensor) -> Tensor:\n r\"\"\"\n Computes the edges of the bins based on the centers. 
The first and last edge are set to :math:`10^{-10}` and\n :math:`10^{10}`, repsectively.\n\n Parameters\n ----------\n F\n bin_centers\n Tensor of shape `(*batch_shape, num_bins)`.\n\n Returns\n -------\n Tensor\n Tensor of shape (*gamma.shape, num_bins+1)\n \"\"\"\n\n low = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n - 1.0e10\n )\n high = (\n F.zeros_like(bin_centers.slice_axis(axis=-1, begin=0, end=1))\n + 1.0e10\n )\n\n means = (\n bin_centers.slice_axis(axis=-1, begin=1, end=None)\n + bin_centers.slice_axis(axis=-1, begin=0, end=-1)\n ) / 2.0\n\n return F.concat(low, means, high, dim=-1)\n\n @property\n def batch_shape(self) -> Tuple:\n return self.bin_centers.shape[:-1]\n\n @property\n def event_shape(self) -> Tuple:\n return ()\n\n @property\n def event_dim(self) -> int:\n return 0\n\n @property\n def mean(self):\n return (self.bin_probs * self.bin_centers).sum(axis=-1)\n\n @property\n def stddev(self):\n Ex2 = (self.bin_probs * self.bin_centers.square()).sum(axis=-1)\n return (Ex2 - self.mean.square()).sqrt()\n\n def log_prob(self, x):\n x = x.expand_dims(axis=-1)\n # TODO: when mxnet has searchsorted replace this\n left_edges = self.bin_edges.slice_axis(axis=-1, begin=0, end=-1)\n right_edges = self.bin_edges.slice_axis(axis=-1, begin=1, end=None)\n mask = self.F.broadcast_lesser_equal(\n left_edges, x\n ) * self.F.broadcast_lesser(x, right_edges)\n return (self.bin_probs.log() * mask).sum(axis=-1)\n\n def sample(self, num_samples=None):\n def s(bin_probs):\n F = self.F\n indices = F.sample_multinomial(bin_probs)\n if num_samples is None:\n return self.bin_centers.pick(indices, -1).reshape_like(\n F.zeros_like(indices.astype('float32'))\n )\n else:\n return F.repeat(\n F.expand_dims(self.bin_centers, axis=0),\n repeats=num_samples,\n axis=0,\n ).pick(indices, -1)\n\n return _sample_multiple(s, self.bin_probs, num_samples=num_samples)\n\n\nclass BinnedArgs(gluon.HybridBlock):\n def __init__(self, bin_centers: mx.nd.NDArray, **kwargs) -> None:\n super().__init__(**kwargs)\n with self.name_scope():\n self.bin_centers = self.params.get_constant(\n 'bincenters', bin_centers\n )\n self.num_bins = bin_centers.shape[0]\n\n # needs to be named self.proj for consistency with the ArgProj class and the inference tests\n self.proj = gluon.nn.HybridSequential()\n self.proj.add(\n gluon.nn.Dense(\n self.num_bins,\n prefix='binproj',\n flatten=False,\n weight_initializer=mx.init.Xavier(),\n )\n )\n self.proj.add(gluon.nn.HybridLambda('softmax'))\n\n def hybrid_forward(\n self, F, x: Tensor, bin_centers: Tensor, **kwargs\n ) -> Tuple[Tensor, Tensor]:\n ps = self.proj(x)\n return (\n ps.reshape(shape=(-2, -1, self.num_bins), reverse=1),\n # For some reason hybridize does not work when returning constants directly\n bin_centers + 0.0,\n )\n\n\nclass BinnedOutput(DistributionOutput):\n distr_cls: type = Binned\n\n @validated()\n def __init__(self, bin_centers: List[float]) -> None:\n # cannot pass directly nd.array because it is not serializable\n bc = mx.nd.array(bin_centers)\n assert len(bc.shape) == 1\n self.bin_centers = bc\n\n def get_args_proj(self, *args, **kwargs) -> gluon.nn.HybridBlock:\n return BinnedArgs(self.bin_centers)\n\n def distribution(self, args, scale=None) -> Binned:\n probs, centers = args\n F = getF(probs)\n\n if scale is not None:\n centers = F.broadcast_mul(centers, scale).expand_dims(axis=-2)\n else:\n centers = F.broadcast_mul(\n centers, F.ones_like(probs.slice_axis(axis=-2, begin=0, end=1))\n )\n return Binned(probs, centers)\n\n @property\n def 
event_shape(self) -> Tuple:\n return ()\n", "path": "src/gluonts/distribution/binned.py"}]} | 3,574 | 145 |
gh_patches_debug_19935 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-863 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using exclude paths for all instances of a Flask app
This is less a feature request than a question about how to use `OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS` to exclude routes from being traced for all instances of a Flask application (i.e. regardless of the host it is deployed to). I initially thought something like the following would work:
```python
from os import environ
environ["OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS"] = "/route1,/route2"
```
But it appears that this would be executed after the `Configuration` singleton gets initialized, so the setting won't get picked up. Calling `configuration.Configuration._reset()` after setting `environ` does make this work, but the docs state it is not meant for production code, and it feels like a hack.
Of course, setting the environment variable on the deployment host would solve this. The issue is that in some instances we may have a Flask application with, e.g., a ping route that should never be traced, and we may not have control over the environment variables of the host being deployed to (so the app has to somehow set the exclude paths itself). So I suppose my question is: can the application programmatically set its own exclude paths? Thanks in advance for the help.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-api/src/opentelemetry/configuration/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Simple configuration manager
17
18 This is a configuration manager for OpenTelemetry. It reads configuration
19 values from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose
20 characters are only alphanumeric characters and unserscores, except for the
21 first character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.
22
23 For example, these environment variables will be read:
24
25 1. ``OPENTELEMETRY_PYTHON_SOMETHING``
26 2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``
27 3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``
28 4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``
29 5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``
30
31 These won't:
32
33 1. ``OPENTELEMETRY_PYTH_SOMETHING``
34 2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``
35 3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``
36
37 The values stored in the environment variables can be found in an instance of
38 ``opentelemetry.configuration.Configuration``. This class can be instantiated
39 freely because instantiating it returns always the same object.
40
41 For example, if the environment variable
42 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then
43 ``Configuration().meter_provider == "my_meter_provider"`` would be ``True``.
44
45 Non defined attributes will always return ``None``. This is intended to make it
46 easier to use the ``Configuration`` object in actual code, because it won't be
47 necessary to check for the attribute to be defined first.
48
49 Environment variables used by OpenTelemetry
50 -------------------------------------------
51
52 1. OPENTELEMETRY_PYTHON_METER_PROVIDER
53 2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER
54
55 The value of these environment variables should be the name of the entry point
56 that points to the class that implements either provider. This OpenTelemetry
57 API package provides one entry point for each, which can be found in the
58 setup.py file::
59
60 entry_points={
61 ...
62 "opentelemetry_meter_provider": [
63 "default_meter_provider = "
64 "opentelemetry.metrics:DefaultMeterProvider"
65 ],
66 "opentelemetry_tracer_provider": [
67 "default_tracer_provider = "
68 "opentelemetry.trace:DefaultTracerProvider"
69 ],
70 }
71
72 To use the meter provider above, then the
73 ``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to
74 ``"default_meter_provider"`` (this is not actually necessary since the
75 OpenTelemetry API provided providers are the default ones used if no
76 configuration is found in the environment variables).
77
78 Configuration values that are exactly ``"True"`` or ``"False"`` will be
79 converted to its boolean values of ``True`` and ``False`` respectively.
80
81 Configuration values that can be casted to integers or floats will be casted.
82
83 This object can be used by any OpenTelemetry component, native or external.
84 For that reason, the ``Configuration`` object is designed to be immutable.
85 If a component would change the value of one of the ``Configuration`` object
86 attributes then another component that relied on that value may break, leading
87 to bugs that are very hard to debug. To avoid this situation, the preferred
88 approach for components that need a different value than the one provided by
89 the ``Configuration`` object is to implement a mechanism that allows the user
90 to override this value instead of changing it.
91 """
92
93 from os import environ
94 from re import fullmatch
95 from typing import ClassVar, Dict, Optional, TypeVar, Union
96
97 ConfigValue = Union[str, bool, int, float]
98 _T = TypeVar("_T", ConfigValue, Optional[ConfigValue])
99
100
101 class Configuration:
102 _instance = None # type: ClassVar[Optional[Configuration]]
103 _config_map = {} # type: ClassVar[Dict[str, ConfigValue]]
104
105 def __new__(cls) -> "Configuration":
106 if cls._instance is not None:
107 instance = cls._instance
108 else:
109
110 instance = super().__new__(cls)
111 for key, value_str in environ.items():
112
113 match = fullmatch(
114 r"OPENTELEMETRY_PYTHON_([A-Za-z_][\w_]*)", key
115 )
116
117 if match is not None:
118
119 key = match.group(1)
120 value = value_str # type: ConfigValue
121
122 if value_str == "True":
123 value = True
124 elif value_str == "False":
125 value = False
126 else:
127 try:
128 value = int(value_str)
129 except ValueError:
130 pass
131 try:
132 value = float(value_str)
133 except ValueError:
134 pass
135
136 instance._config_map[key] = value
137
138 cls._instance = instance
139
140 return instance
141
142 def __getattr__(self, name: str) -> Optional[ConfigValue]:
143 return self._config_map.get(name)
144
145 def __setattr__(self, key: str, val: ConfigValue) -> None:
146 if key == "_config_map":
147 super().__setattr__(key, val)
148 else:
149 raise AttributeError(key)
150
151 def get(self, name: str, default: _T) -> _T:
152 """Use this typed method for dynamic access instead of `getattr`
153
154 :rtype: str or bool or int or float or None
155 """
156 val = self._config_map.get(name, default)
157 return val
158
159 @classmethod
160 def _reset(cls) -> None:
161 """
162 This method "resets" the global configuration attributes
163
164 It is not intended to be used by production code but by testing code
165 only.
166 """
167
168 if cls._instance:
169 cls._instance._config_map.clear() # pylint: disable=protected-access
170 cls._instance = None
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py
+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py
@@ -142,19 +142,18 @@
def __getattr__(self, name: str) -> Optional[ConfigValue]:
return self._config_map.get(name)
- def __setattr__(self, key: str, val: ConfigValue) -> None:
- if key == "_config_map":
- super().__setattr__(key, val)
+ def __setattr__(self, name: str, value: ConfigValue) -> None:
+ if name not in self._config_map.keys():
+ self._config_map[name] = value
else:
- raise AttributeError(key)
+ raise AttributeError(name)
def get(self, name: str, default: _T) -> _T:
"""Use this typed method for dynamic access instead of `getattr`
:rtype: str or bool or int or float or None
"""
- val = self._config_map.get(name, default)
- return val
+ return self._config_map.get(name, default)
@classmethod
def _reset(cls) -> None:
| {"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/configuration/__init__.py b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n--- a/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n+++ b/opentelemetry-api/src/opentelemetry/configuration/__init__.py\n@@ -142,19 +142,18 @@\n def __getattr__(self, name: str) -> Optional[ConfigValue]:\n return self._config_map.get(name)\n \n- def __setattr__(self, key: str, val: ConfigValue) -> None:\n- if key == \"_config_map\":\n- super().__setattr__(key, val)\n+ def __setattr__(self, name: str, value: ConfigValue) -> None:\n+ if name not in self._config_map.keys():\n+ self._config_map[name] = value\n else:\n- raise AttributeError(key)\n+ raise AttributeError(name)\n \n def get(self, name: str, default: _T) -> _T:\n \"\"\"Use this typed method for dynamic access instead of `getattr`\n \n :rtype: str or bool or int or float or None\n \"\"\"\n- val = self._config_map.get(name, default)\n- return val\n+ return self._config_map.get(name, default)\n \n @classmethod\n def _reset(cls) -> None:\n", "issue": "Using exclude paths for all instances of a Flask app\nThis is less a feature request than a question about how to use `OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS` to exclude routes from being traced for all instances of a flask application (i.e. regardless of host being deployed to). I initially thought something like below would work -\r\n\r\n```python\r\nfrom os import environ\r\nenviron[\"OPENTELEMETRY_PYTHON_FLASK_EXCLUDED_HOSTS\"] = \"/route1,/route2\"\r\n```\r\n\r\nBut it appears that this would be executed after the `Configuration` singleton gets initialized, and won't get picked up. Calling `configuration.Configuration._reset()` after setting `environ` seems like a hack to make this work but the docs state it's not for production code and it feels hacky as well.\r\n\r\nOf course setting the environment on the deployed to host would solve this. The issue is that in some instances we may have a flask application that has e.g. a ping route that should never be traced, and we may not have control over the environment variables of the host being deployed to (so the app has to somehow set the exclude paths). So I suppose my question is, can the application programmatically set it's own exclude paths? Thanks in advance for the help.\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. 
``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n``\"default_meter_provider\"`` (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\nConfiguration values that are exactly ``\"True\"`` or ``\"False\"`` will be\nconverted to its boolean values of ``True`` and ``False`` respectively.\n\nConfiguration values that can be casted to integers or floats will be casted.\n\nThis object can be used by any OpenTelemetry component, native or external.\nFor that reason, the ``Configuration`` object is designed to be immutable.\nIf a component would change the value of one of the ``Configuration`` object\nattributes then another component that relied on that value may break, leading\nto bugs that are very hard to debug. 
To avoid this situation, the preferred\napproach for components that need a different value than the one provided by\nthe ``Configuration`` object is to implement a mechanism that allows the user\nto override this value instead of changing it.\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\nfrom typing import ClassVar, Dict, Optional, TypeVar, Union\n\nConfigValue = Union[str, bool, int, float]\n_T = TypeVar(\"_T\", ConfigValue, Optional[ConfigValue])\n\n\nclass Configuration:\n _instance = None # type: ClassVar[Optional[Configuration]]\n _config_map = {} # type: ClassVar[Dict[str, ConfigValue]]\n\n def __new__(cls) -> \"Configuration\":\n if cls._instance is not None:\n instance = cls._instance\n else:\n\n instance = super().__new__(cls)\n for key, value_str in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n value = value_str # type: ConfigValue\n\n if value_str == \"True\":\n value = True\n elif value_str == \"False\":\n value = False\n else:\n try:\n value = int(value_str)\n except ValueError:\n pass\n try:\n value = float(value_str)\n except ValueError:\n pass\n\n instance._config_map[key] = value\n\n cls._instance = instance\n\n return instance\n\n def __getattr__(self, name: str) -> Optional[ConfigValue]:\n return self._config_map.get(name)\n\n def __setattr__(self, key: str, val: ConfigValue) -> None:\n if key == \"_config_map\":\n super().__setattr__(key, val)\n else:\n raise AttributeError(key)\n\n def get(self, name: str, default: _T) -> _T:\n \"\"\"Use this typed method for dynamic access instead of `getattr`\n\n :rtype: str or bool or int or float or None\n \"\"\"\n val = self._config_map.get(name, default)\n return val\n\n @classmethod\n def _reset(cls) -> None:\n \"\"\"\n This method \"resets\" the global configuration attributes\n\n It is not intended to be used by production code but by testing code\n only.\n \"\"\"\n\n if cls._instance:\n cls._instance._config_map.clear() # pylint: disable=protected-access\n cls._instance = None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSimple configuration manager\n\nThis is a configuration manager for OpenTelemetry. It reads configuration\nvalues from environment variables prefixed with ``OPENTELEMETRY_PYTHON_`` whose\ncharacters are only alphanumeric characters and unserscores, except for the\nfirst character after ``OPENTELEMETRY_PYTHON_`` which must not be a number.\n\nFor example, these environment variables will be read:\n\n1. ``OPENTELEMETRY_PYTHON_SOMETHING``\n2. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND__ELSE``\n4. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else``\n5. ``OPENTELEMETRY_PYTHON_SOMETHING_ELSE_AND_else2``\n\nThese won't:\n\n1. ``OPENTELEMETRY_PYTH_SOMETHING``\n2. 
``OPENTELEMETRY_PYTHON_2_SOMETHING_AND__ELSE``\n3. ``OPENTELEMETRY_PYTHON_SOMETHING_%_ELSE``\n\nThe values stored in the environment variables can be found in an instance of\n``opentelemetry.configuration.Configuration``. This class can be instantiated\nfreely because instantiating it returns always the same object.\n\nFor example, if the environment variable\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` value is ``my_meter_provider``, then\n``Configuration().meter_provider == \"my_meter_provider\"`` would be ``True``.\n\nNon defined attributes will always return ``None``. This is intended to make it\neasier to use the ``Configuration`` object in actual code, because it won't be\nnecessary to check for the attribute to be defined first.\n\nEnvironment variables used by OpenTelemetry\n-------------------------------------------\n\n1. OPENTELEMETRY_PYTHON_METER_PROVIDER\n2. OPENTELEMETRY_PYTHON_TRACER_PROVIDER\n\nThe value of these environment variables should be the name of the entry point\nthat points to the class that implements either provider. This OpenTelemetry\nAPI package provides one entry point for each, which can be found in the\nsetup.py file::\n\n entry_points={\n ...\n \"opentelemetry_meter_provider\": [\n \"default_meter_provider = \"\n \"opentelemetry.metrics:DefaultMeterProvider\"\n ],\n \"opentelemetry_tracer_provider\": [\n \"default_tracer_provider = \"\n \"opentelemetry.trace:DefaultTracerProvider\"\n ],\n }\n\nTo use the meter provider above, then the\n``OPENTELEMETRY_PYTHON_METER_PROVIDER`` should be set to\n``\"default_meter_provider\"`` (this is not actually necessary since the\nOpenTelemetry API provided providers are the default ones used if no\nconfiguration is found in the environment variables).\n\nConfiguration values that are exactly ``\"True\"`` or ``\"False\"`` will be\nconverted to its boolean values of ``True`` and ``False`` respectively.\n\nConfiguration values that can be casted to integers or floats will be casted.\n\nThis object can be used by any OpenTelemetry component, native or external.\nFor that reason, the ``Configuration`` object is designed to be immutable.\nIf a component would change the value of one of the ``Configuration`` object\nattributes then another component that relied on that value may break, leading\nto bugs that are very hard to debug. 
To avoid this situation, the preferred\napproach for components that need a different value than the one provided by\nthe ``Configuration`` object is to implement a mechanism that allows the user\nto override this value instead of changing it.\n\"\"\"\n\nfrom os import environ\nfrom re import fullmatch\nfrom typing import ClassVar, Dict, Optional, TypeVar, Union\n\nConfigValue = Union[str, bool, int, float]\n_T = TypeVar(\"_T\", ConfigValue, Optional[ConfigValue])\n\n\nclass Configuration:\n _instance = None # type: ClassVar[Optional[Configuration]]\n _config_map = {} # type: ClassVar[Dict[str, ConfigValue]]\n\n def __new__(cls) -> \"Configuration\":\n if cls._instance is not None:\n instance = cls._instance\n else:\n\n instance = super().__new__(cls)\n for key, value_str in environ.items():\n\n match = fullmatch(\n r\"OPENTELEMETRY_PYTHON_([A-Za-z_][\\w_]*)\", key\n )\n\n if match is not None:\n\n key = match.group(1)\n value = value_str # type: ConfigValue\n\n if value_str == \"True\":\n value = True\n elif value_str == \"False\":\n value = False\n else:\n try:\n value = int(value_str)\n except ValueError:\n pass\n try:\n value = float(value_str)\n except ValueError:\n pass\n\n instance._config_map[key] = value\n\n cls._instance = instance\n\n return instance\n\n def __getattr__(self, name: str) -> Optional[ConfigValue]:\n return self._config_map.get(name)\n\n def __setattr__(self, name: str, value: ConfigValue) -> None:\n if name not in self._config_map.keys():\n self._config_map[name] = value\n else:\n raise AttributeError(name)\n\n def get(self, name: str, default: _T) -> _T:\n \"\"\"Use this typed method for dynamic access instead of `getattr`\n\n :rtype: str or bool or int or float or None\n \"\"\"\n return self._config_map.get(name, default)\n\n @classmethod\n def _reset(cls) -> None:\n \"\"\"\n This method \"resets\" the global configuration attributes\n\n It is not intended to be used by production code but by testing code\n only.\n \"\"\"\n\n if cls._instance:\n cls._instance._config_map.clear() # pylint: disable=protected-access\n cls._instance = None\n", "path": "opentelemetry-api/src/opentelemetry/configuration/__init__.py"}]} | 2,363 | 298 |
gh_patches_debug_1686 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1064 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for Django 3.2
Django 3.2 is slated for a release in April. Running the test suite, a few problems came up:
- [ ] App label needs to be a valid Python identifier; ours is not (renaming it from `elasticapm.contrib.django` to `elasticapm` should suffice)
Several test failures:
- [ ] `test_broken_500_handler_with_middleware`
- [ ] `test_404_middleware`
- [ ] `test_response_error_id_middleware`
- [ ] `test_django_logging_request_kwarg`
- [ ] `test_django_logging_middleware`
- [ ] `test_capture_body_config_is_dynamic_for_transactions`
- [ ] `test_capture_headers_config_is_dynamic_for_transactions`
- [ ] `test_capture_headers`
- [ ] `test_transaction_name_from_route`
Most of these look similar in nature; I suspect an issue with middlewares. Nothing jumps out in the [release notes](https://docs.djangoproject.com/en/3.2/releases/3.2/), though.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/django/apps.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2019, Elasticsearch BV
4 # All rights reserved.
5 #
6 # Redistribution and use in source and binary forms, with or without
7 # modification, are permitted provided that the following conditions are met:
8 #
9 # * Redistributions of source code must retain the above copyright notice, this
10 # list of conditions and the following disclaimer.
11 #
12 # * Redistributions in binary form must reproduce the above copyright notice,
13 # this list of conditions and the following disclaimer in the documentation
14 # and/or other materials provided with the distribution.
15 #
16 # * Neither the name of the copyright holder nor the names of its
17 # contributors may be used to endorse or promote products derived from
18 # this software without specific prior written permission.
19 #
20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31 from functools import partial
32
33 from django.apps import AppConfig
34 from django.conf import settings as django_settings
35
36 from elasticapm.conf import constants
37 from elasticapm.contrib.django.client import get_client
38 from elasticapm.utils.disttracing import TraceParent
39 from elasticapm.utils.logging import get_logger
40 from elasticapm.utils.wsgi import get_current_url
41
42 logger = get_logger("elasticapm.traces")
43
44 ERROR_DISPATCH_UID = "elasticapm-exceptions"
45 REQUEST_START_DISPATCH_UID = "elasticapm-request-start"
46 REQUEST_FINISH_DISPATCH_UID = "elasticapm-request-stop"
47
48 MIDDLEWARE_NAME = "elasticapm.contrib.django.middleware.TracingMiddleware"
49
50 TRACEPARENT_HEADER_NAME_WSGI = "HTTP_" + constants.TRACEPARENT_HEADER_NAME.upper().replace("-", "_")
51 TRACEPARENT_LEGACY_HEADER_NAME_WSGI = "HTTP_" + constants.TRACEPARENT_LEGACY_HEADER_NAME.upper().replace("-", "_")
52 TRACESTATE_HEADER_NAME_WSGI = "HTTP_" + constants.TRACESTATE_HEADER_NAME.upper().replace("-", "_")
53
54
55 class ElasticAPMConfig(AppConfig):
56 name = "elasticapm.contrib.django"
57 label = "elasticapm.contrib.django"
58 verbose_name = "ElasticAPM"
59
60 def __init__(self, *args, **kwargs):
61 super(ElasticAPMConfig, self).__init__(*args, **kwargs)
62 self.client = None
63
64 def ready(self):
65 self.client = get_client()
66 if self.client.config.autoinsert_django_middleware:
67 self.insert_middleware(django_settings)
68 register_handlers(self.client)
69 if self.client.config.instrument and self.client.config.enabled:
70 instrument(self.client)
71 else:
72 self.client.logger.debug("Skipping instrumentation. INSTRUMENT is set to False.")
73
74 @staticmethod
75 def insert_middleware(settings):
76 if hasattr(settings, "MIDDLEWARE"):
77 middleware_list = settings.MIDDLEWARE
78 middleware_attr = "MIDDLEWARE"
79 elif hasattr(settings, "MIDDLEWARE_CLASSES"): # can be removed when we drop support for Django 1.x
80 middleware_list = settings.MIDDLEWARE_CLASSES
81 middleware_attr = "MIDDLEWARE_CLASSES"
82 else:
83 logger.debug("Could not find middleware setting, not autoinserting tracing middleware")
84 return
85 is_tuple = isinstance(middleware_list, tuple)
86 if is_tuple:
87 middleware_list = list(middleware_list)
88 elif not isinstance(middleware_list, list):
89 logger.debug("%s setting is not of type list or tuple, not autoinserting tracing middleware")
90 return
91 if middleware_list is not None and MIDDLEWARE_NAME not in middleware_list:
92 logger.debug("Inserting tracing middleware into settings.%s", middleware_attr)
93 middleware_list.insert(0, MIDDLEWARE_NAME)
94 if is_tuple:
95 middleware_list = tuple(middleware_list)
96 if middleware_list:
97 setattr(settings, middleware_attr, middleware_list)
98
99
100 def register_handlers(client):
101 from django.core.signals import got_request_exception, request_finished, request_started
102
103 from elasticapm.contrib.django.handlers import exception_handler
104
105 # Connect to Django's internal signal handlers
106 got_request_exception.disconnect(dispatch_uid=ERROR_DISPATCH_UID)
107 got_request_exception.connect(partial(exception_handler, client), dispatch_uid=ERROR_DISPATCH_UID, weak=False)
108
109 request_started.disconnect(dispatch_uid=REQUEST_START_DISPATCH_UID)
110 request_started.connect(
111 partial(_request_started_handler, client), dispatch_uid=REQUEST_START_DISPATCH_UID, weak=False
112 )
113
114 request_finished.disconnect(dispatch_uid=REQUEST_FINISH_DISPATCH_UID)
115 request_finished.connect(
116 lambda sender, **kwargs: client.end_transaction() if _should_start_transaction(client) else None,
117 dispatch_uid=REQUEST_FINISH_DISPATCH_UID,
118 weak=False,
119 )
120
121 # If we can import celery, register ourselves as exception handler
122 try:
123 import celery # noqa F401
124
125 from elasticapm.contrib.celery import register_exception_tracking
126
127 try:
128 register_exception_tracking(client)
129 except Exception as e:
130 client.logger.exception("Failed installing django-celery hook: %s" % e)
131 except ImportError:
132 client.logger.debug("Not instrumenting Celery, couldn't import")
133
134
135 def _request_started_handler(client, sender, *args, **kwargs):
136 if not _should_start_transaction(client):
137 return
138 # try to find trace id
139 trace_parent = None
140 if "environ" in kwargs:
141 url = get_current_url(kwargs["environ"], strip_querystring=True, path_only=True)
142 if client.should_ignore_url(url):
143 logger.debug("Ignoring request due to %s matching transaction_ignore_urls")
144 return
145 trace_parent = TraceParent.from_headers(
146 kwargs["environ"],
147 TRACEPARENT_HEADER_NAME_WSGI,
148 TRACEPARENT_LEGACY_HEADER_NAME_WSGI,
149 TRACESTATE_HEADER_NAME_WSGI,
150 )
151 elif "scope" in kwargs:
152 scope = kwargs["scope"]
153 fake_environ = {"SCRIPT_NAME": scope.get("root_path", ""), "PATH_INFO": scope["path"], "QUERY_STRING": ""}
154 url = get_current_url(fake_environ, strip_querystring=True, path_only=True)
155 if client.should_ignore_url(url):
156 logger.debug("Ignoring request due to %s matching transaction_ignore_urls")
157 return
158 if "headers" in scope:
159 trace_parent = TraceParent.from_headers(scope["headers"])
160 client.begin_transaction("request", trace_parent=trace_parent)
161
162
163 def instrument(client):
164 """
165 Auto-instruments code to get nice spans
166 """
167 from elasticapm.instrumentation.control import instrument
168
169 instrument()
170 try:
171 import celery # noqa F401
172
173 from elasticapm.contrib.celery import register_instrumentation
174
175 register_instrumentation(client)
176 except ImportError:
177 client.logger.debug("Not instrumenting Celery, couldn't import")
178
179
180 def _should_start_transaction(client):
181 middleware_attr = "MIDDLEWARE" if getattr(django_settings, "MIDDLEWARE", None) is not None else "MIDDLEWARE_CLASSES"
182 middleware = getattr(django_settings, middleware_attr)
183 return (
184 (not django_settings.DEBUG or client.config.debug)
185 and middleware
186 and "elasticapm.contrib.django.middleware.TracingMiddleware" in middleware
187 )
188
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/contrib/django/apps.py b/elasticapm/contrib/django/apps.py
--- a/elasticapm/contrib/django/apps.py
+++ b/elasticapm/contrib/django/apps.py
@@ -54,7 +54,7 @@
class ElasticAPMConfig(AppConfig):
name = "elasticapm.contrib.django"
- label = "elasticapm.contrib.django"
+ label = "elasticapm"
verbose_name = "ElasticAPM"
def __init__(self, *args, **kwargs):
| {"golden_diff": "diff --git a/elasticapm/contrib/django/apps.py b/elasticapm/contrib/django/apps.py\n--- a/elasticapm/contrib/django/apps.py\n+++ b/elasticapm/contrib/django/apps.py\n@@ -54,7 +54,7 @@\n \n class ElasticAPMConfig(AppConfig):\n name = \"elasticapm.contrib.django\"\n- label = \"elasticapm.contrib.django\"\n+ label = \"elasticapm\"\n verbose_name = \"ElasticAPM\"\n \n def __init__(self, *args, **kwargs):\n", "issue": "Add support for Django 3.2\nDjango 3.2 is slated for a release in April. Running the test suite, a few problems came up:\r\n\r\n- [ ] App label needs to be a valid Python identifier, ours is not (renaming it from `elasticapm.contrib.django` to `elasticapm` should suffice)\r\n\r\nSeveral test failures:\r\n\r\n- [ ] `test_broken_500_handler_with_middleware`\r\n- [ ] `test_404_middleware`\r\n- [ ] `test_response_error_id_middleware`\r\n- [ ] `test_django_logging_request_kwarg`\r\n- [ ] `test_django_logging_middleware`\r\n- [ ] `test_capture_body_config_is_dynamic_for_transactions`\r\n- [ ] `test_capture_headers_config_is_dynamic_for_transactions`\r\n- [ ] `test_capture_headers`\r\n- [ ] `test_transaction_name_from_route`\r\n\r\nMost of these look similar in nature, I suspect an issue with middlewares. Nothing jumps out in the [release notes](https://docs.djangoproject.com/en/3.2/releases/3.2/), though.\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom functools import partial\n\nfrom django.apps import AppConfig\nfrom django.conf import settings as django_settings\n\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.client import get_client\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.wsgi import get_current_url\n\nlogger = get_logger(\"elasticapm.traces\")\n\nERROR_DISPATCH_UID = \"elasticapm-exceptions\"\nREQUEST_START_DISPATCH_UID = \"elasticapm-request-start\"\nREQUEST_FINISH_DISPATCH_UID = \"elasticapm-request-stop\"\n\nMIDDLEWARE_NAME = \"elasticapm.contrib.django.middleware.TracingMiddleware\"\n\nTRACEPARENT_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACEPARENT_LEGACY_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_LEGACY_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACESTATE_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACESTATE_HEADER_NAME.upper().replace(\"-\", \"_\")\n\n\nclass ElasticAPMConfig(AppConfig):\n name = \"elasticapm.contrib.django\"\n label = \"elasticapm.contrib.django\"\n verbose_name = \"ElasticAPM\"\n\n def __init__(self, *args, **kwargs):\n super(ElasticAPMConfig, self).__init__(*args, **kwargs)\n self.client = None\n\n def ready(self):\n self.client = get_client()\n if self.client.config.autoinsert_django_middleware:\n self.insert_middleware(django_settings)\n register_handlers(self.client)\n if self.client.config.instrument and self.client.config.enabled:\n instrument(self.client)\n else:\n self.client.logger.debug(\"Skipping instrumentation. 
INSTRUMENT is set to False.\")\n\n @staticmethod\n def insert_middleware(settings):\n if hasattr(settings, \"MIDDLEWARE\"):\n middleware_list = settings.MIDDLEWARE\n middleware_attr = \"MIDDLEWARE\"\n elif hasattr(settings, \"MIDDLEWARE_CLASSES\"): # can be removed when we drop support for Django 1.x\n middleware_list = settings.MIDDLEWARE_CLASSES\n middleware_attr = \"MIDDLEWARE_CLASSES\"\n else:\n logger.debug(\"Could not find middleware setting, not autoinserting tracing middleware\")\n return\n is_tuple = isinstance(middleware_list, tuple)\n if is_tuple:\n middleware_list = list(middleware_list)\n elif not isinstance(middleware_list, list):\n logger.debug(\"%s setting is not of type list or tuple, not autoinserting tracing middleware\")\n return\n if middleware_list is not None and MIDDLEWARE_NAME not in middleware_list:\n logger.debug(\"Inserting tracing middleware into settings.%s\", middleware_attr)\n middleware_list.insert(0, MIDDLEWARE_NAME)\n if is_tuple:\n middleware_list = tuple(middleware_list)\n if middleware_list:\n setattr(settings, middleware_attr, middleware_list)\n\n\ndef register_handlers(client):\n from django.core.signals import got_request_exception, request_finished, request_started\n\n from elasticapm.contrib.django.handlers import exception_handler\n\n # Connect to Django's internal signal handlers\n got_request_exception.disconnect(dispatch_uid=ERROR_DISPATCH_UID)\n got_request_exception.connect(partial(exception_handler, client), dispatch_uid=ERROR_DISPATCH_UID, weak=False)\n\n request_started.disconnect(dispatch_uid=REQUEST_START_DISPATCH_UID)\n request_started.connect(\n partial(_request_started_handler, client), dispatch_uid=REQUEST_START_DISPATCH_UID, weak=False\n )\n\n request_finished.disconnect(dispatch_uid=REQUEST_FINISH_DISPATCH_UID)\n request_finished.connect(\n lambda sender, **kwargs: client.end_transaction() if _should_start_transaction(client) else None,\n dispatch_uid=REQUEST_FINISH_DISPATCH_UID,\n weak=False,\n )\n\n # If we can import celery, register ourselves as exception handler\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_exception_tracking\n\n try:\n register_exception_tracking(client)\n except Exception as e:\n client.logger.exception(\"Failed installing django-celery hook: %s\" % e)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _request_started_handler(client, sender, *args, **kwargs):\n if not _should_start_transaction(client):\n return\n # try to find trace id\n trace_parent = None\n if \"environ\" in kwargs:\n url = get_current_url(kwargs[\"environ\"], strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n trace_parent = TraceParent.from_headers(\n kwargs[\"environ\"],\n TRACEPARENT_HEADER_NAME_WSGI,\n TRACEPARENT_LEGACY_HEADER_NAME_WSGI,\n TRACESTATE_HEADER_NAME_WSGI,\n )\n elif \"scope\" in kwargs:\n scope = kwargs[\"scope\"]\n fake_environ = {\"SCRIPT_NAME\": scope.get(\"root_path\", \"\"), \"PATH_INFO\": scope[\"path\"], \"QUERY_STRING\": \"\"}\n url = get_current_url(fake_environ, strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n if \"headers\" in scope:\n trace_parent = TraceParent.from_headers(scope[\"headers\"])\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n\ndef instrument(client):\n 
\"\"\"\n Auto-instruments code to get nice spans\n \"\"\"\n from elasticapm.instrumentation.control import instrument\n\n instrument()\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_instrumentation\n\n register_instrumentation(client)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _should_start_transaction(client):\n middleware_attr = \"MIDDLEWARE\" if getattr(django_settings, \"MIDDLEWARE\", None) is not None else \"MIDDLEWARE_CLASSES\"\n middleware = getattr(django_settings, middleware_attr)\n return (\n (not django_settings.DEBUG or client.config.debug)\n and middleware\n and \"elasticapm.contrib.django.middleware.TracingMiddleware\" in middleware\n )\n", "path": "elasticapm/contrib/django/apps.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom functools import partial\n\nfrom django.apps import AppConfig\nfrom django.conf import settings as django_settings\n\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.django.client import get_client\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\nfrom elasticapm.utils.wsgi import get_current_url\n\nlogger = get_logger(\"elasticapm.traces\")\n\nERROR_DISPATCH_UID = \"elasticapm-exceptions\"\nREQUEST_START_DISPATCH_UID = \"elasticapm-request-start\"\nREQUEST_FINISH_DISPATCH_UID = \"elasticapm-request-stop\"\n\nMIDDLEWARE_NAME = \"elasticapm.contrib.django.middleware.TracingMiddleware\"\n\nTRACEPARENT_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACEPARENT_LEGACY_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACEPARENT_LEGACY_HEADER_NAME.upper().replace(\"-\", \"_\")\nTRACESTATE_HEADER_NAME_WSGI = \"HTTP_\" + constants.TRACESTATE_HEADER_NAME.upper().replace(\"-\", \"_\")\n\n\nclass ElasticAPMConfig(AppConfig):\n name = \"elasticapm.contrib.django\"\n label = \"elasticapm\"\n verbose_name = \"ElasticAPM\"\n\n def __init__(self, *args, **kwargs):\n super(ElasticAPMConfig, self).__init__(*args, **kwargs)\n self.client = None\n\n def ready(self):\n self.client = get_client()\n if self.client.config.autoinsert_django_middleware:\n self.insert_middleware(django_settings)\n register_handlers(self.client)\n if self.client.config.instrument and self.client.config.enabled:\n instrument(self.client)\n else:\n self.client.logger.debug(\"Skipping instrumentation. 
INSTRUMENT is set to False.\")\n\n @staticmethod\n def insert_middleware(settings):\n if hasattr(settings, \"MIDDLEWARE\"):\n middleware_list = settings.MIDDLEWARE\n middleware_attr = \"MIDDLEWARE\"\n elif hasattr(settings, \"MIDDLEWARE_CLASSES\"): # can be removed when we drop support for Django 1.x\n middleware_list = settings.MIDDLEWARE_CLASSES\n middleware_attr = \"MIDDLEWARE_CLASSES\"\n else:\n logger.debug(\"Could not find middleware setting, not autoinserting tracing middleware\")\n return\n is_tuple = isinstance(middleware_list, tuple)\n if is_tuple:\n middleware_list = list(middleware_list)\n elif not isinstance(middleware_list, list):\n logger.debug(\"%s setting is not of type list or tuple, not autoinserting tracing middleware\")\n return\n if middleware_list is not None and MIDDLEWARE_NAME not in middleware_list:\n logger.debug(\"Inserting tracing middleware into settings.%s\", middleware_attr)\n middleware_list.insert(0, MIDDLEWARE_NAME)\n if is_tuple:\n middleware_list = tuple(middleware_list)\n if middleware_list:\n setattr(settings, middleware_attr, middleware_list)\n\n\ndef register_handlers(client):\n from django.core.signals import got_request_exception, request_finished, request_started\n\n from elasticapm.contrib.django.handlers import exception_handler\n\n # Connect to Django's internal signal handlers\n got_request_exception.disconnect(dispatch_uid=ERROR_DISPATCH_UID)\n got_request_exception.connect(partial(exception_handler, client), dispatch_uid=ERROR_DISPATCH_UID, weak=False)\n\n request_started.disconnect(dispatch_uid=REQUEST_START_DISPATCH_UID)\n request_started.connect(\n partial(_request_started_handler, client), dispatch_uid=REQUEST_START_DISPATCH_UID, weak=False\n )\n\n request_finished.disconnect(dispatch_uid=REQUEST_FINISH_DISPATCH_UID)\n request_finished.connect(\n lambda sender, **kwargs: client.end_transaction() if _should_start_transaction(client) else None,\n dispatch_uid=REQUEST_FINISH_DISPATCH_UID,\n weak=False,\n )\n\n # If we can import celery, register ourselves as exception handler\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_exception_tracking\n\n try:\n register_exception_tracking(client)\n except Exception as e:\n client.logger.exception(\"Failed installing django-celery hook: %s\" % e)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _request_started_handler(client, sender, *args, **kwargs):\n if not _should_start_transaction(client):\n return\n # try to find trace id\n trace_parent = None\n if \"environ\" in kwargs:\n url = get_current_url(kwargs[\"environ\"], strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n trace_parent = TraceParent.from_headers(\n kwargs[\"environ\"],\n TRACEPARENT_HEADER_NAME_WSGI,\n TRACEPARENT_LEGACY_HEADER_NAME_WSGI,\n TRACESTATE_HEADER_NAME_WSGI,\n )\n elif \"scope\" in kwargs:\n scope = kwargs[\"scope\"]\n fake_environ = {\"SCRIPT_NAME\": scope.get(\"root_path\", \"\"), \"PATH_INFO\": scope[\"path\"], \"QUERY_STRING\": \"\"}\n url = get_current_url(fake_environ, strip_querystring=True, path_only=True)\n if client.should_ignore_url(url):\n logger.debug(\"Ignoring request due to %s matching transaction_ignore_urls\")\n return\n if \"headers\" in scope:\n trace_parent = TraceParent.from_headers(scope[\"headers\"])\n client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n\ndef instrument(client):\n 
\"\"\"\n Auto-instruments code to get nice spans\n \"\"\"\n from elasticapm.instrumentation.control import instrument\n\n instrument()\n try:\n import celery # noqa F401\n\n from elasticapm.contrib.celery import register_instrumentation\n\n register_instrumentation(client)\n except ImportError:\n client.logger.debug(\"Not instrumenting Celery, couldn't import\")\n\n\ndef _should_start_transaction(client):\n middleware_attr = \"MIDDLEWARE\" if getattr(django_settings, \"MIDDLEWARE\", None) is not None else \"MIDDLEWARE_CLASSES\"\n middleware = getattr(django_settings, middleware_attr)\n return (\n (not django_settings.DEBUG or client.config.debug)\n and middleware\n and \"elasticapm.contrib.django.middleware.TracingMiddleware\" in middleware\n )\n", "path": "elasticapm/contrib/django/apps.py"}]} | 2,636 | 129 |
gh_patches_debug_17245 | rasdani/github-patches | git_diff | mars-project__mars-2683 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Optimization that compacts multiple filters into `eval` generates unexpected node in graph
<!--
Thank you for your contribution!
Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.
-->
**Describe the bug**
Optimization that compacts multiple filters into `eval` generates an unexpected node in the graph.
**To Reproduce**
To help us reproduce this bug, please provide the information below:
1. Your Python version
2. The version of Mars you use
3. Versions of crucial packages, such as numpy, scipy and pandas
4. Full stack of the error.
5. Minimized code to reproduce the error.
```python
@enter_mode(build=True)
def test_arithmetic_query(setup):
df1 = md.DataFrame(raw, chunk_size=10)
df2 = md.DataFrame(raw2, chunk_size=10)
df3 = df1.merge(df2, on='A', suffixes=('', '_'))
df3['K'] = df4 = df3["A"] * (1 - df3["B"])
graph = TileableGraph([df3.data])
next(TileableGraphBuilder(graph).build())
records = optimize(graph)
opt_df4 = records.get_optimization_result(df4.data)
assert opt_df4.op.expr == "(`A`) * ((1) - (`B`))"
assert len(graph) == 5 # for now len(graph) is 6
assert len([n for n in graph if isinstance(n.op, DataFrameEval)]) == 1 # and 2 evals exist
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mars/optimization/logical/core.py`
Content:
```
1 # Copyright 1999-2021 Alibaba Group Holding Ltd.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import weakref
16 from abc import ABC, abstractmethod
17 from collections import defaultdict
18 from dataclasses import dataclass
19 from enum import Enum
20 from typing import Dict, List, Tuple, Type
21
22 from ...core import OperandType, ChunkType, EntityType, enter_mode
23 from ...core.graph import EntityGraph
24 from ...core.operand import Operand
25
26
27 class OptimizationRecordType(Enum):
28 replace = 0
29 new = 1
30 delete = 2
31
32
33 @dataclass
34 class OptimizationRecord:
35 original_chunk: ChunkType = None
36 new_chunk: ChunkType = None
37 record_type: OptimizationRecordType = None
38
39
40 class OptimizationRecords:
41 _records: List[OptimizationRecord]
42 _original_chunk_to_records: Dict[ChunkType, OptimizationRecord]
43
44 def __init__(self):
45 self._records = list()
46 self._original_chunk_to_records = dict()
47 self._optimized_chunk_to_records = dict()
48
49 def append_record(self, record: OptimizationRecord):
50 self._records.append(record)
51 if record.record_type in (
52 OptimizationRecordType.replace,
53 OptimizationRecordType.delete,
54 ):
55 self._original_chunk_to_records[record.original_chunk] = record
56 if record.record_type in (
57 OptimizationRecordType.new,
58 OptimizationRecordType.replace,
59 ):
60 self._optimized_chunk_to_records[record.new_chunk] = record
61
62 def get_optimization_result(self, original_chunk: ChunkType) -> ChunkType:
63 chunk = original_chunk
64 if chunk not in self._original_chunk_to_records:
65 return
66 while chunk in self._original_chunk_to_records:
67 record = self._original_chunk_to_records[chunk]
68 if record.record_type == OptimizationRecordType.replace:
69 chunk = record.new_chunk
70 else:
71 assert record.record_type == OptimizationRecordType.delete
72 return None
73 return chunk
74
75 def get_original_chunk(self, optimized_chunk: ChunkType) -> ChunkType:
76 chunk = optimized_chunk
77 if chunk not in self._optimized_chunk_to_records:
78 return
79 while chunk in self._optimized_chunk_to_records:
80 record = self._optimized_chunk_to_records[chunk]
81 if record.record_type == OptimizationRecordType.replace:
82 chunk = record.original_chunk
83 else:
84 assert record.record_type == OptimizationRecordType.new
85 return None
86 return chunk
87
88
89 class OptimizationRule(ABC):
90 _instances: Dict[
91 Tuple[Type["OptimizationRule"], EntityGraph, OptimizationRecords],
92 "OptimizationRule",
93 ] = dict()
94 _preds_to_remove = weakref.WeakKeyDictionary()
95
96 def __init__(
97 self,
98 graph: EntityGraph,
99 records: OptimizationRecords,
100 optimizer_cls: Type["Optimizer"],
101 ):
102 self._graph = graph
103 self._records = records
104 self._optimizer_cls = optimizer_cls
105
106 def __new__(
107 cls,
108 graph: EntityGraph,
109 records: OptimizationRecords,
110 optimizer_cls: Type["Optimizer"],
111 ):
112 if (cls, graph, records) in cls._instances:
113 return cls._instances[cls, graph, records]
114 inst = cls._instances[cls, graph, records] = object.__new__(cls)
115 return inst
116
117 @abstractmethod
118 def match(self, op: OperandType) -> bool:
119 """
120 If this operand matches this rule.
121
122 Parameters
123 ----------
124 op : OperandType
125 Operand.
126
127 Returns
128 -------
129 matched : bool
130 Matched rule or not.
131 """
132
133 @abstractmethod
134 def apply(self, op: OperandType):
135 """
136 Apply rule to an operand.
137
138 Parameters
139 ----------
140 op : OperandType
141 Operand
142 """
143
144 def _replace_node(self, original_node: EntityType, new_node: EntityType):
145 predecessors = self._graph.predecessors(original_node)
146 successors = self._graph.successors(original_node)
147 self._graph.remove_node(original_node)
148 self._graph.add_node(new_node)
149 for pred in predecessors:
150 self._graph.add_edge(pred, new_node)
151 for succ in successors:
152 self._graph.add_edge(new_node, succ)
153
154 @classmethod
155 def _add_collapsable_predecessor(cls, node: EntityType, predecessor: EntityType):
156 if predecessor not in cls._preds_to_remove:
157 cls._preds_to_remove[predecessor] = {node}
158 else:
159 cls._preds_to_remove[predecessor].add(node)
160
161 def _remove_collapsable_predecessors(self, node: EntityType):
162 node = self._records.get_optimization_result(node) or node
163 preds_opt_to_remove = []
164 for pred in self._graph.predecessors(node):
165 pred_original = self._records.get_original_chunk(pred) or pred
166 pred_opt = self._records.get_optimization_result(pred) or pred
167 if pred_opt in self._graph.results or pred_original in self._graph.results:
168 continue
169 affect_succ = self._preds_to_remove.get(pred_original) or []
170 affect_succ_opt = [
171 self._records.get_optimization_result(s) or s for s in affect_succ
172 ]
173 if all(s in affect_succ_opt for s in self._graph.successors(pred)):
174 preds_opt_to_remove.append((pred_original, pred_opt))
175
176 for pred_original, pred_opt in preds_opt_to_remove:
177 self._graph.remove_node(pred_opt)
178 self._records.append_record(
179 OptimizationRecord(pred_original, None, OptimizationRecordType.delete)
180 )
181
182
183 class Optimizer(ABC):
184 _rules: List[Type[OptimizationRule]]
185 _op_to_rules: Dict[Type[OperandType], List[Type[OptimizationRule]]]
186
187 @classmethod
188 def register_rule(
189 cls, operand_types: List[Type[OperandType]], rule: Type[OptimizationRule]
190 ):
191 if not hasattr(cls, "_rules"):
192 cls._rules = []
193 cls._rules.append(rule)
194
195 if not hasattr(cls, "_op_to_rules"):
196 cls._op_to_rules = defaultdict(list)
197 for operand_type in operand_types:
198 cls._op_to_rules[operand_type].append(rule)
199
200 @classmethod
201 def get_rule_types(
202 cls, operand_type: Type[OperandType]
203 ) -> List[Type[OptimizationRule]]:
204 rule_types = cls._op_to_rules.get(operand_type, None)
205 if rule_types is None:
206 for op_cls in operand_type.__mro__:
207 if op_cls is Operand:
208 break
209 rule_types = cls._op_to_rules.get(op_cls)
210 if rule_types is not None:
211 break
212 cls._op_to_rules[operand_type] = rule_types or []
213 return rule_types
214
215 @classmethod
216 def _replace_inputs(cls, graph: EntityGraph, records: OptimizationRecords):
217 for node in graph:
218 for succ in graph.successors(node):
219 input_optimized = False
220 new_inputs = []
221 for inp in succ.inputs:
222 optimized = records.get_optimization_result(inp)
223 if optimized is None:
224 optimized = inp
225 if optimized is not inp:
226 input_optimized = True
227 new_inputs.append(optimized)
228 if input_optimized:
229 succ.inputs = new_inputs
230
231 @classmethod
232 @enter_mode(build=True)
233 def optimize(cls, graph: EntityGraph) -> OptimizationRecords:
234 """
235 Optimize a graph.
236
237 Parameters
238 ----------
239 graph : EntityGraph
240 Tileable or chunk graph.
241
242 Returns
243 -------
244 optimization_records : OptimizationRecords
245 Optimization records.
246 """
247 records = OptimizationRecords()
248 optimized = False
249 for rule_type in cls._rules:
250 visited = set()
251 for entity in list(graph.topological_iter()):
252 op = entity.op
253 if op in visited:
254 continue
255 visited.add(op)
256
257 rule_types = cls.get_rule_types(type(op)) or []
258 if rule_type not in rule_types:
259 continue
260
261 rule = rule_type(graph, records, cls)
262 if entity not in graph: # pragma: no cover
263 # maybe removed during optimization
264 continue
265 if rule.match(op):
266 optimized = True
267 rule.apply(op)
268 if optimized:
269 cls._replace_inputs(graph, records)
270 return records
271
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mars/optimization/logical/core.py b/mars/optimization/logical/core.py
--- a/mars/optimization/logical/core.py
+++ b/mars/optimization/logical/core.py
@@ -162,8 +162,12 @@
node = self._records.get_optimization_result(node) or node
preds_opt_to_remove = []
for pred in self._graph.predecessors(node):
- pred_original = self._records.get_original_chunk(pred) or pred
- pred_opt = self._records.get_optimization_result(pred) or pred
+ pred_original = self._records.get_original_chunk(pred)
+ pred_original = pred_original if pred_original is not None else pred
+
+ pred_opt = self._records.get_optimization_result(pred)
+ pred_opt = pred_opt if pred_opt is not None else pred
+
if pred_opt in self._graph.results or pred_original in self._graph.results:
continue
affect_succ = self._preds_to_remove.get(pred_original) or []
| {"golden_diff": "diff --git a/mars/optimization/logical/core.py b/mars/optimization/logical/core.py\n--- a/mars/optimization/logical/core.py\n+++ b/mars/optimization/logical/core.py\n@@ -162,8 +162,12 @@\n node = self._records.get_optimization_result(node) or node\n preds_opt_to_remove = []\n for pred in self._graph.predecessors(node):\n- pred_original = self._records.get_original_chunk(pred) or pred\n- pred_opt = self._records.get_optimization_result(pred) or pred\n+ pred_original = self._records.get_original_chunk(pred)\n+ pred_original = pred_original if pred_original is not None else pred\n+\n+ pred_opt = self._records.get_optimization_result(pred)\n+ pred_opt = pred_opt if pred_opt is not None else pred\n+\n if pred_opt in self._graph.results or pred_original in self._graph.results:\n continue\n affect_succ = self._preds_to_remove.get(pred_original) or []\n", "issue": "[BUG] Optimization that compacts multiple filters into `eval` generates unexpected node in graph\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\n\r\nOptimization that compacts multiple filters into eval generates unexpected node in graph.\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version\r\n2. The version of Mars you use\r\n3. Versions of crucial packages, such as numpy, scipy and pandas\r\n4. Full stack of the error.\r\n5. Minimized code to reproduce the error.\r\n\r\n```python\r\n@enter_mode(build=True)\r\ndef test_arithmetic_query(setup):\r\n df1 = md.DataFrame(raw, chunk_size=10)\r\n df2 = md.DataFrame(raw2, chunk_size=10)\r\n df3 = df1.merge(df2, on='A', suffixes=('', '_'))\r\n df3['K'] = df4 = df3[\"A\"] * (1 - df3[\"B\"])\r\n graph = TileableGraph([df3.data])\r\n next(TileableGraphBuilder(graph).build())\r\n records = optimize(graph)\r\n opt_df4 = records.get_optimization_result(df4.data)\r\n assert opt_df4.op.expr == \"(`A`) * ((1) - (`B`))\"\r\n assert len(graph) == 5 # for now len(graph) is 6\r\n assert len([n for n in graph if isinstance(n.op, DataFrameEval)]) == 1 # and 2 evals exist\r\n```\r\n\n", "before_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport weakref\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Dict, List, Tuple, Type\n\nfrom ...core import OperandType, ChunkType, EntityType, enter_mode\nfrom ...core.graph import EntityGraph\nfrom ...core.operand import Operand\n\n\nclass OptimizationRecordType(Enum):\n replace = 0\n new = 1\n delete = 2\n\n\n@dataclass\nclass OptimizationRecord:\n original_chunk: ChunkType = None\n new_chunk: ChunkType = None\n record_type: OptimizationRecordType = None\n\n\nclass OptimizationRecords:\n _records: List[OptimizationRecord]\n _original_chunk_to_records: Dict[ChunkType, 
OptimizationRecord]\n\n def __init__(self):\n self._records = list()\n self._original_chunk_to_records = dict()\n self._optimized_chunk_to_records = dict()\n\n def append_record(self, record: OptimizationRecord):\n self._records.append(record)\n if record.record_type in (\n OptimizationRecordType.replace,\n OptimizationRecordType.delete,\n ):\n self._original_chunk_to_records[record.original_chunk] = record\n if record.record_type in (\n OptimizationRecordType.new,\n OptimizationRecordType.replace,\n ):\n self._optimized_chunk_to_records[record.new_chunk] = record\n\n def get_optimization_result(self, original_chunk: ChunkType) -> ChunkType:\n chunk = original_chunk\n if chunk not in self._original_chunk_to_records:\n return\n while chunk in self._original_chunk_to_records:\n record = self._original_chunk_to_records[chunk]\n if record.record_type == OptimizationRecordType.replace:\n chunk = record.new_chunk\n else:\n assert record.record_type == OptimizationRecordType.delete\n return None\n return chunk\n\n def get_original_chunk(self, optimized_chunk: ChunkType) -> ChunkType:\n chunk = optimized_chunk\n if chunk not in self._optimized_chunk_to_records:\n return\n while chunk in self._optimized_chunk_to_records:\n record = self._optimized_chunk_to_records[chunk]\n if record.record_type == OptimizationRecordType.replace:\n chunk = record.original_chunk\n else:\n assert record.record_type == OptimizationRecordType.new\n return None\n return chunk\n\n\nclass OptimizationRule(ABC):\n _instances: Dict[\n Tuple[Type[\"OptimizationRule\"], EntityGraph, OptimizationRecords],\n \"OptimizationRule\",\n ] = dict()\n _preds_to_remove = weakref.WeakKeyDictionary()\n\n def __init__(\n self,\n graph: EntityGraph,\n records: OptimizationRecords,\n optimizer_cls: Type[\"Optimizer\"],\n ):\n self._graph = graph\n self._records = records\n self._optimizer_cls = optimizer_cls\n\n def __new__(\n cls,\n graph: EntityGraph,\n records: OptimizationRecords,\n optimizer_cls: Type[\"Optimizer\"],\n ):\n if (cls, graph, records) in cls._instances:\n return cls._instances[cls, graph, records]\n inst = cls._instances[cls, graph, records] = object.__new__(cls)\n return inst\n\n @abstractmethod\n def match(self, op: OperandType) -> bool:\n \"\"\"\n If this operand matches this rule.\n\n Parameters\n ----------\n op : OperandType\n Operand.\n\n Returns\n -------\n matched : bool\n Matched rule or not.\n \"\"\"\n\n @abstractmethod\n def apply(self, op: OperandType):\n \"\"\"\n Apply rule to an operand.\n\n Parameters\n ----------\n op : OperandType\n Operand\n \"\"\"\n\n def _replace_node(self, original_node: EntityType, new_node: EntityType):\n predecessors = self._graph.predecessors(original_node)\n successors = self._graph.successors(original_node)\n self._graph.remove_node(original_node)\n self._graph.add_node(new_node)\n for pred in predecessors:\n self._graph.add_edge(pred, new_node)\n for succ in successors:\n self._graph.add_edge(new_node, succ)\n\n @classmethod\n def _add_collapsable_predecessor(cls, node: EntityType, predecessor: EntityType):\n if predecessor not in cls._preds_to_remove:\n cls._preds_to_remove[predecessor] = {node}\n else:\n cls._preds_to_remove[predecessor].add(node)\n\n def _remove_collapsable_predecessors(self, node: EntityType):\n node = self._records.get_optimization_result(node) or node\n preds_opt_to_remove = []\n for pred in self._graph.predecessors(node):\n pred_original = self._records.get_original_chunk(pred) or pred\n pred_opt = self._records.get_optimization_result(pred) or pred\n if 
pred_opt in self._graph.results or pred_original in self._graph.results:\n continue\n affect_succ = self._preds_to_remove.get(pred_original) or []\n affect_succ_opt = [\n self._records.get_optimization_result(s) or s for s in affect_succ\n ]\n if all(s in affect_succ_opt for s in self._graph.successors(pred)):\n preds_opt_to_remove.append((pred_original, pred_opt))\n\n for pred_original, pred_opt in preds_opt_to_remove:\n self._graph.remove_node(pred_opt)\n self._records.append_record(\n OptimizationRecord(pred_original, None, OptimizationRecordType.delete)\n )\n\n\nclass Optimizer(ABC):\n _rules: List[Type[OptimizationRule]]\n _op_to_rules: Dict[Type[OperandType], List[Type[OptimizationRule]]]\n\n @classmethod\n def register_rule(\n cls, operand_types: List[Type[OperandType]], rule: Type[OptimizationRule]\n ):\n if not hasattr(cls, \"_rules\"):\n cls._rules = []\n cls._rules.append(rule)\n\n if not hasattr(cls, \"_op_to_rules\"):\n cls._op_to_rules = defaultdict(list)\n for operand_type in operand_types:\n cls._op_to_rules[operand_type].append(rule)\n\n @classmethod\n def get_rule_types(\n cls, operand_type: Type[OperandType]\n ) -> List[Type[OptimizationRule]]:\n rule_types = cls._op_to_rules.get(operand_type, None)\n if rule_types is None:\n for op_cls in operand_type.__mro__:\n if op_cls is Operand:\n break\n rule_types = cls._op_to_rules.get(op_cls)\n if rule_types is not None:\n break\n cls._op_to_rules[operand_type] = rule_types or []\n return rule_types\n\n @classmethod\n def _replace_inputs(cls, graph: EntityGraph, records: OptimizationRecords):\n for node in graph:\n for succ in graph.successors(node):\n input_optimized = False\n new_inputs = []\n for inp in succ.inputs:\n optimized = records.get_optimization_result(inp)\n if optimized is None:\n optimized = inp\n if optimized is not inp:\n input_optimized = True\n new_inputs.append(optimized)\n if input_optimized:\n succ.inputs = new_inputs\n\n @classmethod\n @enter_mode(build=True)\n def optimize(cls, graph: EntityGraph) -> OptimizationRecords:\n \"\"\"\n Optimize a graph.\n\n Parameters\n ----------\n graph : EntityGraph\n Tileable or chunk graph.\n\n Returns\n -------\n optimization_records : OptimizationRecords\n Optimization records.\n \"\"\"\n records = OptimizationRecords()\n optimized = False\n for rule_type in cls._rules:\n visited = set()\n for entity in list(graph.topological_iter()):\n op = entity.op\n if op in visited:\n continue\n visited.add(op)\n\n rule_types = cls.get_rule_types(type(op)) or []\n if rule_type not in rule_types:\n continue\n\n rule = rule_type(graph, records, cls)\n if entity not in graph: # pragma: no cover\n # maybe removed during optimization\n continue\n if rule.match(op):\n optimized = True\n rule.apply(op)\n if optimized:\n cls._replace_inputs(graph, records)\n return records\n", "path": "mars/optimization/logical/core.py"}], "after_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport weakref\nfrom abc import ABC, 
abstractmethod\nfrom collections import defaultdict\nfrom dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Dict, List, Tuple, Type\n\nfrom ...core import OperandType, ChunkType, EntityType, enter_mode\nfrom ...core.graph import EntityGraph\nfrom ...core.operand import Operand\n\n\nclass OptimizationRecordType(Enum):\n replace = 0\n new = 1\n delete = 2\n\n\n@dataclass\nclass OptimizationRecord:\n original_chunk: ChunkType = None\n new_chunk: ChunkType = None\n record_type: OptimizationRecordType = None\n\n\nclass OptimizationRecords:\n _records: List[OptimizationRecord]\n _original_chunk_to_records: Dict[ChunkType, OptimizationRecord]\n\n def __init__(self):\n self._records = list()\n self._original_chunk_to_records = dict()\n self._optimized_chunk_to_records = dict()\n\n def append_record(self, record: OptimizationRecord):\n self._records.append(record)\n if record.record_type in (\n OptimizationRecordType.replace,\n OptimizationRecordType.delete,\n ):\n self._original_chunk_to_records[record.original_chunk] = record\n if record.record_type in (\n OptimizationRecordType.new,\n OptimizationRecordType.replace,\n ):\n self._optimized_chunk_to_records[record.new_chunk] = record\n\n def get_optimization_result(self, original_chunk: ChunkType) -> ChunkType:\n chunk = original_chunk\n if chunk not in self._original_chunk_to_records:\n return\n while chunk in self._original_chunk_to_records:\n record = self._original_chunk_to_records[chunk]\n if record.record_type == OptimizationRecordType.replace:\n chunk = record.new_chunk\n else:\n assert record.record_type == OptimizationRecordType.delete\n return None\n return chunk\n\n def get_original_chunk(self, optimized_chunk: ChunkType) -> ChunkType:\n chunk = optimized_chunk\n if chunk not in self._optimized_chunk_to_records:\n return\n while chunk in self._optimized_chunk_to_records:\n record = self._optimized_chunk_to_records[chunk]\n if record.record_type == OptimizationRecordType.replace:\n chunk = record.original_chunk\n else:\n assert record.record_type == OptimizationRecordType.new\n return None\n return chunk\n\n\nclass OptimizationRule(ABC):\n _instances: Dict[\n Tuple[Type[\"OptimizationRule\"], EntityGraph, OptimizationRecords],\n \"OptimizationRule\",\n ] = dict()\n _preds_to_remove = weakref.WeakKeyDictionary()\n\n def __init__(\n self,\n graph: EntityGraph,\n records: OptimizationRecords,\n optimizer_cls: Type[\"Optimizer\"],\n ):\n self._graph = graph\n self._records = records\n self._optimizer_cls = optimizer_cls\n\n def __new__(\n cls,\n graph: EntityGraph,\n records: OptimizationRecords,\n optimizer_cls: Type[\"Optimizer\"],\n ):\n if (cls, graph, records) in cls._instances:\n return cls._instances[cls, graph, records]\n inst = cls._instances[cls, graph, records] = object.__new__(cls)\n return inst\n\n @abstractmethod\n def match(self, op: OperandType) -> bool:\n \"\"\"\n If this operand matches this rule.\n\n Parameters\n ----------\n op : OperandType\n Operand.\n\n Returns\n -------\n matched : bool\n Matched rule or not.\n \"\"\"\n\n @abstractmethod\n def apply(self, op: OperandType):\n \"\"\"\n Apply rule to an operand.\n\n Parameters\n ----------\n op : OperandType\n Operand\n \"\"\"\n\n def _replace_node(self, original_node: EntityType, new_node: EntityType):\n predecessors = self._graph.predecessors(original_node)\n successors = self._graph.successors(original_node)\n self._graph.remove_node(original_node)\n self._graph.add_node(new_node)\n for pred in predecessors:\n self._graph.add_edge(pred, 
new_node)\n for succ in successors:\n self._graph.add_edge(new_node, succ)\n\n @classmethod\n def _add_collapsable_predecessor(cls, node: EntityType, predecessor: EntityType):\n if predecessor not in cls._preds_to_remove:\n cls._preds_to_remove[predecessor] = {node}\n else:\n cls._preds_to_remove[predecessor].add(node)\n\n def _remove_collapsable_predecessors(self, node: EntityType):\n node = self._records.get_optimization_result(node) or node\n preds_opt_to_remove = []\n for pred in self._graph.predecessors(node):\n pred_original = self._records.get_original_chunk(pred)\n pred_original = pred_original if pred_original is not None else pred\n\n pred_opt = self._records.get_optimization_result(pred)\n pred_opt = pred_opt if pred_opt is not None else pred\n\n if pred_opt in self._graph.results or pred_original in self._graph.results:\n continue\n affect_succ = self._preds_to_remove.get(pred_original) or []\n affect_succ_opt = [\n self._records.get_optimization_result(s) or s for s in affect_succ\n ]\n if all(s in affect_succ_opt for s in self._graph.successors(pred)):\n preds_opt_to_remove.append((pred_original, pred_opt))\n\n for pred_original, pred_opt in preds_opt_to_remove:\n self._graph.remove_node(pred_opt)\n self._records.append_record(\n OptimizationRecord(pred_original, None, OptimizationRecordType.delete)\n )\n\n\nclass Optimizer(ABC):\n _rules: List[Type[OptimizationRule]]\n _op_to_rules: Dict[Type[OperandType], List[Type[OptimizationRule]]]\n\n @classmethod\n def register_rule(\n cls, operand_types: List[Type[OperandType]], rule: Type[OptimizationRule]\n ):\n if not hasattr(cls, \"_rules\"):\n cls._rules = []\n cls._rules.append(rule)\n\n if not hasattr(cls, \"_op_to_rules\"):\n cls._op_to_rules = defaultdict(list)\n for operand_type in operand_types:\n cls._op_to_rules[operand_type].append(rule)\n\n @classmethod\n def get_rule_types(\n cls, operand_type: Type[OperandType]\n ) -> List[Type[OptimizationRule]]:\n rule_types = cls._op_to_rules.get(operand_type, None)\n if rule_types is None:\n for op_cls in operand_type.__mro__:\n if op_cls is Operand:\n break\n rule_types = cls._op_to_rules.get(op_cls)\n if rule_types is not None:\n break\n cls._op_to_rules[operand_type] = rule_types or []\n return rule_types\n\n @classmethod\n def _replace_inputs(cls, graph: EntityGraph, records: OptimizationRecords):\n for node in graph:\n for succ in graph.successors(node):\n input_optimized = False\n new_inputs = []\n for inp in succ.inputs:\n optimized = records.get_optimization_result(inp)\n if optimized is None:\n optimized = inp\n if optimized is not inp:\n input_optimized = True\n new_inputs.append(optimized)\n if input_optimized:\n succ.inputs = new_inputs\n\n @classmethod\n @enter_mode(build=True)\n def optimize(cls, graph: EntityGraph) -> OptimizationRecords:\n \"\"\"\n Optimize a graph.\n\n Parameters\n ----------\n graph : EntityGraph\n Tileable or chunk graph.\n\n Returns\n -------\n optimization_records : OptimizationRecords\n Optimization records.\n \"\"\"\n records = OptimizationRecords()\n optimized = False\n for rule_type in cls._rules:\n visited = set()\n for entity in list(graph.topological_iter()):\n op = entity.op\n if op in visited:\n continue\n visited.add(op)\n\n rule_types = cls.get_rule_types(type(op)) or []\n if rule_type not in rule_types:\n continue\n\n rule = rule_type(graph, records, cls)\n if entity not in graph: # pragma: no cover\n # maybe removed during optimization\n continue\n if rule.match(op):\n optimized = True\n rule.apply(op)\n if optimized:\n 
cls._replace_inputs(graph, records)\n return records\n", "path": "mars/optimization/logical/core.py"}]} | 3,222 | 228 |
gh_patches_debug_17694 | rasdani/github-patches | git_diff | learningequality__kolibri-2484 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Starting up Kolibri Development Server
### Observed behavior
When trying to run the Kolibri Development Server from Ubuntu Bash on Windows, it raises a ValueError for an incorrect timezone setting (`local`).
### Expected behavior
The Kolibri development server should start up on the local machine.
### Errors and logs
Relevant errors and tracebacks from:
* the virtual environment command line:
```
Traceback (most recent call last):
File "/home/alan/.venvs/kolibri/bin/kolibri", line 11, in <module>
load_entry_point('kolibri', 'console_scripts', 'kolibri')()
File "/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py", line 607, in main
initialize(debug=debug)
File "/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py", line 176, in initialize
django.setup()
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/__init__.py", line 17, in setup
configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py", line 55, in __getattr__
self._setup(name)
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py", line 43, in _setup
self._wrapped = Settings(settings_module)
File "/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py", line 138, in __init__
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
ValueError: Incorrect timezone setting: local
```
### Steps to reproduce
Run the following command to start up the Kolibri development server:
kolibri --debug manage devserver --webpack
### Context
* Kolibri version: Develop Branch
* Operating system: Ubuntu Bash for Windows
--- END ISSUE ---
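
A quick diagnostic sketch for the environment described above (not part of the repository; it assumes `tzlocal`, which the settings file below already uses, and `pytz`, a Django dependency, are importable):

```python
from tzlocal import get_localzone
import pytz

zone = get_localzone().zone
print(zone)                         # prints 'local' on the affected WSL setup
print(zone in pytz.all_timezones)   # False -> Django rejects it as TIME_ZONE
```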
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/deployment/default/settings/base.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Django settings for kolibri project.
4
5 For more information on this file, see
6 https://docs.djangoproject.com/en/1.9/topics/settings/
7
8 For the full list of settings and their values, see
9 https://docs.djangoproject.com/en/1.9/ref/settings/
10 """
11 from __future__ import absolute_import, print_function, unicode_literals
12
13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
14 import os
15
16 # import kolibri, so we can get the path to the module.
17 import kolibri
18 # we load other utilities related to i18n
19 # This is essential! We load the kolibri conf INSIDE the Django conf
20 from kolibri.utils import conf, i18n
21 from tzlocal import get_localzone
22
23 KOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)
24
25 BASE_DIR = os.path.abspath(os.path.dirname(__name__))
26
27 KOLIBRI_HOME = os.environ['KOLIBRI_HOME']
28
29 KOLIBRI_CORE_JS_NAME = 'kolibriGlobal'
30
31 LOCALE_PATHS = [
32 os.path.join(KOLIBRI_MODULE_PATH, "locale"),
33 ]
34
35 # Quick-start development settings - unsuitable for production
36 # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
37
38 # SECURITY WARNING: keep the secret key used in production secret!
39 SECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27'
40
41 # SECURITY WARNING: don't run with debug turned on in production!
42 DEBUG = False
43
44 ALLOWED_HOSTS = ['*']
45
46 # Application definition
47
48 INSTALLED_APPS = [
49 'kolibri.core',
50 'django.contrib.admin',
51 'django.contrib.auth',
52 'django.contrib.contenttypes',
53 'django.contrib.sessions',
54 'django.contrib.messages',
55 'django.contrib.staticfiles',
56 'kolibri.auth.apps.KolibriAuthConfig',
57 'kolibri.content',
58 'kolibri.logger',
59 'kolibri.tasks.apps.KolibriTasksConfig',
60 'kolibri.core.webpack',
61 'kolibri.core.exams',
62 'kolibri.core.device',
63 'kolibri.core.discovery',
64 'rest_framework',
65 'django_js_reverse',
66 'jsonfield',
67 'morango',
68 ] + conf.config['INSTALLED_APPS']
69
70 # Add in the external plugins' locale paths. Our frontend messages depends
71 # specifically on the value of LOCALE_PATHS to find its catalog files.
72 LOCALE_PATHS += [
73 i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS
74 if i18n.is_external_plugin(app)
75 ]
76
77 MIDDLEWARE_CLASSES = (
78 'django.contrib.sessions.middleware.SessionMiddleware',
79 'kolibri.core.device.middleware.KolibriLocaleMiddleware',
80 'django.middleware.common.CommonMiddleware',
81 'django.middleware.csrf.CsrfViewMiddleware',
82 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware',
83 'kolibri.auth.middleware.CustomAuthenticationMiddleware',
84 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
85 'django.contrib.messages.middleware.MessageMiddleware',
86 'django.middleware.clickjacking.XFrameOptionsMiddleware',
87 'django.middleware.security.SecurityMiddleware',
88 )
89
90 QUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, "job_storage.sqlite3")
91
92 ROOT_URLCONF = 'kolibri.deployment.default.urls'
93
94 TEMPLATES = [
95 {
96 'BACKEND': 'django.template.backends.django.DjangoTemplates',
97 'DIRS': [],
98 'APP_DIRS': True,
99 'OPTIONS': {
100 'context_processors': [
101 'django.template.context_processors.debug',
102 'django.template.context_processors.request',
103 'django.contrib.auth.context_processors.auth',
104 'django.contrib.messages.context_processors.messages',
105 'kolibri.core.context_processors.custom_context_processor.return_session',
106 ],
107 },
108 },
109 ]
110
111 WSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application'
112
113
114 # Database
115 # https://docs.djangoproject.com/en/1.9/ref/settings/#databases
116
117 DATABASES = {
118 'default': {
119 'ENGINE': 'django.db.backends.sqlite3',
120 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'),
121 'OPTIONS': {
122 'timeout': 100,
123 }
124 },
125 }
126
127 # Content directories and URLs for channel metadata and content files
128
129 # Directory and URL for storing content databases for channel data
130 CONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases')
131 if not os.path.exists(CONTENT_DATABASE_DIR):
132 os.makedirs(CONTENT_DATABASE_DIR)
133
134 # Directory and URL for storing de-duped content files for all channels
135 CONTENT_STORAGE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'storage')
136 if not os.path.exists(CONTENT_STORAGE_DIR):
137 os.makedirs(CONTENT_STORAGE_DIR)
138
139 # Base default URL for downloading content from an online server
140 CENTRAL_CONTENT_DOWNLOAD_BASE_URL = "http://studio.learningequality.org"
141
142 # Internationalization
143 # https://docs.djangoproject.com/en/1.9/topics/i18n/
144
145 LANGUAGES = [
146 ('en', 'English'),
147 ('sw-tz', 'Kiswahili'),
148 ('es-es', 'Español'),
149 ('es-mx', 'Español (México)'),
150 ('fr-fr', 'Français'),
151 ('pt-pt', 'Português'),
152 ('hi-in', 'हिंदी'),
153 ('ar-eg', 'العَرَبِيَّة')
154 ]
155
156 LANGUAGE_CODE = conf.config.get("LANGUAGE_CODE") or "en"
157
158 TIME_ZONE = get_localzone().zone
159
160 USE_I18N = True
161
162 USE_L10N = True
163
164 USE_TZ = True
165
166 # Static files (CSS, JavaScript, Images)
167 # https://docs.djangoproject.com/en/1.9/howto/static-files/
168
169 STATIC_URL = '/static/'
170 STATIC_ROOT = os.path.join(KOLIBRI_HOME, "static")
171
172 # https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING
173 # https://docs.djangoproject.com/en/1.9/topics/logging/
174
175 LOGGING = {
176 'version': 1,
177 'disable_existing_loggers': False,
178 'formatters': {
179 'verbose': {
180 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
181 },
182 'simple': {
183 'format': '%(levelname)s %(message)s'
184 },
185 'simple_date': {
186 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
187 },
188 'color': {
189 '()': 'colorlog.ColoredFormatter',
190 'format': '%(log_color)s%(levelname)-8s %(message)s',
191 'log_colors': {
192 'DEBUG': 'bold_black',
193 'INFO': 'white',
194 'WARNING': 'yellow',
195 'ERROR': 'red',
196 'CRITICAL': 'bold_red',
197 },
198 }
199 },
200 'filters': {
201 'require_debug_true': {
202 '()': 'django.utils.log.RequireDebugTrue',
203 },
204 'require_debug_false': {
205 '()': 'django.utils.log.RequireDebugFalse',
206 },
207 },
208 'handlers': {
209 'console': {
210 'level': 'INFO',
211 'class': 'logging.StreamHandler',
212 'formatter': 'color'
213 },
214 'mail_admins': {
215 'level': 'ERROR',
216 'class': 'django.utils.log.AdminEmailHandler',
217 'filters': ['require_debug_false'],
218 },
219 'request_debug': {
220 'level': 'ERROR',
221 'class': 'logging.StreamHandler',
222 'formatter': 'color',
223 'filters': ['require_debug_true'],
224 },
225 'file_debug': {
226 'level': 'DEBUG',
227 'filters': ['require_debug_true'],
228 'class': 'logging.FileHandler',
229 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'),
230 'formatter': 'simple_date',
231 },
232 'file': {
233 'level': 'INFO',
234 'filters': [],
235 'class': 'logging.FileHandler',
236 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'),
237 'formatter': 'simple_date',
238 },
239 },
240 'loggers': {
241 'django': {
242 'handlers': ['console', 'file'],
243 'propagate': True,
244 },
245 'django.request': {
246 'handlers': ['mail_admins', 'file', 'request_debug'],
247 'level': 'ERROR',
248 'propagate': False,
249 },
250 'kolibri': {
251 'handlers': ['console', 'mail_admins', 'file', 'file_debug'],
252 'level': 'INFO',
253 }
254 }
255 }
256
257
258 # Customizing Django auth system
259 # https://docs.djangoproject.com/en/1.9/topics/auth/customizing/
260
261 AUTH_USER_MODEL = 'kolibriauth.FacilityUser'
262
263 AUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend']
264
265
266 # Django REST Framework
267 # http://www.django-rest-framework.org/api-guide/settings/
268
269 REST_FRAMEWORK = {
270 "UNAUTHENTICATED_USER": "kolibri.auth.models.KolibriAnonymousUser",
271 'DEFAULT_RENDERER_CLASSES': (
272 'rest_framework.renderers.JSONRenderer',
273 'rest_framework.renderers.BrowsableAPIRenderer',
274 'rest_framework_csv.renderers.CSVRenderer',
275 ),
276 }
277
278 # System warnings to disable
279 # see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks
280 SILENCED_SYSTEM_CHECKS = ["auth.W004"]
281
282 # Configuration for Django JS Reverse
283 # https://github.com/ierror/django-js-reverse#options
284
285 JS_REVERSE_JS_VAR_NAME = 'kolibriUrls'
286
287 JS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ]
288
289 ENABLE_DATA_BOOTSTRAPPING = True
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py
--- a/kolibri/deployment/default/settings/base.py
+++ b/kolibri/deployment/default/settings/base.py
@@ -13,6 +13,8 @@
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
+import pytz
+
# import kolibri, so we can get the path to the module.
import kolibri
# we load other utilities related to i18n
@@ -155,7 +157,18 @@
LANGUAGE_CODE = conf.config.get("LANGUAGE_CODE") or "en"
-TIME_ZONE = get_localzone().zone
+try:
+ TIME_ZONE = get_localzone().zone
+except pytz.UnknownTimeZoneError:
+ # Do not fail at this point because a timezone was not
+ # detected.
+ TIME_ZONE = pytz.utc.zone
+
+# Fixes https://github.com/regebro/tzlocal/issues/44
+# tzlocal 1.4 returns 'local' if unable to detect the timezone,
+# and this TZ id is invalid
+if TIME_ZONE == "local":
+ TIME_ZONE = pytz.utc.zone
USE_I18N = True
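
For reference, the fallback used in the patch is a valid timezone identifier, unlike the `local` placeholder that triggered the original error; a minimal check (assuming `pytz` is importable):

```python
import pytz

print(pytz.utc.zone)                  # 'UTC'
print("UTC" in pytz.all_timezones)    # True  -> accepted by Django's TIME_ZONE validation
print("local" in pytz.all_timezones)  # False -> the value tzlocal can return when detection fails
```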
| {"golden_diff": "diff --git a/kolibri/deployment/default/settings/base.py b/kolibri/deployment/default/settings/base.py\n--- a/kolibri/deployment/default/settings/base.py\n+++ b/kolibri/deployment/default/settings/base.py\n@@ -13,6 +13,8 @@\n # Build paths inside the project like this: os.path.join(BASE_DIR, ...)\n import os\n \n+import pytz\n+\n # import kolibri, so we can get the path to the module.\n import kolibri\n # we load other utilities related to i18n\n@@ -155,7 +157,18 @@\n \n LANGUAGE_CODE = conf.config.get(\"LANGUAGE_CODE\") or \"en\"\n \n-TIME_ZONE = get_localzone().zone\n+try:\n+ TIME_ZONE = get_localzone().zone\n+except pytz.UnknownTimeZoneError:\n+ # Do not fail at this point because a timezone was not\n+ # detected.\n+ TIME_ZONE = pytz.utc.zone\n+\n+# Fixes https://github.com/regebro/tzlocal/issues/44\n+# tzlocal 1.4 returns 'local' if unable to detect the timezone,\n+# and this TZ id is invalid\n+if TIME_ZONE == \"local\":\n+ TIME_ZONE = pytz.utc.zone\n \n USE_I18N = True\n", "issue": "Starting up Kolibri Development Server\n### Observed behavior\r\n\r\nTrying to run the Kolobri Development Server from Ubuntu Bash on Windows, but it raises a ValueError for incorrect timezone (local).\r\n\r\n### Expected behavior\r\n\r\nShould start up the Kolibri server on local machine.\r\n\r\n### Errors and logs\r\n\r\nRelevant errors and tracebacks from:\r\n\r\n*virtual environment command line:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/alan/.venvs/kolibri/bin/kolibri\", line 11, in <module>\r\n load_entry_point('kolibri', 'console_scripts', 'kolibri')()\r\n File \"/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py\", line 607, in main\r\n initialize(debug=debug)\r\n File \"/home/alan/Kolibri1/kolibri/kolibri/utils/cli.py\", line 176, in initialize\r\n django.setup()\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/__init__.py\", line 17, in setup\r\n configure_logging(settings.LOGGING_CONFIG, settings.LOGGING)\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py\", line 55, in __getattr__\r\n self._setup(name)\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py\", line 43, in _setup\r\n self._wrapped = Settings(settings_module)\r\n File \"/home/alan/.venvs/kolibri/local/lib/python2.7/site-packages/django/conf/__init__.py\", line 138, in __init__\r\n raise ValueError(\"Incorrect timezone setting: %s\" % self.TIME_ZONE)\r\nValueError: Incorrect timezone setting: local\r\n\r\n```\r\n\r\n### Steps to reproduce\r\n\r\nRun command to start up Kolibri Development server. \r\n\r\nkolibri --debug manage devserver --webpack\r\n\r\n### Context\r\n\r\n* Kolibri version: Develop Branch\r\n* Operating system: Ubuntu Bash for Windows\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for kolibri project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\n# import kolibri, so we can get the path to the module.\nimport kolibri\n# we load other utilities related to i18n\n# This is essential! 
We load the kolibri conf INSIDE the Django conf\nfrom kolibri.utils import conf, i18n\nfrom tzlocal import get_localzone\n\nKOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__name__))\n\nKOLIBRI_HOME = os.environ['KOLIBRI_HOME']\n\nKOLIBRI_CORE_JS_NAME = 'kolibriGlobal'\n\nLOCALE_PATHS = [\n os.path.join(KOLIBRI_MODULE_PATH, \"locale\"),\n]\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\n# Application definition\n\nINSTALLED_APPS = [\n 'kolibri.core',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'kolibri.auth.apps.KolibriAuthConfig',\n 'kolibri.content',\n 'kolibri.logger',\n 'kolibri.tasks.apps.KolibriTasksConfig',\n 'kolibri.core.webpack',\n 'kolibri.core.exams',\n 'kolibri.core.device',\n 'kolibri.core.discovery',\n 'rest_framework',\n 'django_js_reverse',\n 'jsonfield',\n 'morango',\n] + conf.config['INSTALLED_APPS']\n\n# Add in the external plugins' locale paths. Our frontend messages depends\n# specifically on the value of LOCALE_PATHS to find its catalog files.\nLOCALE_PATHS += [\n i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS\n if i18n.is_external_plugin(app)\n]\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'kolibri.core.device.middleware.KolibriLocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware',\n 'kolibri.auth.middleware.CustomAuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nQUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, \"job_storage.sqlite3\")\n\nROOT_URLCONF = 'kolibri.deployment.default.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'kolibri.core.context_processors.custom_context_processor.return_session',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'),\n 'OPTIONS': {\n 'timeout': 100,\n }\n },\n}\n\n# Content directories and URLs for channel metadata and content files\n\n# Directory and URL for storing content databases for channel data\nCONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases')\nif not os.path.exists(CONTENT_DATABASE_DIR):\n os.makedirs(CONTENT_DATABASE_DIR)\n\n# Directory and URL for storing de-duped content files for all channels\nCONTENT_STORAGE_DIR = 
os.path.join(KOLIBRI_HOME, 'content', 'storage')\nif not os.path.exists(CONTENT_STORAGE_DIR):\n os.makedirs(CONTENT_STORAGE_DIR)\n\n# Base default URL for downloading content from an online server\nCENTRAL_CONTENT_DOWNLOAD_BASE_URL = \"http://studio.learningequality.org\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGES = [\n ('en', 'English'),\n ('sw-tz', 'Kiswahili'),\n ('es-es', 'Espa\u00f1ol'),\n ('es-mx', 'Espa\u00f1ol (M\u00e9xico)'),\n ('fr-fr', 'Fran\u00e7ais'),\n ('pt-pt', 'Portugu\u00eas'),\n ('hi-in', '\u0939\u093f\u0902\u0926\u0940'),\n ('ar-eg', '\u0627\u0644\u0639\u064e\u0631\u064e\u0628\u0650\u064a\u064e\u0651\u0629\u200e\u200e')\n]\n\nLANGUAGE_CODE = conf.config.get(\"LANGUAGE_CODE\") or \"en\"\n\nTIME_ZONE = get_localzone().zone\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(KOLIBRI_HOME, \"static\")\n\n# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING\n# https://docs.djangoproject.com/en/1.9/topics/logging/\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'simple_date': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'\n },\n 'color': {\n '()': 'colorlog.ColoredFormatter',\n 'format': '%(log_color)s%(levelname)-8s %(message)s',\n 'log_colors': {\n 'DEBUG': 'bold_black',\n 'INFO': 'white',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bold_red',\n },\n }\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false'],\n },\n 'request_debug': {\n 'level': 'ERROR',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color',\n 'filters': ['require_debug_true'],\n },\n 'file_debug': {\n 'level': 'DEBUG',\n 'filters': ['require_debug_true'],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'),\n 'formatter': 'simple_date',\n },\n 'file': {\n 'level': 'INFO',\n 'filters': [],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'),\n 'formatter': 'simple_date',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'file'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'file', 'request_debug'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'kolibri': {\n 'handlers': ['console', 'mail_admins', 'file', 'file_debug'],\n 'level': 'INFO',\n }\n }\n}\n\n\n# Customizing Django auth system\n# https://docs.djangoproject.com/en/1.9/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'kolibriauth.FacilityUser'\n\nAUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend']\n\n\n# Django REST Framework\n# http://www.django-rest-framework.org/api-guide/settings/\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.auth.models.KolibriAnonymousUser\",\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 
'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework_csv.renderers.CSVRenderer',\n ),\n}\n\n# System warnings to disable\n# see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks\nSILENCED_SYSTEM_CHECKS = [\"auth.W004\"]\n\n# Configuration for Django JS Reverse\n# https://github.com/ierror/django-js-reverse#options\n\nJS_REVERSE_JS_VAR_NAME = 'kolibriUrls'\n\nJS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ]\n\nENABLE_DATA_BOOTSTRAPPING = True\n", "path": "kolibri/deployment/default/settings/base.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for kolibri project.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.9/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.9/ref/settings/\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nimport pytz\n\n# import kolibri, so we can get the path to the module.\nimport kolibri\n# we load other utilities related to i18n\n# This is essential! We load the kolibri conf INSIDE the Django conf\nfrom kolibri.utils import conf, i18n\nfrom tzlocal import get_localzone\n\nKOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)\n\nBASE_DIR = os.path.abspath(os.path.dirname(__name__))\n\nKOLIBRI_HOME = os.environ['KOLIBRI_HOME']\n\nKOLIBRI_CORE_JS_NAME = 'kolibriGlobal'\n\nLOCALE_PATHS = [\n os.path.join(KOLIBRI_MODULE_PATH, \"locale\"),\n]\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = 'f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27'\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = False\n\nALLOWED_HOSTS = ['*']\n\n# Application definition\n\nINSTALLED_APPS = [\n 'kolibri.core',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'kolibri.auth.apps.KolibriAuthConfig',\n 'kolibri.content',\n 'kolibri.logger',\n 'kolibri.tasks.apps.KolibriTasksConfig',\n 'kolibri.core.webpack',\n 'kolibri.core.exams',\n 'kolibri.core.device',\n 'kolibri.core.discovery',\n 'rest_framework',\n 'django_js_reverse',\n 'jsonfield',\n 'morango',\n] + conf.config['INSTALLED_APPS']\n\n# Add in the external plugins' locale paths. 
Our frontend messages depends\n# specifically on the value of LOCALE_PATHS to find its catalog files.\nLOCALE_PATHS += [\n i18n.get_installed_app_locale_path(app) for app in INSTALLED_APPS\n if i18n.is_external_plugin(app)\n]\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'kolibri.core.device.middleware.KolibriLocaleMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'kolibri.plugins.setup_wizard.middleware.SetupWizardMiddleware',\n 'kolibri.auth.middleware.CustomAuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n)\n\nQUEUE_JOB_STORAGE_PATH = os.path.join(KOLIBRI_HOME, \"job_storage.sqlite3\")\n\nROOT_URLCONF = 'kolibri.deployment.default.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'kolibri.core.context_processors.custom_context_processor.return_session',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'kolibri.deployment.default.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.9/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(KOLIBRI_HOME, 'db.sqlite3'),\n 'OPTIONS': {\n 'timeout': 100,\n }\n },\n}\n\n# Content directories and URLs for channel metadata and content files\n\n# Directory and URL for storing content databases for channel data\nCONTENT_DATABASE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'databases')\nif not os.path.exists(CONTENT_DATABASE_DIR):\n os.makedirs(CONTENT_DATABASE_DIR)\n\n# Directory and URL for storing de-duped content files for all channels\nCONTENT_STORAGE_DIR = os.path.join(KOLIBRI_HOME, 'content', 'storage')\nif not os.path.exists(CONTENT_STORAGE_DIR):\n os.makedirs(CONTENT_STORAGE_DIR)\n\n# Base default URL for downloading content from an online server\nCENTRAL_CONTENT_DOWNLOAD_BASE_URL = \"http://studio.learningequality.org\"\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.9/topics/i18n/\n\nLANGUAGES = [\n ('en', 'English'),\n ('sw-tz', 'Kiswahili'),\n ('es-es', 'Espa\u00f1ol'),\n ('es-mx', 'Espa\u00f1ol (M\u00e9xico)'),\n ('fr-fr', 'Fran\u00e7ais'),\n ('pt-pt', 'Portugu\u00eas'),\n ('hi-in', '\u0939\u093f\u0902\u0926\u0940'),\n ('ar-eg', '\u0627\u0644\u0639\u064e\u0631\u064e\u0628\u0650\u064a\u064e\u0651\u0629\u200e\u200e')\n]\n\nLANGUAGE_CODE = conf.config.get(\"LANGUAGE_CODE\") or \"en\"\n\ntry:\n TIME_ZONE = get_localzone().zone\nexcept pytz.UnknownTimeZoneError:\n # Do not fail at this point because a timezone was not\n # detected.\n TIME_ZONE = pytz.utc.zone\n\n# Fixes https://github.com/regebro/tzlocal/issues/44\n# tzlocal 1.4 returns 'local' if unable to detect the timezone,\n# and this TZ id is invalid\nif TIME_ZONE == \"local\":\n TIME_ZONE = pytz.utc.zone\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.9/howto/static-files/\n\nSTATIC_URL = '/static/'\nSTATIC_ROOT = os.path.join(KOLIBRI_HOME, \"static\")\n\n# 
https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-LOGGING\n# https://docs.djangoproject.com/en/1.9/topics/logging/\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'verbose': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'\n },\n 'simple': {\n 'format': '%(levelname)s %(message)s'\n },\n 'simple_date': {\n 'format': '%(levelname)s %(asctime)s %(module)s %(message)s'\n },\n 'color': {\n '()': 'colorlog.ColoredFormatter',\n 'format': '%(log_color)s%(levelname)-8s %(message)s',\n 'log_colors': {\n 'DEBUG': 'bold_black',\n 'INFO': 'white',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'bold_red',\n },\n }\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse',\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color'\n },\n 'mail_admins': {\n 'level': 'ERROR',\n 'class': 'django.utils.log.AdminEmailHandler',\n 'filters': ['require_debug_false'],\n },\n 'request_debug': {\n 'level': 'ERROR',\n 'class': 'logging.StreamHandler',\n 'formatter': 'color',\n 'filters': ['require_debug_true'],\n },\n 'file_debug': {\n 'level': 'DEBUG',\n 'filters': ['require_debug_true'],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'debug.log'),\n 'formatter': 'simple_date',\n },\n 'file': {\n 'level': 'INFO',\n 'filters': [],\n 'class': 'logging.FileHandler',\n 'filename': os.path.join(KOLIBRI_HOME, 'kolibri.log'),\n 'formatter': 'simple_date',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console', 'file'],\n 'propagate': True,\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'file', 'request_debug'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'kolibri': {\n 'handlers': ['console', 'mail_admins', 'file', 'file_debug'],\n 'level': 'INFO',\n }\n }\n}\n\n\n# Customizing Django auth system\n# https://docs.djangoproject.com/en/1.9/topics/auth/customizing/\n\nAUTH_USER_MODEL = 'kolibriauth.FacilityUser'\n\nAUTHENTICATION_BACKENDS = ['kolibri.auth.backends.FacilityUserBackend']\n\n\n# Django REST Framework\n# http://www.django-rest-framework.org/api-guide/settings/\n\nREST_FRAMEWORK = {\n \"UNAUTHENTICATED_USER\": \"kolibri.auth.models.KolibriAnonymousUser\",\n 'DEFAULT_RENDERER_CLASSES': (\n 'rest_framework.renderers.JSONRenderer',\n 'rest_framework.renderers.BrowsableAPIRenderer',\n 'rest_framework_csv.renderers.CSVRenderer',\n ),\n}\n\n# System warnings to disable\n# see https://docs.djangoproject.com/en/1.9/ref/settings/#silenced-system-checks\nSILENCED_SYSTEM_CHECKS = [\"auth.W004\"]\n\n# Configuration for Django JS Reverse\n# https://github.com/ierror/django-js-reverse#options\n\nJS_REVERSE_JS_VAR_NAME = 'kolibriUrls'\n\nJS_REVERSE_EXCLUDE_NAMESPACES = ['admin', ]\n\nENABLE_DATA_BOOTSTRAPPING = True\n", "path": "kolibri/deployment/default/settings/base.py"}]} | 3,617 | 279 |
gh_patches_debug_34388 | rasdani/github-patches | git_diff | Lightning-AI__pytorch-lightning-1908 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Save checkpoint for the last epoch
How can I save a checkpoint only for the last epoch?
In the docs:
```
if save_top_k == k, the best k models according to the quantity monitored will be saved. if save_top_k == 0, no models are saved. if save_top_k == -1, all models are saved. Please note that the monitors are checked every period epochs. if save_top_k >= 2 and the callback is called multiple times inside an epoch, the name of the saved file will be appended with a version count starting with v0.
```
* `k = 0` does not save any.
* `k > 1` saves only a few of the best
* `k = -1` saves all of them
Currently, I am using k = -1, but it is space-consuming.
--- END ISSUE ---
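
Until the callback itself grows an option for this, one workaround is a small custom callback that overwrites a single file at the end of every epoch, so only the latest checkpoint is kept on disk. This is a sketch only; the hook signature and `trainer.save_checkpoint` call reflect the Lightning API of this era, and the `last.ckpt` filename is an arbitrary choice:

```python
import os

from pytorch_lightning.callbacks import Callback


class SaveLastCheckpoint(Callback):
    """Keep only the most recent epoch by overwriting a single file."""

    def __init__(self, dirpath: str):
        self.filepath = os.path.join(dirpath, "last.ckpt")

    def on_epoch_end(self, trainer, pl_module):
        # delegate serialization to the trainer; overwrites the previous epoch's file
        trainer.save_checkpoint(self.filepath)
```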
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_lightning/callbacks/model_checkpoint.py`
Content:
```
1 """
2 Model Checkpointing
3 ===================
4
5 Automatically save model checkpoints during training.
6
7 """
8
9 import os
10 import re
11
12 import numpy as np
13 from typing import Optional
14
15 import torch
16 from pytorch_lightning import _logger as log
17 from pytorch_lightning.callbacks.base import Callback
18 from pytorch_lightning.utilities import rank_zero_warn, rank_zero_only
19
20
21 class ModelCheckpoint(Callback):
22 r"""
23 Save the model after every epoch.
24
25 Args:
26 filepath: path to save the model file.
27 Can contain named formatting options to be auto-filled.
28
29 Example::
30
31 # custom path
32 # saves a file like: my/path/epoch_0.ckpt
33 >>> checkpoint_callback = ModelCheckpoint('my/path/')
34
35 # save any arbitrary metrics like `val_loss`, etc. in name
36 # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt
37 >>> checkpoint_callback = ModelCheckpoint(
38 ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'
39 ... )
40
41 Can also be set to `None`, then it will be set to default location
42 during trainer construction.
43
44 monitor: quantity to monitor.
45 verbose: verbosity mode. Default: ``False``.
46 save_top_k: if `save_top_k == k`,
47 the best k models according to
48 the quantity monitored will be saved.
49 if ``save_top_k == 0``, no models are saved.
50 if ``save_top_k == -1``, all models are saved.
51 Please note that the monitors are checked every `period` epochs.
52 if ``save_top_k >= 2`` and the callback is called multiple
53 times inside an epoch, the name of the saved file will be
54 appended with a version count starting with `v0`.
55 mode: one of {auto, min, max}.
56 If ``save_top_k != 0``, the decision
57 to overwrite the current save file is made
58 based on either the maximization or the
59 minimization of the monitored quantity. For `val_acc`,
60 this should be `max`, for `val_loss` this should
61 be `min`, etc. In `auto` mode, the direction is
62 automatically inferred from the name of the monitored quantity.
63 save_weights_only: if ``True``, then only the model's weights will be
64 saved (``model.save_weights(filepath)``), else the full model
65 is saved (``model.save(filepath)``).
66 period: Interval (number of epochs) between checkpoints.
67
68 Example::
69
70 >>> from pytorch_lightning import Trainer
71 >>> from pytorch_lightning.callbacks import ModelCheckpoint
72
73 # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min
74 >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')
75 >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)
76
77 # save epoch and val_loss in name
78 # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt
79 >>> checkpoint_callback = ModelCheckpoint(
80 ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'
81 ... )
82
83 """
84
85 def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,
86 save_top_k: int = 1, save_weights_only: bool = False,
87 mode: str = 'auto', period: int = 1, prefix: str = ''):
88 super().__init__()
89 if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:
90 rank_zero_warn(
91 f"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0."
92 "All files in this directory will be deleted when a checkpoint is saved!"
93 )
94 self._rank = 0
95
96 self.monitor = monitor
97 self.verbose = verbose
98 if filepath is None: # will be determined by trainer at runtime
99 self.dirpath, self.filename = None, None
100 else:
101 if os.path.isdir(filepath):
102 self.dirpath, self.filename = filepath, '{epoch}'
103 else:
104 self.dirpath, self.filename = os.path.split(filepath)
105 os.makedirs(self.dirpath, exist_ok=True)
106 self.save_top_k = save_top_k
107 self.save_weights_only = save_weights_only
108 self.period = period
109 self.epoch_last_check = None
110 self.prefix = prefix
111 self.best_k_models = {}
112 # {filename: monitor}
113 self.kth_best_model = ''
114 self.best = 0
115 self.save_function = None
116
117 torch_inf = torch.tensor(np.Inf)
118 mode_dict = {
119 'min': (torch_inf, 'min'),
120 'max': (-torch_inf, 'max'),
121 'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')
122 else (torch_inf, 'min'),
123 }
124
125 if mode not in mode_dict:
126 rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, '
127 f'fallback to auto mode.', RuntimeWarning)
128 mode = 'auto'
129
130 self.kth_value, self.mode = mode_dict[mode]
131
132 def _del_model(self, filepath):
133 if os.path.isfile(filepath):
134 os.remove(filepath)
135
136 def _save_model(self, filepath):
137 # make paths
138 os.makedirs(os.path.dirname(filepath), exist_ok=True)
139
140 # delegate the saving to the model
141 if self.save_function is not None:
142 self.save_function(filepath, self.save_weights_only)
143 else:
144 raise ValueError(".save_function() not set")
145
146 def check_monitor_top_k(self, current):
147 less_than_k_models = len(self.best_k_models) < self.save_top_k
148 if less_than_k_models:
149 return True
150
151 if not isinstance(current, torch.Tensor):
152 rank_zero_warn(
153 f'{current} is supposed to be a torch.Tensor. Saving checkpoint may not work correctly. '
154 f'HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning
155 )
156 current = torch.tensor(current)
157
158 monitor_op = {
159 "min": torch.lt,
160 "max": torch.gt,
161 }[self.mode]
162
163 return monitor_op(current, self.best_k_models[self.kth_best_model])
164
165 def format_checkpoint_name(self, epoch, metrics, ver=None):
166 """Generate a filename according to the defined template.
167
168 Example::
169
170 >>> tmpdir = os.path.dirname(__file__)
171 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))
172 >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
173 'epoch=0.ckpt'
174 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))
175 >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))
176 'epoch=005.ckpt'
177 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))
178 >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))
179 'epoch=2-val_loss=0.12.ckpt'
180 >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))
181 >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))
182 'missing=0.ckpt'
183 """
184 # check if user passed in keys to the string
185 groups = re.findall(r'(\{.*?)[:\}]', self.filename)
186
187 if len(groups) == 0:
188 # default name
189 filename = f'{self.prefix}_ckpt_epoch_{epoch}'
190 else:
191 metrics['epoch'] = epoch
192 filename = self.filename
193 for tmp in groups:
194 name = tmp[1:]
195 filename = filename.replace(tmp, name + '={' + name)
196 if name not in metrics:
197 metrics[name] = 0
198 filename = filename.format(**metrics)
199 str_ver = f'_v{ver}' if ver is not None else ''
200 filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')
201 return filepath
202
203 @rank_zero_only
204 def on_validation_end(self, trainer, pl_module):
205 # only run on main process
206 if trainer.proc_rank != 0:
207 return
208
209 metrics = trainer.callback_metrics
210 epoch = trainer.current_epoch
211 if self.save_top_k == 0:
212 # no models are saved
213 return
214 if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:
215 # skipping in this term
216 return
217
218 self.epoch_last_check = epoch
219
220 filepath = self.format_checkpoint_name(epoch, metrics)
221 version_cnt = 0
222 while os.path.isfile(filepath):
223 filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)
224 # this epoch called before
225 version_cnt += 1
226
227 if self.save_top_k != -1:
228 current = metrics.get(self.monitor)
229
230 if not isinstance(current, torch.Tensor):
231 rank_zero_warn(
232 f'The metric you returned {current} must be a Torch.Tensor instance, checkpoint not saved '
233 f'HINT: what is the value of {self.monitor} in validation_end()?', RuntimeWarning
234 )
235
236 if current is None:
237 rank_zero_warn(
238 f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning
239 )
240 elif self.check_monitor_top_k(current):
241 self._do_check_save(filepath, current, epoch)
242 elif self.verbose > 0:
243 log.info(f'\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')
244
245 else:
246 if self.verbose > 0:
247 log.info(f'\nEpoch {epoch:05d}: saving model to {filepath}')
248 self._save_model(filepath)
249
250 def _do_check_save(self, filepath, current, epoch):
251 # remove kth
252
253 del_list = []
254 if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:
255 delpath = self.kth_best_model
256 self.best_k_models.pop(self.kth_best_model)
257 del_list.append(delpath)
258
259 self.best_k_models[filepath] = current
260 if len(self.best_k_models) == self.save_top_k:
261 # monitor dict has reached k elements
262 _op = max if self.mode == 'min' else min
263 self.kth_best_model = _op(self.best_k_models,
264 key=self.best_k_models.get)
265 self.kth_value = self.best_k_models[self.kth_best_model]
266
267 _op = min if self.mode == 'min' else max
268 self.best = _op(self.best_k_models.values())
269
270 if self.verbose > 0:
271 log.info(
272 f'\nEpoch {epoch:05d}: {self.monitor} reached'
273 f' {current:0.5f} (best {self.best:0.5f}), saving model to'
274 f' {filepath} as top {self.save_top_k}')
275 self._save_model(filepath)
276
277 for cur_path in del_list:
278 if cur_path != filepath:
279 self._del_model(cur_path)
280
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py
--- a/pytorch_lightning/callbacks/model_checkpoint.py
+++ b/pytorch_lightning/callbacks/model_checkpoint.py
@@ -43,6 +43,7 @@
monitor: quantity to monitor.
verbose: verbosity mode. Default: ``False``.
+ save_last: always saves the model at the end of the epoch. Default: ``False``.
save_top_k: if `save_top_k == k`,
the best k models according to
the quantity monitored will be saved.
@@ -83,7 +84,7 @@
"""
def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,
- save_top_k: int = 1, save_weights_only: bool = False,
+ save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False,
mode: str = 'auto', period: int = 1, prefix: str = ''):
super().__init__()
if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:
@@ -103,6 +104,7 @@
else:
self.dirpath, self.filename = os.path.split(filepath)
os.makedirs(self.dirpath, exist_ok=True)
+ self.save_last = save_last
self.save_top_k = save_top_k
self.save_weights_only = save_weights_only
self.period = period
@@ -217,6 +219,10 @@
self.epoch_last_check = epoch
+ if self.save_last:
+ filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt')
+ self._save_model(filepath)
+
filepath = self.format_checkpoint_name(epoch, metrics)
version_cnt = 0
while os.path.isfile(filepath):
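
With the change above, keeping a rolling copy of the most recent epoch no longer requires `save_top_k=-1`; a usage sketch (the path is a placeholder):

```python
from pytorch_lightning.callbacks import ModelCheckpoint

# keeps the single best checkpoint by val_loss plus an always-updated 'last.ckpt'
checkpoint_callback = ModelCheckpoint(
    filepath="my/path/{epoch}-{val_loss:.2f}",
    save_top_k=1,
    save_last=True,
)
```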
| {"golden_diff": "diff --git a/pytorch_lightning/callbacks/model_checkpoint.py b/pytorch_lightning/callbacks/model_checkpoint.py\n--- a/pytorch_lightning/callbacks/model_checkpoint.py\n+++ b/pytorch_lightning/callbacks/model_checkpoint.py\n@@ -43,6 +43,7 @@\n \n monitor: quantity to monitor.\n verbose: verbosity mode. Default: ``False``.\n+ save_last: always saves the model at the end of the epoch. Default: ``False``.\n save_top_k: if `save_top_k == k`,\n the best k models according to\n the quantity monitored will be saved.\n@@ -83,7 +84,7 @@\n \"\"\"\n \n def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,\n- save_top_k: int = 1, save_weights_only: bool = False,\n+ save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False,\n mode: str = 'auto', period: int = 1, prefix: str = ''):\n super().__init__()\n if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:\n@@ -103,6 +104,7 @@\n else:\n self.dirpath, self.filename = os.path.split(filepath)\n os.makedirs(self.dirpath, exist_ok=True)\n+ self.save_last = save_last\n self.save_top_k = save_top_k\n self.save_weights_only = save_weights_only\n self.period = period\n@@ -217,6 +219,10 @@\n \n self.epoch_last_check = epoch\n \n+ if self.save_last:\n+ filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt')\n+ self._save_model(filepath)\n+\n filepath = self.format_checkpoint_name(epoch, metrics)\n version_cnt = 0\n while os.path.isfile(filepath):\n", "issue": "Save checkpoint for the last epoch\nHow to save the checkpoint only for the last epoch?\r\nIn the docs:\r\n\r\n```\r\nif save_top_k == k, the best k models according to the quantity monitored will be saved. if save_top_k == 0, no models are saved. if save_top_k == -1, all models are saved. Please note that the monitors are checked every period epochs. if save_top_k >= 2 and the callback is called multiple times inside an epoch, the name of the saved file will be appended with a version count starting with v0.\r\n```\r\n* `k = 0` does not save any.\r\n* `k > 1` saves only a few the best\r\n* `k = -1` saves all of them\r\n\r\nCurrently, I am using k = -1, but it space consuming.\n", "before_files": [{"content": "\"\"\"\nModel Checkpointing\n===================\n\nAutomatically save model checkpoints during training.\n\n\"\"\"\n\nimport os\nimport re\n\nimport numpy as np\nfrom typing import Optional\n\nimport torch\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks.base import Callback\nfrom pytorch_lightning.utilities import rank_zero_warn, rank_zero_only\n\n\nclass ModelCheckpoint(Callback):\n r\"\"\"\n Save the model after every epoch.\n\n Args:\n filepath: path to save the model file.\n Can contain named formatting options to be auto-filled.\n\n Example::\n\n # custom path\n # saves a file like: my/path/epoch_0.ckpt\n >>> checkpoint_callback = ModelCheckpoint('my/path/')\n\n # save any arbitrary metrics like `val_loss`, etc. in name\n # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'\n ... )\n\n Can also be set to `None`, then it will be set to default location\n during trainer construction.\n\n monitor: quantity to monitor.\n verbose: verbosity mode. 
Default: ``False``.\n save_top_k: if `save_top_k == k`,\n the best k models according to\n the quantity monitored will be saved.\n if ``save_top_k == 0``, no models are saved.\n if ``save_top_k == -1``, all models are saved.\n Please note that the monitors are checked every `period` epochs.\n if ``save_top_k >= 2`` and the callback is called multiple\n times inside an epoch, the name of the saved file will be\n appended with a version count starting with `v0`.\n mode: one of {auto, min, max}.\n If ``save_top_k != 0``, the decision\n to overwrite the current save file is made\n based on either the maximization or the\n minimization of the monitored quantity. For `val_acc`,\n this should be `max`, for `val_loss` this should\n be `min`, etc. In `auto` mode, the direction is\n automatically inferred from the name of the monitored quantity.\n save_weights_only: if ``True``, then only the model's weights will be\n saved (``model.save_weights(filepath)``), else the full model\n is saved (``model.save(filepath)``).\n period: Interval (number of epochs) between checkpoints.\n\n Example::\n\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.callbacks import ModelCheckpoint\n\n # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min\n >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')\n >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)\n\n # save epoch and val_loss in name\n # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'\n ... )\n\n \"\"\"\n\n def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,\n save_top_k: int = 1, save_weights_only: bool = False,\n mode: str = 'auto', period: int = 1, prefix: str = ''):\n super().__init__()\n if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:\n rank_zero_warn(\n f\"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0.\"\n \"All files in this directory will be deleted when a checkpoint is saved!\"\n )\n self._rank = 0\n\n self.monitor = monitor\n self.verbose = verbose\n if filepath is None: # will be determined by trainer at runtime\n self.dirpath, self.filename = None, None\n else:\n if os.path.isdir(filepath):\n self.dirpath, self.filename = filepath, '{epoch}'\n else:\n self.dirpath, self.filename = os.path.split(filepath)\n os.makedirs(self.dirpath, exist_ok=True)\n self.save_top_k = save_top_k\n self.save_weights_only = save_weights_only\n self.period = period\n self.epoch_last_check = None\n self.prefix = prefix\n self.best_k_models = {}\n # {filename: monitor}\n self.kth_best_model = ''\n self.best = 0\n self.save_function = None\n\n torch_inf = torch.tensor(np.Inf)\n mode_dict = {\n 'min': (torch_inf, 'min'),\n 'max': (-torch_inf, 'max'),\n 'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')\n else (torch_inf, 'min'),\n }\n\n if mode not in mode_dict:\n rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, '\n f'fallback to auto mode.', RuntimeWarning)\n mode = 'auto'\n\n self.kth_value, self.mode = mode_dict[mode]\n\n def _del_model(self, filepath):\n if os.path.isfile(filepath):\n os.remove(filepath)\n\n def _save_model(self, filepath):\n # make paths\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n # delegate the saving to the model\n if self.save_function is not None:\n 
self.save_function(filepath, self.save_weights_only)\n else:\n raise ValueError(\".save_function() not set\")\n\n def check_monitor_top_k(self, current):\n less_than_k_models = len(self.best_k_models) < self.save_top_k\n if less_than_k_models:\n return True\n\n if not isinstance(current, torch.Tensor):\n rank_zero_warn(\n f'{current} is supposed to be a torch.Tensor. Saving checkpoint may not work correctly. '\n f'HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning\n )\n current = torch.tensor(current)\n\n monitor_op = {\n \"min\": torch.lt,\n \"max\": torch.gt,\n }[self.mode]\n\n return monitor_op(current, self.best_k_models[self.kth_best_model])\n\n def format_checkpoint_name(self, epoch, metrics, ver=None):\n \"\"\"Generate a filename according to the defined template.\n\n Example::\n\n >>> tmpdir = os.path.dirname(__file__)\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'epoch=0.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))\n 'epoch=005.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))\n 'epoch=2-val_loss=0.12.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'missing=0.ckpt'\n \"\"\"\n # check if user passed in keys to the string\n groups = re.findall(r'(\\{.*?)[:\\}]', self.filename)\n\n if len(groups) == 0:\n # default name\n filename = f'{self.prefix}_ckpt_epoch_{epoch}'\n else:\n metrics['epoch'] = epoch\n filename = self.filename\n for tmp in groups:\n name = tmp[1:]\n filename = filename.replace(tmp, name + '={' + name)\n if name not in metrics:\n metrics[name] = 0\n filename = filename.format(**metrics)\n str_ver = f'_v{ver}' if ver is not None else ''\n filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')\n return filepath\n\n @rank_zero_only\n def on_validation_end(self, trainer, pl_module):\n # only run on main process\n if trainer.proc_rank != 0:\n return\n\n metrics = trainer.callback_metrics\n epoch = trainer.current_epoch\n if self.save_top_k == 0:\n # no models are saved\n return\n if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:\n # skipping in this term\n return\n\n self.epoch_last_check = epoch\n\n filepath = self.format_checkpoint_name(epoch, metrics)\n version_cnt = 0\n while os.path.isfile(filepath):\n filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)\n # this epoch called before\n version_cnt += 1\n\n if self.save_top_k != -1:\n current = metrics.get(self.monitor)\n\n if not isinstance(current, torch.Tensor):\n rank_zero_warn(\n f'The metric you returned {current} must be a Torch.Tensor instance, checkpoint not saved '\n f'HINT: what is the value of {self.monitor} in validation_end()?', RuntimeWarning\n )\n\n if current is None:\n rank_zero_warn(\n f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning\n )\n elif self.check_monitor_top_k(current):\n self._do_check_save(filepath, current, epoch)\n elif self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')\n\n else:\n if self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: saving model to {filepath}')\n self._save_model(filepath)\n\n def _do_check_save(self, filepath, 
current, epoch):\n # remove kth\n\n del_list = []\n if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:\n delpath = self.kth_best_model\n self.best_k_models.pop(self.kth_best_model)\n del_list.append(delpath)\n\n self.best_k_models[filepath] = current\n if len(self.best_k_models) == self.save_top_k:\n # monitor dict has reached k elements\n _op = max if self.mode == 'min' else min\n self.kth_best_model = _op(self.best_k_models,\n key=self.best_k_models.get)\n self.kth_value = self.best_k_models[self.kth_best_model]\n\n _op = min if self.mode == 'min' else max\n self.best = _op(self.best_k_models.values())\n\n if self.verbose > 0:\n log.info(\n f'\\nEpoch {epoch:05d}: {self.monitor} reached'\n f' {current:0.5f} (best {self.best:0.5f}), saving model to'\n f' {filepath} as top {self.save_top_k}')\n self._save_model(filepath)\n\n for cur_path in del_list:\n if cur_path != filepath:\n self._del_model(cur_path)\n", "path": "pytorch_lightning/callbacks/model_checkpoint.py"}], "after_files": [{"content": "\"\"\"\nModel Checkpointing\n===================\n\nAutomatically save model checkpoints during training.\n\n\"\"\"\n\nimport os\nimport re\n\nimport numpy as np\nfrom typing import Optional\n\nimport torch\nfrom pytorch_lightning import _logger as log\nfrom pytorch_lightning.callbacks.base import Callback\nfrom pytorch_lightning.utilities import rank_zero_warn, rank_zero_only\n\n\nclass ModelCheckpoint(Callback):\n r\"\"\"\n Save the model after every epoch.\n\n Args:\n filepath: path to save the model file.\n Can contain named formatting options to be auto-filled.\n\n Example::\n\n # custom path\n # saves a file like: my/path/epoch_0.ckpt\n >>> checkpoint_callback = ModelCheckpoint('my/path/')\n\n # save any arbitrary metrics like `val_loss`, etc. in name\n # saves a file like: my/path/epoch=2-val_loss=0.2_other_metric=0.3.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/{epoch}-{val_loss:.2f}-{other_metric:.2f}'\n ... )\n\n Can also be set to `None`, then it will be set to default location\n during trainer construction.\n\n monitor: quantity to monitor.\n verbose: verbosity mode. Default: ``False``.\n save_last: always saves the model at the end of the epoch. Default: ``False``.\n save_top_k: if `save_top_k == k`,\n the best k models according to\n the quantity monitored will be saved.\n if ``save_top_k == 0``, no models are saved.\n if ``save_top_k == -1``, all models are saved.\n Please note that the monitors are checked every `period` epochs.\n if ``save_top_k >= 2`` and the callback is called multiple\n times inside an epoch, the name of the saved file will be\n appended with a version count starting with `v0`.\n mode: one of {auto, min, max}.\n If ``save_top_k != 0``, the decision\n to overwrite the current save file is made\n based on either the maximization or the\n minimization of the monitored quantity. For `val_acc`,\n this should be `max`, for `val_loss` this should\n be `min`, etc. 
In `auto` mode, the direction is\n automatically inferred from the name of the monitored quantity.\n save_weights_only: if ``True``, then only the model's weights will be\n saved (``model.save_weights(filepath)``), else the full model\n is saved (``model.save(filepath)``).\n period: Interval (number of epochs) between checkpoints.\n\n Example::\n\n >>> from pytorch_lightning import Trainer\n >>> from pytorch_lightning.callbacks import ModelCheckpoint\n\n # saves checkpoints to 'my/path/' whenever 'val_loss' has a new min\n >>> checkpoint_callback = ModelCheckpoint(filepath='my/path/')\n >>> trainer = Trainer(checkpoint_callback=checkpoint_callback)\n\n # save epoch and val_loss in name\n # saves a file like: my/path/sample-mnist_epoch=02_val_loss=0.32.ckpt\n >>> checkpoint_callback = ModelCheckpoint(\n ... filepath='my/path/sample-mnist_{epoch:02d}-{val_loss:.2f}'\n ... )\n\n \"\"\"\n\n def __init__(self, filepath: Optional[str] = None, monitor: str = 'val_loss', verbose: bool = False,\n save_last: bool = False, save_top_k: int = 1, save_weights_only: bool = False,\n mode: str = 'auto', period: int = 1, prefix: str = ''):\n super().__init__()\n if save_top_k > 0 and filepath is not None and os.path.isdir(filepath) and len(os.listdir(filepath)) > 0:\n rank_zero_warn(\n f\"Checkpoint directory {filepath} exists and is not empty with save_top_k != 0.\"\n \"All files in this directory will be deleted when a checkpoint is saved!\"\n )\n self._rank = 0\n\n self.monitor = monitor\n self.verbose = verbose\n if filepath is None: # will be determined by trainer at runtime\n self.dirpath, self.filename = None, None\n else:\n if os.path.isdir(filepath):\n self.dirpath, self.filename = filepath, '{epoch}'\n else:\n self.dirpath, self.filename = os.path.split(filepath)\n os.makedirs(self.dirpath, exist_ok=True)\n self.save_last = save_last\n self.save_top_k = save_top_k\n self.save_weights_only = save_weights_only\n self.period = period\n self.epoch_last_check = None\n self.prefix = prefix\n self.best_k_models = {}\n # {filename: monitor}\n self.kth_best_model = ''\n self.best = 0\n self.save_function = None\n\n torch_inf = torch.tensor(np.Inf)\n mode_dict = {\n 'min': (torch_inf, 'min'),\n 'max': (-torch_inf, 'max'),\n 'auto': (-torch_inf, 'max') if 'acc' in self.monitor or self.monitor.startswith('fmeasure')\n else (torch_inf, 'min'),\n }\n\n if mode not in mode_dict:\n rank_zero_warn(f'ModelCheckpoint mode {mode} is unknown, '\n f'fallback to auto mode.', RuntimeWarning)\n mode = 'auto'\n\n self.kth_value, self.mode = mode_dict[mode]\n\n def _del_model(self, filepath):\n if os.path.isfile(filepath):\n os.remove(filepath)\n\n def _save_model(self, filepath):\n # make paths\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n\n # delegate the saving to the model\n if self.save_function is not None:\n self.save_function(filepath, self.save_weights_only)\n else:\n raise ValueError(\".save_function() not set\")\n\n def check_monitor_top_k(self, current):\n less_than_k_models = len(self.best_k_models) < self.save_top_k\n if less_than_k_models:\n return True\n\n if not isinstance(current, torch.Tensor):\n rank_zero_warn(\n f'{current} is supposed to be a torch.Tensor. Saving checkpoint may not work correctly. 
'\n f'HINT: check the value of {self.monitor} in your validation loop', RuntimeWarning\n )\n current = torch.tensor(current)\n\n monitor_op = {\n \"min\": torch.lt,\n \"max\": torch.gt,\n }[self.mode]\n\n return monitor_op(current, self.best_k_models[self.kth_best_model])\n\n def format_checkpoint_name(self, epoch, metrics, ver=None):\n \"\"\"Generate a filename according to the defined template.\n\n Example::\n\n >>> tmpdir = os.path.dirname(__file__)\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'epoch=0.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch:03d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(5, {}))\n 'epoch=005.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{epoch}-{val_loss:.2f}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(2, dict(val_loss=0.123456)))\n 'epoch=2-val_loss=0.12.ckpt'\n >>> ckpt = ModelCheckpoint(os.path.join(tmpdir, '{missing:d}'))\n >>> os.path.basename(ckpt.format_checkpoint_name(0, {}))\n 'missing=0.ckpt'\n \"\"\"\n # check if user passed in keys to the string\n groups = re.findall(r'(\\{.*?)[:\\}]', self.filename)\n\n if len(groups) == 0:\n # default name\n filename = f'{self.prefix}_ckpt_epoch_{epoch}'\n else:\n metrics['epoch'] = epoch\n filename = self.filename\n for tmp in groups:\n name = tmp[1:]\n filename = filename.replace(tmp, name + '={' + name)\n if name not in metrics:\n metrics[name] = 0\n filename = filename.format(**metrics)\n str_ver = f'_v{ver}' if ver is not None else ''\n filepath = os.path.join(self.dirpath, self.prefix + filename + str_ver + '.ckpt')\n return filepath\n\n @rank_zero_only\n def on_validation_end(self, trainer, pl_module):\n # only run on main process\n if trainer.proc_rank != 0:\n return\n\n metrics = trainer.callback_metrics\n epoch = trainer.current_epoch\n if self.save_top_k == 0:\n # no models are saved\n return\n if self.epoch_last_check is not None and (epoch - self.epoch_last_check) < self.period:\n # skipping in this term\n return\n\n self.epoch_last_check = epoch\n\n if self.save_last:\n filepath = os.path.join(self.dirpath, self.prefix + 'last.ckpt')\n self._save_model(filepath)\n\n filepath = self.format_checkpoint_name(epoch, metrics)\n version_cnt = 0\n while os.path.isfile(filepath):\n filepath = self.format_checkpoint_name(epoch, metrics, ver=version_cnt)\n # this epoch called before\n version_cnt += 1\n\n if self.save_top_k != -1:\n current = metrics.get(self.monitor)\n\n if not isinstance(current, torch.Tensor):\n rank_zero_warn(\n f'The metric you returned {current} must be a Torch.Tensor instance, checkpoint not saved '\n f'HINT: what is the value of {self.monitor} in validation_end()?', RuntimeWarning\n )\n\n if current is None:\n rank_zero_warn(\n f'Can save best model only with {self.monitor} available, skipping.', RuntimeWarning\n )\n elif self.check_monitor_top_k(current):\n self._do_check_save(filepath, current, epoch)\n elif self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: {self.monitor} was not in top {self.save_top_k}')\n\n else:\n if self.verbose > 0:\n log.info(f'\\nEpoch {epoch:05d}: saving model to {filepath}')\n self._save_model(filepath)\n\n def _do_check_save(self, filepath, current, epoch):\n # remove kth\n\n del_list = []\n if len(self.best_k_models) == self.save_top_k and self.save_top_k > 0:\n delpath = self.kth_best_model\n self.best_k_models.pop(self.kth_best_model)\n del_list.append(delpath)\n\n self.best_k_models[filepath] = current\n if 
len(self.best_k_models) == self.save_top_k:\n # monitor dict has reached k elements\n _op = max if self.mode == 'min' else min\n self.kth_best_model = _op(self.best_k_models,\n key=self.best_k_models.get)\n self.kth_value = self.best_k_models[self.kth_best_model]\n\n _op = min if self.mode == 'min' else max\n self.best = _op(self.best_k_models.values())\n\n if self.verbose > 0:\n log.info(\n f'\\nEpoch {epoch:05d}: {self.monitor} reached'\n f' {current:0.5f} (best {self.best:0.5f}), saving model to'\n f' {filepath} as top {self.save_top_k}')\n self._save_model(filepath)\n\n for cur_path in del_list:\n if cur_path != filepath:\n self._del_model(cur_path)\n", "path": "pytorch_lightning/callbacks/model_checkpoint.py"}]} | 3,640 | 435 |
gh_patches_debug_34714 | rasdani/github-patches | git_diff | aws__aws-cli-206 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cloudformation describe-stack-events --output table omits ResourceStatusReason
aws-cli/0.13.2 Python/2.7.5 Darwin/12.4.1
This may be an RFE. With a command like `aws cloudformation describe-stack-events --output table ...`, the table output does not include the `ResourceStatusReason` data. This is useful data when failures occur. For example, here is some sample output with an interesting `ResourceStatusReason`.
```
{
"StackId": "arn:aws:cloudformation:us-west-2:317324027142:stack/spot-3x-m1large/e1fa9ac0-f985-11e2-aa7f-507bfc8736d2",
"EventId": "LaunchConfig1-CREATE_FAILED-1375236694000",
"ResourceStatus": "CREATE_FAILED",
"ResourceType": "AWS::AutoScaling::LaunchConfiguration",
"Timestamp": "2013-07-31T02:11:34Z",
"ResourceStatusReason": "AMI cannot be described",
"StackName": "spot-3x-m1large",
"PhysicalResourceId": "spot-3x-m1large-LaunchConfig1-EFTX7ICLP050",
"LogicalResourceId": "LaunchConfig1"
}
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `awscli/formatter.py`
Content:
```
1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.
2
3 # Licensed under the Apache License, Version 2.0 (the "License"). You
4 # may not use this file except in compliance with the License. A copy of
5 # the License is located at
6
7 # http://aws.amazon.com/apache2.0/
8
9 # or in the "license" file accompanying this file. This file is
10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
11 # ANY KIND, either express or implied. See the License for the specific
12 # language governing permissions and limitations under the License.
13 import logging
14 import sys
15 import json
16
17 import six
18
19 from awscli.table import MultiTable, Styler, ColorizedStyler
20
21
22 LOG = logging.getLogger(__name__)
23
24
25 class Formatter(object):
26 def __init__(self, args):
27 self._args = args
28
29 def _remove_request_id(self, response_data):
30 # We only want to display the ResponseMetadata (which includes
31 # the request id) if there is an error in the response.
32 # Since all errors have been unified under the Errors key,
33 # this should be a reasonable way to filter.
34 if 'Errors' not in response_data:
35 if 'ResponseMetadata' in response_data:
36 if 'RequestId' in response_data['ResponseMetadata']:
37 request_id = response_data['ResponseMetadata']['RequestId']
38 LOG.debug('RequestId: %s', request_id)
39 del response_data['ResponseMetadata']
40
41
42 class FullyBufferedFormatter(Formatter):
43 def __call__(self, operation, response, stream=None):
44 if stream is None:
45 # Retrieve stdout on invocation instead of at import time
46 # so that if anything wraps stdout we'll pick up those changes
47 # (specifically colorama on windows wraps stdout).
48 stream = sys.stdout
49 # I think the interfaces between non-paginated
50 # and paginated responses can still be cleaned up.
51 if operation.can_paginate and self._args.paginate:
52 response_data = response.build_full_result()
53 else:
54 response_data = response
55 try:
56 self._remove_request_id(response_data)
57 self._format_response(operation, response_data, stream)
58 finally:
59 # flush is needed to avoid the "close failed in file object
60 # destructor" in python2.x (see http://bugs.python.org/issue11380).
61 stream.flush()
62
63
64 class JSONFormatter(FullyBufferedFormatter):
65
66 def _format_response(self, operation, response, stream):
67 # For operations that have no response body (e.g. s3 put-object)
68 # the response will be an empty string. We don't want to print
69 # that out to the user but other "falsey" values like an empty
70 # dictionary should be printed.
71 if response != '':
72 json.dump(response, stream, indent=4)
73 stream.write('\n')
74
75
76 class TableFormatter(FullyBufferedFormatter):
77 """Pretty print a table from a given response.
78
79 The table formatter is able to take any generic response
80 and generate a pretty printed table. It does this without
81 using the output definition from the model.
82
83 """
84 def __init__(self, args, table=None):
85 super(TableFormatter, self).__init__(args)
86 if args.color == 'auto':
87 self.table = MultiTable(initial_section=False,
88 column_separator='|')
89 elif args.color == 'off':
90 styler = Styler()
91 self.table = MultiTable(initial_section=False,
92 column_separator='|', styler=styler)
93 elif args.color == 'on':
94 styler = ColorizedStyler()
95 self.table = MultiTable(initial_section=False,
96 column_separator='|', styler=styler)
97 else:
98 raise ValueError("Unknown color option: %s" % args.color)
99
100 def _format_response(self, operation, response, stream):
101 if self._build_table(operation.name, response):
102 try:
103 self.table.render(stream)
104 except IOError:
105 # If they're piping stdout to another process which exits before
106 # we're done writing all of our output, we'll get an error about a
107 # closed pipe which we can safely ignore.
108 pass
109
110 def _build_table(self, title, current, indent_level=0):
111 if not current:
112 return False
113 self.table.new_section(title, indent_level=indent_level)
114 if isinstance(current, list):
115 if isinstance(current[0], dict):
116 self._build_sub_table_from_list(current, indent_level, title)
117 else:
118 for item in current:
119 self.table.add_row([item])
120 if isinstance(current, dict):
121 # Render a single row section with keys as header
122 # and the row as the values, unless the value
123 # is a list.
124 self._build_sub_table_from_dict(current, indent_level)
125 return True
126
127 def _build_sub_table_from_dict(self, current, indent_level):
128 # Render a single row section with keys as header
129 # and the row as the values, unless the value
130 # is a list.
131 headers, more = self._group_scalar_keys(current)
132 if len(headers) == 1:
133 # Special casing if a dict has a single scalar key/value pair.
134 self.table.add_row([headers[0], current[headers[0]]])
135 elif headers:
136 self.table.add_row_header(headers)
137 self.table.add_row([current[k] for k in headers])
138 for remaining in more:
139 self._build_table(remaining, current[remaining],
140 indent_level=indent_level + 1)
141
142 def _build_sub_table_from_list(self, current, indent_level, title):
143 headers, more = self._group_scalar_keys(current[0])
144 self.table.add_row_header(headers)
145 first = True
146 for element in current:
147 if not first and more:
148 self.table.new_section(title,
149 indent_level=indent_level)
150 self.table.add_row_header(headers)
151 first = False
152 self.table.add_row([element[header] for header in headers])
153 for remaining in more:
154 # Some of the non scalar attributes may not necessarily
155 # be in every single element of the list, so we need to
156 # check this condition before recursing.
157 if remaining in element:
158 self._build_table(remaining, element[remaining],
159 indent_level=indent_level + 1)
160
161 def _scalar_type(self, element):
162 return not isinstance(element, (list, dict))
163
164 def _group_scalar_keys(self, current):
165 # Given a dict, separate the keys into those whose values are
166 # scalar, and those whose values aren't. Return two lists,
167 # one is the scalar value keys, the second is the remaining keys.
168 more = []
169 headers = []
170 for element in current:
171 if self._scalar_type(current[element]):
172 headers.append(element)
173 else:
174 more.append(element)
175 headers.sort()
176 more.sort()
177 return headers, more
178
179
180 class TextFormatter(FullyBufferedFormatter):
181
182 def _output(self, data, stream, label=None):
183 """
184 A very simple, very stupid text formatter that has no
185 knowledge of the output as defined in the JSON model.
186 """
187 if isinstance(data, dict):
188 scalars = []
189 non_scalars = []
190 for key, val in data.items():
191 if isinstance(val, dict):
192 non_scalars.append((key, val))
193 elif isinstance(val, list):
194 non_scalars.append((key, val))
195 elif not isinstance(val, six.string_types):
196 scalars.append(str(val))
197 else:
198 scalars.append(val)
199 if label:
200 scalars.insert(0, label.upper())
201 stream.write('\t'.join(scalars))
202 stream.write('\n')
203 for label, non_scalar in non_scalars:
204 self._output(non_scalar, stream, label)
205 elif isinstance(data, list):
206 for d in data:
207 self._output(d, stream)
208
209 def _format_response(self, operation, response, stream):
210 self._output(response, stream)
211
212
213 def get_formatter(format_type, args):
214 if format_type == 'json':
215 return JSONFormatter(args)
216 elif format_type == 'text':
217 return TextFormatter(args)
218 elif format_type == 'table':
219 return TableFormatter(args)
220 return None
221
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/awscli/formatter.py b/awscli/formatter.py
--- a/awscli/formatter.py
+++ b/awscli/formatter.py
@@ -140,7 +140,7 @@
indent_level=indent_level + 1)
def _build_sub_table_from_list(self, current, indent_level, title):
- headers, more = self._group_scalar_keys(current[0])
+ headers, more = self._group_scalar_keys_from_list(current)
self.table.add_row_header(headers)
first = True
for element in current:
@@ -149,7 +149,9 @@
indent_level=indent_level)
self.table.add_row_header(headers)
first = False
- self.table.add_row([element[header] for header in headers])
+ # Use .get() to account for the fact that sometimes an element
+ # may not have all the keys from the header.
+ self.table.add_row([element.get(header, '') for header in headers])
for remaining in more:
# Some of the non scalar attributes may not necessarily
# be in every single element of the list, so we need to
@@ -161,6 +163,20 @@
def _scalar_type(self, element):
return not isinstance(element, (list, dict))
+ def _group_scalar_keys_from_list(self, list_of_dicts):
+ # We want to make sure we catch all the keys in the list of dicts.
+ # Most of the time each list element has the same keys, but sometimes
+ # a list element will have keys not defined in other elements.
+ headers = set()
+ more = set()
+ for item in list_of_dicts:
+ current_headers, current_more = self._group_scalar_keys(item)
+ headers.update(current_headers)
+ more.update(current_more)
+ headers = list(sorted(headers))
+ more = list(sorted(more))
+ return headers, more
+
def _group_scalar_keys(self, current):
# Given a dict, separate the keys into those whose values are
# scalar, and those whose values aren't. Return two lists,
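The patch above changes table rendering in two complementary ways: column headers are now the union of scalar keys across every element of the list (previously only the first element was inspected), and elements missing a key render as empty cells via `element.get(header, '')` instead of raising `KeyError`. A small, self-contained sketch of that behaviour, using hypothetical stack-event data rather than anything from the patch itself:

```python
# Hypothetical events: only the failed one carries ResourceStatusReason.
events = [
    {"LogicalResourceId": "LaunchConfig1",
     "ResourceStatus": "CREATE_FAILED",
     "ResourceStatusReason": "AMI cannot be described"},
    {"LogicalResourceId": "Instance1",
     "ResourceStatus": "CREATE_IN_PROGRESS"},
]

# Union of scalar keys across all elements (the old code looked at events[0] only).
headers = sorted({
    key
    for event in events
    for key, value in event.items()
    if not isinstance(value, (list, dict))
})

# Elements missing a key get an empty cell instead of raising KeyError.
rows = [[event.get(header, "") for header in headers] for event in events]

print(headers)
for row in rows:
    print(row)
```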
| {"golden_diff": "diff --git a/awscli/formatter.py b/awscli/formatter.py\n--- a/awscli/formatter.py\n+++ b/awscli/formatter.py\n@@ -140,7 +140,7 @@\n indent_level=indent_level + 1)\n \n def _build_sub_table_from_list(self, current, indent_level, title):\n- headers, more = self._group_scalar_keys(current[0])\n+ headers, more = self._group_scalar_keys_from_list(current)\n self.table.add_row_header(headers)\n first = True\n for element in current:\n@@ -149,7 +149,9 @@\n indent_level=indent_level)\n self.table.add_row_header(headers)\n first = False\n- self.table.add_row([element[header] for header in headers])\n+ # Use .get() to account for the fact that sometimes an element\n+ # may not have all the keys from the header.\n+ self.table.add_row([element.get(header, '') for header in headers])\n for remaining in more:\n # Some of the non scalar attributes may not necessarily\n # be in every single element of the list, so we need to\n@@ -161,6 +163,20 @@\n def _scalar_type(self, element):\n return not isinstance(element, (list, dict))\n \n+ def _group_scalar_keys_from_list(self, list_of_dicts):\n+ # We want to make sure we catch all the keys in the list of dicts.\n+ # Most of the time each list element has the same keys, but sometimes\n+ # a list element will have keys not defined in other elements.\n+ headers = set()\n+ more = set()\n+ for item in list_of_dicts:\n+ current_headers, current_more = self._group_scalar_keys(item)\n+ headers.update(current_headers)\n+ more.update(current_more)\n+ headers = list(sorted(headers))\n+ more = list(sorted(more))\n+ return headers, more\n+\n def _group_scalar_keys(self, current):\n # Given a dict, separate the keys into those whose values are\n # scalar, and those whose values aren't. Return two lists,\n", "issue": "cloudformation describe-stack-events --output table omits ResourceStatusReason\naws-cli/0.13.2 Python/2.7.5 Darwin/12.4.1\n\nThis may be an RFE. With a command like `aws cloudformation describe-stack-events --output table ...` the table output does not include the `ResourceStatusReason` data. This is useful data when failures occur. For example here is some sample output with an interesting `ResourceStatusReason`.\n\n```\n{\n \"StackId\": \"arn:aws:cloudformation:us-west-2:317324027142:stack/spot-3x-m1large/e1fa9ac0-f985-11e2-aa7f-507bfc8736d2\", \n \"EventId\": \"LaunchConfig1-CREATE_FAILED-1375236694000\", \n \"ResourceStatus\": \"CREATE_FAILED\", \n \"ResourceType\": \"AWS::AutoScaling::LaunchConfiguration\", \n \"Timestamp\": \"2013-07-31T02:11:34Z\", \n \"ResourceStatusReason\": \"AMI cannot be described\", \n \"StackName\": \"spot-3x-m1large\", \n \"PhysicalResourceId\": \"spot-3x-m1large-LaunchConfig1-EFTX7ICLP050\", \n \"LogicalResourceId\": \"LaunchConfig1\"\n}\n```\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n\n# http://aws.amazon.com/apache2.0/\n\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\nimport sys\nimport json\n\nimport six\n\nfrom awscli.table import MultiTable, Styler, ColorizedStyler\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass Formatter(object):\n def __init__(self, args):\n self._args = args\n\n def _remove_request_id(self, response_data):\n # We only want to display the ResponseMetadata (which includes\n # the request id) if there is an error in the response.\n # Since all errors have been unified under the Errors key,\n # this should be a reasonable way to filter.\n if 'Errors' not in response_data:\n if 'ResponseMetadata' in response_data:\n if 'RequestId' in response_data['ResponseMetadata']:\n request_id = response_data['ResponseMetadata']['RequestId']\n LOG.debug('RequestId: %s', request_id)\n del response_data['ResponseMetadata']\n\n\nclass FullyBufferedFormatter(Formatter):\n def __call__(self, operation, response, stream=None):\n if stream is None:\n # Retrieve stdout on invocation instead of at import time\n # so that if anything wraps stdout we'll pick up those changes\n # (specifically colorama on windows wraps stdout).\n stream = sys.stdout\n # I think the interfaces between non-paginated\n # and paginated responses can still be cleaned up.\n if operation.can_paginate and self._args.paginate:\n response_data = response.build_full_result()\n else:\n response_data = response\n try:\n self._remove_request_id(response_data)\n self._format_response(operation, response_data, stream)\n finally:\n # flush is needed to avoid the \"close failed in file object\n # destructor\" in python2.x (see http://bugs.python.org/issue11380).\n stream.flush()\n\n\nclass JSONFormatter(FullyBufferedFormatter):\n\n def _format_response(self, operation, response, stream):\n # For operations that have no response body (e.g. s3 put-object)\n # the response will be an empty string. We don't want to print\n # that out to the user but other \"falsey\" values like an empty\n # dictionary should be printed.\n if response != '':\n json.dump(response, stream, indent=4)\n stream.write('\\n')\n\n\nclass TableFormatter(FullyBufferedFormatter):\n \"\"\"Pretty print a table from a given response.\n\n The table formatter is able to take any generic response\n and generate a pretty printed table. 
It does this without\n using the output definition from the model.\n\n \"\"\"\n def __init__(self, args, table=None):\n super(TableFormatter, self).__init__(args)\n if args.color == 'auto':\n self.table = MultiTable(initial_section=False,\n column_separator='|')\n elif args.color == 'off':\n styler = Styler()\n self.table = MultiTable(initial_section=False,\n column_separator='|', styler=styler)\n elif args.color == 'on':\n styler = ColorizedStyler()\n self.table = MultiTable(initial_section=False,\n column_separator='|', styler=styler)\n else:\n raise ValueError(\"Unknown color option: %s\" % args.color)\n\n def _format_response(self, operation, response, stream):\n if self._build_table(operation.name, response):\n try:\n self.table.render(stream)\n except IOError:\n # If they're piping stdout to another process which exits before\n # we're done writing all of our output, we'll get an error about a\n # closed pipe which we can safely ignore.\n pass\n\n def _build_table(self, title, current, indent_level=0):\n if not current:\n return False\n self.table.new_section(title, indent_level=indent_level)\n if isinstance(current, list):\n if isinstance(current[0], dict):\n self._build_sub_table_from_list(current, indent_level, title)\n else:\n for item in current:\n self.table.add_row([item])\n if isinstance(current, dict):\n # Render a single row section with keys as header\n # and the row as the values, unless the value\n # is a list.\n self._build_sub_table_from_dict(current, indent_level)\n return True\n\n def _build_sub_table_from_dict(self, current, indent_level):\n # Render a single row section with keys as header\n # and the row as the values, unless the value\n # is a list.\n headers, more = self._group_scalar_keys(current)\n if len(headers) == 1:\n # Special casing if a dict has a single scalar key/value pair.\n self.table.add_row([headers[0], current[headers[0]]])\n elif headers:\n self.table.add_row_header(headers)\n self.table.add_row([current[k] for k in headers])\n for remaining in more:\n self._build_table(remaining, current[remaining],\n indent_level=indent_level + 1)\n\n def _build_sub_table_from_list(self, current, indent_level, title):\n headers, more = self._group_scalar_keys(current[0])\n self.table.add_row_header(headers)\n first = True\n for element in current:\n if not first and more:\n self.table.new_section(title,\n indent_level=indent_level)\n self.table.add_row_header(headers)\n first = False\n self.table.add_row([element[header] for header in headers])\n for remaining in more:\n # Some of the non scalar attributes may not necessarily\n # be in every single element of the list, so we need to\n # check this condition before recursing.\n if remaining in element:\n self._build_table(remaining, element[remaining],\n indent_level=indent_level + 1)\n\n def _scalar_type(self, element):\n return not isinstance(element, (list, dict))\n\n def _group_scalar_keys(self, current):\n # Given a dict, separate the keys into those whose values are\n # scalar, and those whose values aren't. 
Return two lists,\n # one is the scalar value keys, the second is the remaining keys.\n more = []\n headers = []\n for element in current:\n if self._scalar_type(current[element]):\n headers.append(element)\n else:\n more.append(element)\n headers.sort()\n more.sort()\n return headers, more\n\n\nclass TextFormatter(FullyBufferedFormatter):\n\n def _output(self, data, stream, label=None):\n \"\"\"\n A very simple, very stupid text formatter that has no\n knowledge of the output as defined in the JSON model.\n \"\"\"\n if isinstance(data, dict):\n scalars = []\n non_scalars = []\n for key, val in data.items():\n if isinstance(val, dict):\n non_scalars.append((key, val))\n elif isinstance(val, list):\n non_scalars.append((key, val))\n elif not isinstance(val, six.string_types):\n scalars.append(str(val))\n else:\n scalars.append(val)\n if label:\n scalars.insert(0, label.upper())\n stream.write('\\t'.join(scalars))\n stream.write('\\n')\n for label, non_scalar in non_scalars:\n self._output(non_scalar, stream, label)\n elif isinstance(data, list):\n for d in data:\n self._output(d, stream)\n\n def _format_response(self, operation, response, stream):\n self._output(response, stream)\n\n\ndef get_formatter(format_type, args):\n if format_type == 'json':\n return JSONFormatter(args)\n elif format_type == 'text':\n return TextFormatter(args)\n elif format_type == 'table':\n return TableFormatter(args)\n return None\n", "path": "awscli/formatter.py"}], "after_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n\n# http://aws.amazon.com/apache2.0/\n\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport logging\nimport sys\nimport json\n\nimport six\n\nfrom awscli.table import MultiTable, Styler, ColorizedStyler\n\n\nLOG = logging.getLogger(__name__)\n\n\nclass Formatter(object):\n def __init__(self, args):\n self._args = args\n\n def _remove_request_id(self, response_data):\n # We only want to display the ResponseMetadata (which includes\n # the request id) if there is an error in the response.\n # Since all errors have been unified under the Errors key,\n # this should be a reasonable way to filter.\n if 'Errors' not in response_data:\n if 'ResponseMetadata' in response_data:\n if 'RequestId' in response_data['ResponseMetadata']:\n request_id = response_data['ResponseMetadata']['RequestId']\n LOG.debug('RequestId: %s', request_id)\n del response_data['ResponseMetadata']\n\n\nclass FullyBufferedFormatter(Formatter):\n def __call__(self, operation, response, stream=None):\n if stream is None:\n # Retrieve stdout on invocation instead of at import time\n # so that if anything wraps stdout we'll pick up those changes\n # (specifically colorama on windows wraps stdout).\n stream = sys.stdout\n # I think the interfaces between non-paginated\n # and paginated responses can still be cleaned up.\n if operation.can_paginate and self._args.paginate:\n response_data = response.build_full_result()\n else:\n response_data = response\n try:\n self._remove_request_id(response_data)\n self._format_response(operation, response_data, stream)\n finally:\n # flush is needed to avoid the \"close failed in file object\n # destructor\" in python2.x (see http://bugs.python.org/issue11380).\n stream.flush()\n\n\nclass JSONFormatter(FullyBufferedFormatter):\n\n def _format_response(self, operation, response, stream):\n # For operations that have no response body (e.g. s3 put-object)\n # the response will be an empty string. We don't want to print\n # that out to the user but other \"falsey\" values like an empty\n # dictionary should be printed.\n if response != '':\n json.dump(response, stream, indent=4)\n stream.write('\\n')\n\n\nclass TableFormatter(FullyBufferedFormatter):\n \"\"\"Pretty print a table from a given response.\n\n The table formatter is able to take any generic response\n and generate a pretty printed table. 
It does this without\n using the output definition from the model.\n\n \"\"\"\n def __init__(self, args, table=None):\n super(TableFormatter, self).__init__(args)\n if args.color == 'auto':\n self.table = MultiTable(initial_section=False,\n column_separator='|')\n elif args.color == 'off':\n styler = Styler()\n self.table = MultiTable(initial_section=False,\n column_separator='|', styler=styler)\n elif args.color == 'on':\n styler = ColorizedStyler()\n self.table = MultiTable(initial_section=False,\n column_separator='|', styler=styler)\n else:\n raise ValueError(\"Unknown color option: %s\" % args.color)\n\n def _format_response(self, operation, response, stream):\n if self._build_table(operation.name, response):\n try:\n self.table.render(stream)\n except IOError:\n # If they're piping stdout to another process which exits before\n # we're done writing all of our output, we'll get an error about a\n # closed pipe which we can safely ignore.\n pass\n\n def _build_table(self, title, current, indent_level=0):\n if not current:\n return False\n self.table.new_section(title, indent_level=indent_level)\n if isinstance(current, list):\n if isinstance(current[0], dict):\n self._build_sub_table_from_list(current, indent_level, title)\n else:\n for item in current:\n self.table.add_row([item])\n if isinstance(current, dict):\n # Render a single row section with keys as header\n # and the row as the values, unless the value\n # is a list.\n self._build_sub_table_from_dict(current, indent_level)\n return True\n\n def _build_sub_table_from_dict(self, current, indent_level):\n # Render a single row section with keys as header\n # and the row as the values, unless the value\n # is a list.\n headers, more = self._group_scalar_keys(current)\n if len(headers) == 1:\n # Special casing if a dict has a single scalar key/value pair.\n self.table.add_row([headers[0], current[headers[0]]])\n elif headers:\n self.table.add_row_header(headers)\n self.table.add_row([current[k] for k in headers])\n for remaining in more:\n self._build_table(remaining, current[remaining],\n indent_level=indent_level + 1)\n\n def _build_sub_table_from_list(self, current, indent_level, title):\n headers, more = self._group_scalar_keys_from_list(current)\n self.table.add_row_header(headers)\n first = True\n for element in current:\n if not first and more:\n self.table.new_section(title,\n indent_level=indent_level)\n self.table.add_row_header(headers)\n first = False\n # Use .get() to account for the fact that sometimes an element\n # may not have all the keys from the header.\n self.table.add_row([element.get(header, '') for header in headers])\n for remaining in more:\n # Some of the non scalar attributes may not necessarily\n # be in every single element of the list, so we need to\n # check this condition before recursing.\n if remaining in element:\n self._build_table(remaining, element[remaining],\n indent_level=indent_level + 1)\n\n def _scalar_type(self, element):\n return not isinstance(element, (list, dict))\n\n def _group_scalar_keys_from_list(self, list_of_dicts):\n # We want to make sure we catch all the keys in the list of dicts.\n # Most of the time each list element has the same keys, but sometimes\n # a list element will have keys not defined in other elements.\n headers = set()\n more = set()\n for item in list_of_dicts:\n current_headers, current_more = self._group_scalar_keys(item)\n headers.update(current_headers)\n more.update(current_more)\n headers = list(sorted(headers))\n more = list(sorted(more))\n return 
headers, more\n\n def _group_scalar_keys(self, current):\n # Given a dict, separate the keys into those whose values are\n # scalar, and those whose values aren't. Return two lists,\n # one is the scalar value keys, the second is the remaining keys.\n more = []\n headers = []\n for element in current:\n if self._scalar_type(current[element]):\n headers.append(element)\n else:\n more.append(element)\n headers.sort()\n more.sort()\n return headers, more\n\n\nclass TextFormatter(FullyBufferedFormatter):\n\n def _output(self, data, stream, label=None):\n \"\"\"\n A very simple, very stupid text formatter that has no\n knowledge of the output as defined in the JSON model.\n \"\"\"\n if isinstance(data, dict):\n scalars = []\n non_scalars = []\n for key, val in data.items():\n if isinstance(val, dict):\n non_scalars.append((key, val))\n elif isinstance(val, list):\n non_scalars.append((key, val))\n elif not isinstance(val, six.string_types):\n scalars.append(str(val))\n else:\n scalars.append(val)\n if label:\n scalars.insert(0, label.upper())\n stream.write('\\t'.join(scalars))\n stream.write('\\n')\n for label, non_scalar in non_scalars:\n self._output(non_scalar, stream, label)\n elif isinstance(data, list):\n for d in data:\n self._output(d, stream)\n\n def _format_response(self, operation, response, stream):\n self._output(response, stream)\n\n\ndef get_formatter(format_type, args):\n if format_type == 'json':\n return JSONFormatter(args)\n elif format_type == 'text':\n return TextFormatter(args)\n elif format_type == 'table':\n return TableFormatter(args)\n return None\n", "path": "awscli/formatter.py"}]} | 2,953 | 473 |
gh_patches_debug_28745 | rasdani/github-patches | git_diff | SeldonIO__MLServer-531 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow to configure uvicorn logging
While trying to add custom handlers to the application logger, we found that the uvicorn ones were not working as expected. It seems that uvicorn overrides the logging configuration when the Config object is initialized.
It would be nice to allow users to configure the uvicorn logger. This can be done by setting the path to a logging configuration file when creating uvicorn's Config object.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/logging.py`
Content:
```
1 import logging
2 import sys
3
4 from logging import Formatter, StreamHandler
5
6 from .settings import Settings
7
8 LoggerName = "mlserver"
9 LoggerFormat = "%(asctime)s [%(name)s] %(levelname)s - %(message)s"
10
11 logger = logging.getLogger(LoggerName)
12
13
14 def get_logger():
15 return logger
16
17
18 def configure_logger(settings: Settings = None):
19 logger = get_logger()
20
21 # Don't add handler twice
22 if not logger.handlers:
23 stream_handler = StreamHandler(sys.stdout)
24 formatter = Formatter(LoggerFormat)
25 stream_handler.setFormatter(formatter)
26
27 logger.addHandler(stream_handler)
28
29 logger.setLevel(logging.INFO)
30 if settings and settings.debug:
31 logger.setLevel(logging.DEBUG)
32
33 return logger
34
```
Path: `mlserver/rest/server.py`
Content:
```
1 import uvicorn
2
3 from ..settings import Settings
4 from ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers
5 from ..model import MLModel
6
7 from .utils import matches
8 from .app import create_app
9
10
11 class _NoSignalServer(uvicorn.Server):
12 def install_signal_handlers(self):
13 pass
14
15
16 class RESTServer:
17 def __init__(
18 self,
19 settings: Settings,
20 data_plane: DataPlane,
21 model_repository_handlers: ModelRepositoryHandlers,
22 ):
23 self._settings = settings
24 self._data_plane = data_plane
25 self._model_repository_handlers = model_repository_handlers
26 self._app = create_app(
27 self._settings,
28 data_plane=self._data_plane,
29 model_repository_handlers=self._model_repository_handlers,
30 )
31
32 async def add_custom_handlers(self, model: MLModel):
33 handlers = get_custom_handlers(model)
34 for custom_handler, handler_method in handlers:
35 self._app.add_api_route(
36 custom_handler.rest_path,
37 handler_method,
38 methods=[custom_handler.rest_method],
39 )
40
41 async def delete_custom_handlers(self, model: MLModel):
42 handlers = get_custom_handlers(model)
43 if len(handlers) == 0:
44 return
45
46 # NOTE: Loop in reverse, so that it's quicker to find all the recently
47 # added routes and we can remove routes on-the-fly
48 for i, route in reversed(list(enumerate(self._app.routes))):
49 for j, (custom_handler, handler_method) in enumerate(handlers):
50 if matches(route, custom_handler, handler_method): # type: ignore
51 self._app.routes.pop(i)
52 handlers.pop(j)
53
54 async def start(self):
55 cfg = uvicorn.Config(
56 self._app, host=self._settings.host, port=self._settings.http_port
57 )
58 self._server = _NoSignalServer(cfg)
59 await self._server.serve()
60
61 async def stop(self):
62 self._server.handle_exit(sig=None, frame=None)
63
```
Path: `mlserver/settings.py`
Content:
```
1 from typing import List, Optional
2 from pydantic import BaseSettings, PyObject
3
4 from .version import __version__
5 from .types import MetadataTensor
6
7 ENV_PREFIX_SETTINGS = "MLSERVER_"
8 ENV_PREFIX_MODEL_SETTINGS = "MLSERVER_MODEL_"
9
10
11 class CORSSettings(BaseSettings):
12 class Config:
13 env_prefix = ENV_PREFIX_SETTINGS
14
15 allow_origins: Optional[List[str]] = []
16 """
17 A list of origins that should be permitted to make
18 cross-origin requests. E.g. ['https://example.org', 'https://www.example.org'].
19 You can use ['*'] to allow any origin
20 """
21
22 allow_origin_regex: Optional[str] = None
23 """
24 A regex string to match against origins that
25 should be permitted to make cross-origin requests.
26 e.g. 'https:\\/\\/.*\\.example\\.org'
27 """
28
29 allow_credentials: Optional[bool] = False
30 """Indicate that cookies should be supported for cross-origin requests"""
31
32 allow_methods: Optional[List[str]] = ["GET"]
33 """A list of HTTP methods that should be allowed for cross-origin requests"""
34
35 allow_headers: Optional[List[str]] = []
36 """A list of HTTP request headers that should be supported for
37 cross-origin requests"""
38
39 expose_headers: Optional[List[str]] = []
40 """Indicate any response headers that should be made accessible to the browser"""
41
42 max_age: Optional[int] = 600
43 """Sets a maximum time in seconds for browsers to cache CORS responses"""
44
45
46 class Settings(BaseSettings):
47 class Config:
48 env_prefix = ENV_PREFIX_SETTINGS
49
50 debug: bool = True
51
52 # Model repository settings
53 model_repository_root: str = "."
54 """Root of the model repository, where we will search for models."""
55
56 load_models_at_startup: bool = True
57 """Flag to load all available models automatically at startup."""
58
59 # Server metadata
60 server_name: str = "mlserver"
61 """Name of the server."""
62
63 server_version: str = __version__
64 """Version of the server."""
65
66 extensions: List[str] = []
67 """Server extensions loaded."""
68
69 # Server settings
70 host: str = "0.0.0.0"
71 """Host where to listen for connections."""
72
73 http_port: int = 8080
74 """Port where to listen for HTTP / REST connections."""
75
76 grpc_port: int = 8081
77 """Port where to listen for gRPC connections."""
78
79 grpc_max_message_length: Optional[int] = None
80 """Maximum length (i.e. size) of gRPC payloads."""
81
82 # CORS settings
83 cors_settings: Optional[CORSSettings] = None
84
85 # Metrics settings
86 metrics_endpoint: Optional[str] = "/metrics"
87 """
88 Endpoint used to expose Prometheus metrics. Alternatively, can be set to
89 `None` to disable it
90 """
91
92
93 class ModelParameters(BaseSettings):
94 """
95 Parameters that apply only to a particular instance of a model.
96 This can include things like model weights, or arbitrary ``extra``
97 parameters particular to the underlying inference runtime.
98 The main difference with respect to ``ModelSettings`` is that parameters
99 can change on each instance (e.g. each version) of the model.
100 """
101
102 class Config:
103 env_prefix = ENV_PREFIX_MODEL_SETTINGS
104
105 uri: Optional[str] = None
106 """
107 URI where the model artifacts can be found.
108 This path must be either absolute or relative to where MLServer is running.
109 """
110
111 version: Optional[str] = None
112 """Version of the model."""
113
114 format: Optional[str] = None
115 """Format of the model (only available on certain runtimes)."""
116
117 content_type: Optional[str] = None
118 """Default content type to use for requests and responses."""
119
120 extra: Optional[dict] = {}
121 """Arbitrary settings, dependent on the inference runtime
122 implementation."""
123
124
125 class ModelSettings(BaseSettings):
126 class Config:
127 env_prefix = ENV_PREFIX_MODEL_SETTINGS
128 underscore_attrs_are_private = True
129
130 # Source points to the file where model settings were loaded from
131 _source: Optional[str] = None
132
133 name: str = ""
134 """Name of the model."""
135
136 # Model metadata
137 platform: str = ""
138 """Framework used to train and serialise the model (e.g. sklearn)."""
139
140 versions: List[str] = []
141 """Versions of dependencies used to train the model (e.g.
142 sklearn/0.20.1)."""
143
144 inputs: List[MetadataTensor] = []
145 """Metadata about the inputs accepted by the model."""
146
147 outputs: List[MetadataTensor] = []
148 """Metadata about the outputs returned by the model."""
149
150 # Parallel settings
151 parallel_workers: int = 4
152 """When parallel inference is enabled, number of workers to run inference
153 across."""
154
155 warm_workers: bool = False
156 """When parallel inference is enabled, optionally load model to all workers
157 on startup"""
158
159 # Adaptive Batching settings (disabled by default)
160 max_batch_size: int = 0
161 """When adaptive batching is enabled, maximum number of requests to group
162 together in a single batch."""
163
164 max_batch_time: float = 0.0
165 """When adaptive batching is enabled, maximum amount of time (in seconds)
166 to wait for enough requests to build a full batch."""
167
168 # Custom model class implementation
169 implementation: PyObject = "mlserver.model.MLModel" # type: ignore
170 """*Python path* to the inference runtime to use to serve this model (e.g.
171 ``mlserver_sklearn.SKLearnModel``)."""
172
173 # Model parameters are meant to be set directly by the MLServer runtime.
174 # However, it's also possible to override them manually.
175 parameters: Optional[ModelParameters] = None
176 """Extra parameters for each instance of this model."""
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/logging.py b/mlserver/logging.py
--- a/mlserver/logging.py
+++ b/mlserver/logging.py
@@ -2,6 +2,7 @@
import sys
from logging import Formatter, StreamHandler
+import logging.config
from .settings import Settings
@@ -15,6 +16,10 @@
return logger
+def apply_logging_file(logging_settings: str):
+ logging.config.fileConfig(fname=logging_settings, disable_existing_loggers=False)
+
+
def configure_logger(settings: Settings = None):
logger = get_logger()
@@ -30,4 +35,7 @@
if settings and settings.debug:
logger.setLevel(logging.DEBUG)
+ if settings and settings.logging_settings:
+ apply_logging_file(settings.logging_settings)
+
return logger
diff --git a/mlserver/rest/server.py b/mlserver/rest/server.py
--- a/mlserver/rest/server.py
+++ b/mlserver/rest/server.py
@@ -53,7 +53,10 @@
async def start(self):
cfg = uvicorn.Config(
- self._app, host=self._settings.host, port=self._settings.http_port
+ self._app,
+ host=self._settings.host,
+ port=self._settings.http_port,
+ log_config=self._settings.logging_settings,
)
self._server = _NoSignalServer(cfg)
await self._server.serve()
diff --git a/mlserver/settings.py b/mlserver/settings.py
--- a/mlserver/settings.py
+++ b/mlserver/settings.py
@@ -89,6 +89,10 @@
`None` to disable it
"""
+ # Logging settings
+ logging_settings: Optional[str] = None
+ """Path to logging config file"""
+
class ModelParameters(BaseSettings):
"""
| {"golden_diff": "diff --git a/mlserver/logging.py b/mlserver/logging.py\n--- a/mlserver/logging.py\n+++ b/mlserver/logging.py\n@@ -2,6 +2,7 @@\n import sys\n \n from logging import Formatter, StreamHandler\n+import logging.config\n \n from .settings import Settings\n \n@@ -15,6 +16,10 @@\n return logger\n \n \n+def apply_logging_file(logging_settings: str):\n+ logging.config.fileConfig(fname=logging_settings, disable_existing_loggers=False)\n+\n+\n def configure_logger(settings: Settings = None):\n logger = get_logger()\n \n@@ -30,4 +35,7 @@\n if settings and settings.debug:\n logger.setLevel(logging.DEBUG)\n \n+ if settings and settings.logging_settings:\n+ apply_logging_file(settings.logging_settings)\n+\n return logger\ndiff --git a/mlserver/rest/server.py b/mlserver/rest/server.py\n--- a/mlserver/rest/server.py\n+++ b/mlserver/rest/server.py\n@@ -53,7 +53,10 @@\n \n async def start(self):\n cfg = uvicorn.Config(\n- self._app, host=self._settings.host, port=self._settings.http_port\n+ self._app,\n+ host=self._settings.host,\n+ port=self._settings.http_port,\n+ log_config=self._settings.logging_settings,\n )\n self._server = _NoSignalServer(cfg)\n await self._server.serve()\ndiff --git a/mlserver/settings.py b/mlserver/settings.py\n--- a/mlserver/settings.py\n+++ b/mlserver/settings.py\n@@ -89,6 +89,10 @@\n `None` to disable it\n \"\"\"\n \n+ # Logging settings\n+ logging_settings: Optional[str] = None\n+ \"\"\"Path to logging config file\"\"\"\n+\n \n class ModelParameters(BaseSettings):\n \"\"\"\n", "issue": "Allow to configure uvicorn logging\nTrying to add custom handlers to the application logger we have found that the uvicorn ones are not working as expected. It seems that uvicorn overrides the logging configuration when the Config object is initialized.\r\n\r\nIt would be nice to allow users to configure the uvicorn logger. 
This can be done by setting the path to a logging configuration file when creating uvicorn's Config object.\n", "before_files": [{"content": "import logging\nimport sys\n\nfrom logging import Formatter, StreamHandler\n\nfrom .settings import Settings\n\nLoggerName = \"mlserver\"\nLoggerFormat = \"%(asctime)s [%(name)s] %(levelname)s - %(message)s\"\n\nlogger = logging.getLogger(LoggerName)\n\n\ndef get_logger():\n return logger\n\n\ndef configure_logger(settings: Settings = None):\n logger = get_logger()\n\n # Don't add handler twice\n if not logger.handlers:\n stream_handler = StreamHandler(sys.stdout)\n formatter = Formatter(LoggerFormat)\n stream_handler.setFormatter(formatter)\n\n logger.addHandler(stream_handler)\n\n logger.setLevel(logging.INFO)\n if settings and settings.debug:\n logger.setLevel(logging.DEBUG)\n\n return logger\n", "path": "mlserver/logging.py"}, {"content": "import uvicorn\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers\nfrom ..model import MLModel\n\nfrom .utils import matches\nfrom .app import create_app\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass RESTServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n self._app = create_app(\n self._settings,\n data_plane=self._data_plane,\n model_repository_handlers=self._model_repository_handlers,\n )\n\n async def add_custom_handlers(self, model: MLModel):\n handlers = get_custom_handlers(model)\n for custom_handler, handler_method in handlers:\n self._app.add_api_route(\n custom_handler.rest_path,\n handler_method,\n methods=[custom_handler.rest_method],\n )\n\n async def delete_custom_handlers(self, model: MLModel):\n handlers = get_custom_handlers(model)\n if len(handlers) == 0:\n return\n\n # NOTE: Loop in reverse, so that it's quicker to find all the recently\n # added routes and we can remove routes on-the-fly\n for i, route in reversed(list(enumerate(self._app.routes))):\n for j, (custom_handler, handler_method) in enumerate(handlers):\n if matches(route, custom_handler, handler_method): # type: ignore\n self._app.routes.pop(i)\n handlers.pop(j)\n\n async def start(self):\n cfg = uvicorn.Config(\n self._app, host=self._settings.host, port=self._settings.http_port\n )\n self._server = _NoSignalServer(cfg)\n await self._server.serve()\n\n async def stop(self):\n self._server.handle_exit(sig=None, frame=None)\n", "path": "mlserver/rest/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass CORSSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n allow_origins: Optional[List[str]] = []\n \"\"\"\n A list of origins that should be permitted to make\n cross-origin requests. E.g. ['https://example.org', 'https://www.example.org'].\n You can use ['*'] to allow any origin\n \"\"\"\n\n allow_origin_regex: Optional[str] = None\n \"\"\"\n A regex string to match against origins that\n should be permitted to make cross-origin requests.\n e.g. 
'https:\\\\/\\\\/.*\\\\.example\\\\.org'\n \"\"\"\n\n allow_credentials: Optional[bool] = False\n \"\"\"Indicate that cookies should be supported for cross-origin requests\"\"\"\n\n allow_methods: Optional[List[str]] = [\"GET\"]\n \"\"\"A list of HTTP methods that should be allowed for cross-origin requests\"\"\"\n\n allow_headers: Optional[List[str]] = []\n \"\"\"A list of HTTP request headers that should be supported for\n cross-origin requests\"\"\"\n\n expose_headers: Optional[List[str]] = []\n \"\"\"Indicate any response headers that should be made accessible to the browser\"\"\"\n\n max_age: Optional[int] = 600\n \"\"\"Sets a maximum time in seconds for browsers to cache CORS responses\"\"\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository settings\n model_repository_root: str = \".\"\n \"\"\"Root of the model repository, where we will search for models.\"\"\"\n\n load_models_at_startup: bool = True\n \"\"\"Flag to load all available models automatically at startup.\"\"\"\n\n # Server metadata\n server_name: str = \"mlserver\"\n \"\"\"Name of the server.\"\"\"\n\n server_version: str = __version__\n \"\"\"Version of the server.\"\"\"\n\n extensions: List[str] = []\n \"\"\"Server extensions loaded.\"\"\"\n\n # Server settings\n host: str = \"0.0.0.0\"\n \"\"\"Host where to listen for connections.\"\"\"\n\n http_port: int = 8080\n \"\"\"Port where to listen for HTTP / REST connections.\"\"\"\n\n grpc_port: int = 8081\n \"\"\"Port where to listen for gRPC connections.\"\"\"\n\n grpc_max_message_length: Optional[int] = None\n \"\"\"Maximum length (i.e. size) of gRPC payloads.\"\"\"\n\n # CORS settings\n cors_settings: Optional[CORSSettings] = None\n\n # Metrics settings\n metrics_endpoint: Optional[str] = \"/metrics\"\n \"\"\"\n Endpoint used to expose Prometheus metrics. Alternatively, can be set to\n `None` to disable it\n \"\"\"\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights, or arbitrary ``extra``\n parameters particular to the underlying inference runtime.\n The main difference with respect to ``ModelSettings`` is that parameters\n can change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n \"\"\"\n URI where the model artifacts can be found.\n This path must be either absolute or relative to where MLServer is running.\n \"\"\"\n\n version: Optional[str] = None\n \"\"\"Version of the model.\"\"\"\n\n format: Optional[str] = None\n \"\"\"Format of the model (only available on certain runtimes).\"\"\"\n\n content_type: Optional[str] = None\n \"\"\"Default content type to use for requests and responses.\"\"\"\n\n extra: Optional[dict] = {}\n \"\"\"Arbitrary settings, dependent on the inference runtime\n implementation.\"\"\"\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n underscore_attrs_are_private = True\n\n # Source points to the file where model settings were loaded from\n _source: Optional[str] = None\n\n name: str = \"\"\n \"\"\"Name of the model.\"\"\"\n\n # Model metadata\n platform: str = \"\"\n \"\"\"Framework used to train and serialise the model (e.g. 
sklearn).\"\"\"\n\n versions: List[str] = []\n \"\"\"Versions of dependencies used to train the model (e.g.\n sklearn/0.20.1).\"\"\"\n\n inputs: List[MetadataTensor] = []\n \"\"\"Metadata about the inputs accepted by the model.\"\"\"\n\n outputs: List[MetadataTensor] = []\n \"\"\"Metadata about the outputs returned by the model.\"\"\"\n\n # Parallel settings\n parallel_workers: int = 4\n \"\"\"When parallel inference is enabled, number of workers to run inference\n across.\"\"\"\n\n warm_workers: bool = False\n \"\"\"When parallel inference is enabled, optionally load model to all workers\n on startup\"\"\"\n\n # Adaptive Batching settings (disabled by default)\n max_batch_size: int = 0\n \"\"\"When adaptive batching is enabled, maximum number of requests to group\n together in a single batch.\"\"\"\n\n max_batch_time: float = 0.0\n \"\"\"When adaptive batching is enabled, maximum amount of time (in seconds)\n to wait for enough requests to build a full batch.\"\"\"\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n \"\"\"*Python path* to the inference runtime to use to serve this model (e.g.\n ``mlserver_sklearn.SKLearnModel``).\"\"\"\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n \"\"\"Extra parameters for each instance of this model.\"\"\"\n", "path": "mlserver/settings.py"}], "after_files": [{"content": "import logging\nimport sys\n\nfrom logging import Formatter, StreamHandler\nimport logging.config\n\nfrom .settings import Settings\n\nLoggerName = \"mlserver\"\nLoggerFormat = \"%(asctime)s [%(name)s] %(levelname)s - %(message)s\"\n\nlogger = logging.getLogger(LoggerName)\n\n\ndef get_logger():\n return logger\n\n\ndef apply_logging_file(logging_settings: str):\n logging.config.fileConfig(fname=logging_settings, disable_existing_loggers=False)\n\n\ndef configure_logger(settings: Settings = None):\n logger = get_logger()\n\n # Don't add handler twice\n if not logger.handlers:\n stream_handler = StreamHandler(sys.stdout)\n formatter = Formatter(LoggerFormat)\n stream_handler.setFormatter(formatter)\n\n logger.addHandler(stream_handler)\n\n logger.setLevel(logging.INFO)\n if settings and settings.debug:\n logger.setLevel(logging.DEBUG)\n\n if settings and settings.logging_settings:\n apply_logging_file(settings.logging_settings)\n\n return logger\n", "path": "mlserver/logging.py"}, {"content": "import uvicorn\n\nfrom ..settings import Settings\nfrom ..handlers import DataPlane, ModelRepositoryHandlers, get_custom_handlers\nfrom ..model import MLModel\n\nfrom .utils import matches\nfrom .app import create_app\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass RESTServer:\n def __init__(\n self,\n settings: Settings,\n data_plane: DataPlane,\n model_repository_handlers: ModelRepositoryHandlers,\n ):\n self._settings = settings\n self._data_plane = data_plane\n self._model_repository_handlers = model_repository_handlers\n self._app = create_app(\n self._settings,\n data_plane=self._data_plane,\n model_repository_handlers=self._model_repository_handlers,\n )\n\n async def add_custom_handlers(self, model: MLModel):\n handlers = get_custom_handlers(model)\n for custom_handler, handler_method in handlers:\n self._app.add_api_route(\n custom_handler.rest_path,\n handler_method,\n methods=[custom_handler.rest_method],\n )\n\n async def 
delete_custom_handlers(self, model: MLModel):\n handlers = get_custom_handlers(model)\n if len(handlers) == 0:\n return\n\n # NOTE: Loop in reverse, so that it's quicker to find all the recently\n # added routes and we can remove routes on-the-fly\n for i, route in reversed(list(enumerate(self._app.routes))):\n for j, (custom_handler, handler_method) in enumerate(handlers):\n if matches(route, custom_handler, handler_method): # type: ignore\n self._app.routes.pop(i)\n handlers.pop(j)\n\n async def start(self):\n cfg = uvicorn.Config(\n self._app,\n host=self._settings.host,\n port=self._settings.http_port,\n log_config=self._settings.logging_settings,\n )\n self._server = _NoSignalServer(cfg)\n await self._server.serve()\n\n async def stop(self):\n self._server.handle_exit(sig=None, frame=None)\n", "path": "mlserver/rest/server.py"}, {"content": "from typing import List, Optional\nfrom pydantic import BaseSettings, PyObject\n\nfrom .version import __version__\nfrom .types import MetadataTensor\n\nENV_PREFIX_SETTINGS = \"MLSERVER_\"\nENV_PREFIX_MODEL_SETTINGS = \"MLSERVER_MODEL_\"\n\n\nclass CORSSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n allow_origins: Optional[List[str]] = []\n \"\"\"\n A list of origins that should be permitted to make\n cross-origin requests. E.g. ['https://example.org', 'https://www.example.org'].\n You can use ['*'] to allow any origin\n \"\"\"\n\n allow_origin_regex: Optional[str] = None\n \"\"\"\n A regex string to match against origins that\n should be permitted to make cross-origin requests.\n e.g. 'https:\\\\/\\\\/.*\\\\.example\\\\.org'\n \"\"\"\n\n allow_credentials: Optional[bool] = False\n \"\"\"Indicate that cookies should be supported for cross-origin requests\"\"\"\n\n allow_methods: Optional[List[str]] = [\"GET\"]\n \"\"\"A list of HTTP methods that should be allowed for cross-origin requests\"\"\"\n\n allow_headers: Optional[List[str]] = []\n \"\"\"A list of HTTP request headers that should be supported for\n cross-origin requests\"\"\"\n\n expose_headers: Optional[List[str]] = []\n \"\"\"Indicate any response headers that should be made accessible to the browser\"\"\"\n\n max_age: Optional[int] = 600\n \"\"\"Sets a maximum time in seconds for browsers to cache CORS responses\"\"\"\n\n\nclass Settings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_SETTINGS\n\n debug: bool = True\n\n # Model repository settings\n model_repository_root: str = \".\"\n \"\"\"Root of the model repository, where we will search for models.\"\"\"\n\n load_models_at_startup: bool = True\n \"\"\"Flag to load all available models automatically at startup.\"\"\"\n\n # Server metadata\n server_name: str = \"mlserver\"\n \"\"\"Name of the server.\"\"\"\n\n server_version: str = __version__\n \"\"\"Version of the server.\"\"\"\n\n extensions: List[str] = []\n \"\"\"Server extensions loaded.\"\"\"\n\n # Server settings\n host: str = \"0.0.0.0\"\n \"\"\"Host where to listen for connections.\"\"\"\n\n http_port: int = 8080\n \"\"\"Port where to listen for HTTP / REST connections.\"\"\"\n\n grpc_port: int = 8081\n \"\"\"Port where to listen for gRPC connections.\"\"\"\n\n grpc_max_message_length: Optional[int] = None\n \"\"\"Maximum length (i.e. size) of gRPC payloads.\"\"\"\n\n # CORS settings\n cors_settings: Optional[CORSSettings] = None\n\n # Metrics settings\n metrics_endpoint: Optional[str] = \"/metrics\"\n \"\"\"\n Endpoint used to expose Prometheus metrics. 
Alternatively, can be set to\n `None` to disable it\n \"\"\"\n\n # Logging settings\n logging_settings: Optional[str] = None\n \"\"\"Path to logging config file\"\"\"\n\n\nclass ModelParameters(BaseSettings):\n \"\"\"\n Parameters that apply only to a particular instance of a model.\n This can include things like model weights, or arbitrary ``extra``\n parameters particular to the underlying inference runtime.\n The main difference with respect to ``ModelSettings`` is that parameters\n can change on each instance (e.g. each version) of the model.\n \"\"\"\n\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n\n uri: Optional[str] = None\n \"\"\"\n URI where the model artifacts can be found.\n This path must be either absolute or relative to where MLServer is running.\n \"\"\"\n\n version: Optional[str] = None\n \"\"\"Version of the model.\"\"\"\n\n format: Optional[str] = None\n \"\"\"Format of the model (only available on certain runtimes).\"\"\"\n\n content_type: Optional[str] = None\n \"\"\"Default content type to use for requests and responses.\"\"\"\n\n extra: Optional[dict] = {}\n \"\"\"Arbitrary settings, dependent on the inference runtime\n implementation.\"\"\"\n\n\nclass ModelSettings(BaseSettings):\n class Config:\n env_prefix = ENV_PREFIX_MODEL_SETTINGS\n underscore_attrs_are_private = True\n\n # Source points to the file where model settings were loaded from\n _source: Optional[str] = None\n\n name: str = \"\"\n \"\"\"Name of the model.\"\"\"\n\n # Model metadata\n platform: str = \"\"\n \"\"\"Framework used to train and serialise the model (e.g. sklearn).\"\"\"\n\n versions: List[str] = []\n \"\"\"Versions of dependencies used to train the model (e.g.\n sklearn/0.20.1).\"\"\"\n\n inputs: List[MetadataTensor] = []\n \"\"\"Metadata about the inputs accepted by the model.\"\"\"\n\n outputs: List[MetadataTensor] = []\n \"\"\"Metadata about the outputs returned by the model.\"\"\"\n\n # Parallel settings\n parallel_workers: int = 4\n \"\"\"When parallel inference is enabled, number of workers to run inference\n across.\"\"\"\n\n warm_workers: bool = False\n \"\"\"When parallel inference is enabled, optionally load model to all workers\n on startup\"\"\"\n\n # Adaptive Batching settings (disabled by default)\n max_batch_size: int = 0\n \"\"\"When adaptive batching is enabled, maximum number of requests to group\n together in a single batch.\"\"\"\n\n max_batch_time: float = 0.0\n \"\"\"When adaptive batching is enabled, maximum amount of time (in seconds)\n to wait for enough requests to build a full batch.\"\"\"\n\n # Custom model class implementation\n implementation: PyObject = \"mlserver.model.MLModel\" # type: ignore\n \"\"\"*Python path* to the inference runtime to use to serve this model (e.g.\n ``mlserver_sklearn.SKLearnModel``).\"\"\"\n\n # Model parameters are meant to be set directly by the MLServer runtime.\n # However, it's also possible to override them manually.\n parameters: Optional[ModelParameters] = None\n \"\"\"Extra parameters for each instance of this model.\"\"\"\n", "path": "mlserver/settings.py"}]} | 2,841 | 391 |
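The patch above threads an optional logging config file through both the mlserver logger and uvicorn. As a rough usage sketch (the file name `logging.conf` and the environment-variable form are illustrative, not taken from the patch), the new field can be set like any other pydantic setting:

```python
# Sketch based only on the diff above: Settings gains a `logging_settings`
# field, and configure_logger() applies it via logging.config.fileConfig.
from mlserver.settings import Settings
from mlserver.logging import configure_logger

settings = Settings(logging_settings="./logging.conf")  # or MLSERVER_LOGGING_SETTINGS=./logging.conf
logger = configure_logger(settings)
```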
gh_patches_debug_652 | rasdani/github-patches | git_diff | pex-tool__pex-2086 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.127
On the docket:
+ [x] Pex fails to subset a "foo @ file:///bar" URL lock. #2083
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.126"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.126"
+__version__ = "2.1.127"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.126\"\n+__version__ = \"2.1.127\"\n", "issue": "Release 2.1.127\nOn the docket:\r\n+ [x] Pex fails to subset a \"foo @ file:///bar\" URL lock. #2083\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.126\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.127\"\n", "path": "pex/version.py"}]} | 349 | 98 |
gh_patches_debug_37088 | rasdani/github-patches | git_diff | pre-commit__pre-commit-321 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Does not work within submodules
I'm getting:
```
An unexpected error has occurred: NotADirectoryError: [Errno 20] Not a directory: '/home/quentin/chef-repo/cookbooks/ssmtp-cookbook/.git/hooks/pre-commit'
```
chef-repo is my primary repository, and ssmtp-cookbook is a git submodule of it.
**ssmtp-cookbook/.git file contents:**
```
gitdir: ../../.git/modules/cookbooks/ssmtp-cookbook
```
--- END ISSUE ---
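The `gitdir:` pointer file is the crux here: inside a submodule, `<repo>/.git` is a plain file rather than a directory, so any code that assumes `<repo>/.git/hooks/...` exists fails exactly as reported. As a minimal illustration of how the real git directory can be resolved (the function name and paths are illustrative, not from the project):

```python
import os
import subprocess

def resolve_git_dir(repo_root="."):
    # In a submodule, `.git` is a file containing "gitdir: ...", so ask git
    # where the real directory (and therefore hooks/) actually lives.
    relative = subprocess.check_output(
        ("git", "rev-parse", "--git-dir"), cwd=repo_root
    ).decode().strip()
    return os.path.normpath(os.path.join(repo_root, relative))
```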
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import functools
4 import logging
5 import os
6 import os.path
7 import re
8
9 from pre_commit.errors import FatalError
10 from pre_commit.util import CalledProcessError
11 from pre_commit.util import cmd_output
12 from pre_commit.util import memoize_by_cwd
13
14
15 logger = logging.getLogger('pre_commit')
16
17
18 def get_root():
19 try:
20 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
21 except CalledProcessError:
22 raise FatalError(
23 'Called from outside of the gits. Please cd to a git repository.'
24 )
25
26
27 def is_in_merge_conflict():
28 return (
29 os.path.exists(os.path.join('.git', 'MERGE_MSG')) and
30 os.path.exists(os.path.join('.git', 'MERGE_HEAD'))
31 )
32
33
34 def parse_merge_msg_for_conflicts(merge_msg):
35 # Conflicted files start with tabs
36 return [
37 line.lstrip('#').strip()
38 for line in merge_msg.splitlines()
39 # '#\t' for git 2.4.1
40 if line.startswith(('\t', '#\t'))
41 ]
42
43
44 @memoize_by_cwd
45 def get_conflicted_files():
46 logger.info('Checking merge-conflict files only.')
47 # Need to get the conflicted files from the MERGE_MSG because they could
48 # have resolved the conflict by choosing one side or the other
49 merge_msg = open(os.path.join('.git', 'MERGE_MSG')).read()
50 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
51
52 # This will get the rest of the changes made after the merge.
53 # If they resolved the merge conflict by choosing a mesh of both sides
54 # this will also include the conflicted files
55 tree_hash = cmd_output('git', 'write-tree')[1].strip()
56 merge_diff_filenames = cmd_output(
57 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',
58 )[1].splitlines()
59 return set(merge_conflict_filenames) | set(merge_diff_filenames)
60
61
62 @memoize_by_cwd
63 def get_staged_files():
64 return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()
65
66
67 @memoize_by_cwd
68 def get_all_files():
69 return cmd_output('git', 'ls-files')[1].splitlines()
70
71
72 def get_files_matching(all_file_list_strategy):
73 @functools.wraps(all_file_list_strategy)
74 @memoize_by_cwd
75 def wrapper(include_expr, exclude_expr):
76 include_regex = re.compile(include_expr)
77 exclude_regex = re.compile(exclude_expr)
78 return set(
79 filename
80 for filename in all_file_list_strategy()
81 if (
82 include_regex.search(filename) and
83 not exclude_regex.search(filename) and
84 os.path.exists(filename)
85 )
86 )
87 return wrapper
88
89
90 get_staged_files_matching = get_files_matching(get_staged_files)
91 get_all_files_matching = get_files_matching(get_all_files)
92 get_conflicted_files_matching = get_files_matching(get_conflicted_files)
93
```
Path: `pre_commit/runner.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import os
4 import os.path
5
6 from cached_property import cached_property
7
8 import pre_commit.constants as C
9 from pre_commit import git
10 from pre_commit.clientlib.validate_config import load_config
11 from pre_commit.repository import Repository
12 from pre_commit.store import Store
13
14
15 class Runner(object):
16 """A `Runner` represents the execution context of the hooks. Notably the
17 repository under test.
18 """
19
20 def __init__(self, git_root):
21 self.git_root = git_root
22
23 @classmethod
24 def create(cls):
25 """Creates a PreCommitRunner by doing the following:
26 - Finds the root of the current git repository
27 - chdirs to that directory
28 """
29 root = git.get_root()
30 os.chdir(root)
31 return cls(root)
32
33 @cached_property
34 def config_file_path(self):
35 return os.path.join(self.git_root, C.CONFIG_FILE)
36
37 @cached_property
38 def repositories(self):
39 """Returns a tuple of the configured repositories."""
40 config = load_config(self.config_file_path)
41 repositories = tuple(Repository.create(x, self.store) for x in config)
42 for repository in repositories:
43 repository.require_installed()
44 return repositories
45
46 def get_hook_path(self, hook_type):
47 return os.path.join(self.git_root, '.git', 'hooks', hook_type)
48
49 @cached_property
50 def pre_commit_path(self):
51 return self.get_hook_path('pre-commit')
52
53 @cached_property
54 def pre_push_path(self):
55 return self.get_hook_path('pre-push')
56
57 @cached_property
58 def cmd_runner(self):
59 # TODO: remove this and inline runner.store.cmd_runner
60 return self.store.cmd_runner
61
62 @cached_property
63 def store(self):
64 return Store()
65
```
Path: `pre_commit/main.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import argparse
4 import os
5 import sys
6
7 import pkg_resources
8
9 from pre_commit import color
10 from pre_commit import five
11 from pre_commit.commands.autoupdate import autoupdate
12 from pre_commit.commands.clean import clean
13 from pre_commit.commands.install_uninstall import install
14 from pre_commit.commands.install_uninstall import uninstall
15 from pre_commit.commands.run import run
16 from pre_commit.error_handler import error_handler
17 from pre_commit.runner import Runner
18
19
20 # https://github.com/pre-commit/pre-commit/issues/217
21 # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`
22 # to install packages to the wrong place. We don't want anything to deal with
23 # pyvenv
24 os.environ.pop('__PYVENV_LAUNCHER__', None)
25 # https://github.com/pre-commit/pre-commit/issues/300
26 # In git 2.6.3 (maybe others), git exports this while running pre-commit hooks
27 os.environ.pop('GIT_WORK_TREE', None)
28
29
30 def main(argv=None):
31 argv = argv if argv is not None else sys.argv[1:]
32 argv = [five.to_text(arg) for arg in argv]
33 parser = argparse.ArgumentParser()
34
35 # http://stackoverflow.com/a/8521644/812183
36 parser.add_argument(
37 '-V', '--version',
38 action='version',
39 version='%(prog)s {0}'.format(
40 pkg_resources.get_distribution('pre-commit').version
41 )
42 )
43
44 subparsers = parser.add_subparsers(dest='command')
45
46 install_parser = subparsers.add_parser(
47 'install', help='Install the pre-commit script.',
48 )
49 install_parser.add_argument(
50 '-f', '--overwrite', action='store_true',
51 help='Overwrite existing hooks / remove migration mode.',
52 )
53 install_parser.add_argument(
54 '--install-hooks', action='store_true',
55 help=(
56 'Whether to install hook environments for all environments '
57 'in the config file.'
58 ),
59 )
60 install_parser.add_argument(
61 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
62 default='pre-commit',
63 )
64
65 uninstall_parser = subparsers.add_parser(
66 'uninstall', help='Uninstall the pre-commit script.',
67 )
68 uninstall_parser.add_argument(
69 '-t', '--hook-type', choices=('pre-commit', 'pre-push'),
70 default='pre-commit',
71 )
72
73 subparsers.add_parser('clean', help='Clean out pre-commit files.')
74
75 subparsers.add_parser(
76 'autoupdate',
77 help="Auto-update pre-commit config to the latest repos' versions.",
78 )
79
80 run_parser = subparsers.add_parser('run', help='Run hooks.')
81 run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')
82 run_parser.add_argument(
83 '--color', default='auto', type=color.use_color,
84 help='Whether to use color in output. Defaults to `auto`',
85 )
86 run_parser.add_argument(
87 '--no-stash', default=False, action='store_true',
88 help='Use this option to prevent auto stashing of unstaged files.',
89 )
90 run_parser.add_argument(
91 '--verbose', '-v', action='store_true', default=False,
92 )
93 run_parser.add_argument(
94 '--origin', '-o',
95 help='The origin branch\'s commit_id when using `git push`',
96 )
97 run_parser.add_argument(
98 '--source', '-s',
99 help='The remote branch\'s commit_id when using `git push`',
100 )
101 run_parser.add_argument(
102 '--allow-unstaged-config', default=False, action='store_true',
103 help='Allow an unstaged config to be present. Note that this will'
104 'be stashed before parsing unless --no-stash is specified'
105 )
106 run_parser.add_argument(
107 '--hook-stage', choices=('commit', 'push'), default='commit',
108 help='The stage during which the hook is fired e.g. commit or push',
109 )
110 run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)
111 run_mutex_group.add_argument(
112 '--all-files', '-a', action='store_true', default=False,
113 help='Run on all the files in the repo. Implies --no-stash.',
114 )
115 run_mutex_group.add_argument(
116 '--files', nargs='*', help='Specific filenames to run hooks on.',
117 )
118
119 help = subparsers.add_parser(
120 'help', help='Show help for a specific command.'
121 )
122 help.add_argument('help_cmd', nargs='?', help='Command to show help for.')
123
124 # Argparse doesn't really provide a way to use a `default` subparser
125 if len(argv) == 0:
126 argv = ['run']
127 args = parser.parse_args(argv)
128
129 if args.command == 'help':
130 if args.help_cmd:
131 parser.parse_args([args.help_cmd, '--help'])
132 else:
133 parser.parse_args(['--help'])
134
135 with error_handler():
136 runner = Runner.create()
137
138 if args.command == 'install':
139 return install(
140 runner, overwrite=args.overwrite, hooks=args.install_hooks,
141 hook_type=args.hook_type,
142 )
143 elif args.command == 'uninstall':
144 return uninstall(runner, hook_type=args.hook_type)
145 elif args.command == 'clean':
146 return clean(runner)
147 elif args.command == 'autoupdate':
148 return autoupdate(runner)
149 elif args.command == 'run':
150 return run(runner, args)
151 else:
152 raise NotImplementedError(
153 'Command {0} not implemented.'.format(args.command)
154 )
155
156 raise AssertionError(
157 'Command {0} failed to exit with a returncode'.format(args.command)
158 )
159
160
161 if __name__ == '__main__':
162 exit(main())
163
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -24,10 +24,18 @@
)
+def get_git_dir(git_root):
+ return os.path.normpath(os.path.join(
+ git_root,
+ cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),
+ ))
+
+
def is_in_merge_conflict():
+ git_dir = get_git_dir('.')
return (
- os.path.exists(os.path.join('.git', 'MERGE_MSG')) and
- os.path.exists(os.path.join('.git', 'MERGE_HEAD'))
+ os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
+ os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
)
@@ -46,7 +54,7 @@
logger.info('Checking merge-conflict files only.')
# Need to get the conflicted files from the MERGE_MSG because they could
# have resolved the conflict by choosing one side or the other
- merge_msg = open(os.path.join('.git', 'MERGE_MSG')).read()
+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()
merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
# This will get the rest of the changes made after the merge.
diff --git a/pre_commit/main.py b/pre_commit/main.py
--- a/pre_commit/main.py
+++ b/pre_commit/main.py
@@ -25,6 +25,13 @@
# https://github.com/pre-commit/pre-commit/issues/300
# In git 2.6.3 (maybe others), git exports this while running pre-commit hooks
os.environ.pop('GIT_WORK_TREE', None)
+# In git 1.9.1 (maybe others), git exports these while running pre-commit hooks
+# in submodules. In the general case this causes problems.
+# These are covered by test_install_in_submodule_and_run
+# Causes git clone to clone wrong thing
+os.environ.pop('GIT_DIR', None)
+# Causes 'error invalid object ...' during commit
+os.environ.pop('GIT_INDEX_FILE', None)
def main(argv=None):
diff --git a/pre_commit/runner.py b/pre_commit/runner.py
--- a/pre_commit/runner.py
+++ b/pre_commit/runner.py
@@ -30,6 +30,10 @@
os.chdir(root)
return cls(root)
+ @cached_property
+ def git_dir(self):
+ return git.get_git_dir(self.git_root)
+
@cached_property
def config_file_path(self):
return os.path.join(self.git_root, C.CONFIG_FILE)
@@ -44,7 +48,7 @@
return repositories
def get_hook_path(self, hook_type):
- return os.path.join(self.git_root, '.git', 'hooks', hook_type)
+ return os.path.join(self.git_dir, 'hooks', hook_type)
@cached_property
def pre_commit_path(self):
| {"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -24,10 +24,18 @@\n )\n \n \n+def get_git_dir(git_root):\n+ return os.path.normpath(os.path.join(\n+ git_root,\n+ cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n+ ))\n+\n+\n def is_in_merge_conflict():\n+ git_dir = get_git_dir('.')\n return (\n- os.path.exists(os.path.join('.git', 'MERGE_MSG')) and\n- os.path.exists(os.path.join('.git', 'MERGE_HEAD'))\n+ os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n+ os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n \n \n@@ -46,7 +54,7 @@\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n- merge_msg = open(os.path.join('.git', 'MERGE_MSG')).read()\n+ merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n \n # This will get the rest of the changes made after the merge.\ndiff --git a/pre_commit/main.py b/pre_commit/main.py\n--- a/pre_commit/main.py\n+++ b/pre_commit/main.py\n@@ -25,6 +25,13 @@\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports this while running pre-commit hooks\n os.environ.pop('GIT_WORK_TREE', None)\n+# In git 1.9.1 (maybe others), git exports these while running pre-commit hooks\n+# in submodules. In the general case this causes problems.\n+# These are covered by test_install_in_submodule_and_run\n+# Causes git clone to clone wrong thing\n+os.environ.pop('GIT_DIR', None)\n+# Causes 'error invalid object ...' during commit\n+os.environ.pop('GIT_INDEX_FILE', None)\n \n \n def main(argv=None):\ndiff --git a/pre_commit/runner.py b/pre_commit/runner.py\n--- a/pre_commit/runner.py\n+++ b/pre_commit/runner.py\n@@ -30,6 +30,10 @@\n os.chdir(root)\n return cls(root)\n \n+ @cached_property\n+ def git_dir(self):\n+ return git.get_git_dir(self.git_root)\n+\n @cached_property\n def config_file_path(self):\n return os.path.join(self.git_root, C.CONFIG_FILE)\n@@ -44,7 +48,7 @@\n return repositories\n \n def get_hook_path(self, hook_type):\n- return os.path.join(self.git_root, '.git', 'hooks', hook_type)\n+ return os.path.join(self.git_dir, 'hooks', hook_type)\n \n @cached_property\n def pre_commit_path(self):\n", "issue": "Does not work within submodules\nI'm getting: \n\n```\nAn unexpected error has occurred: NotADirectoryError: [Errno 20] Not a directory: '/home/quentin/chef-repo/cookbooks/ssmtp-cookbook/.git/hooks/pre-commit'\n```\n\nchef-repo is my primary repository and ssmtp-cookbook a git submodule of that. \n\n**ssmtp-cookbook/.git file contents:**\n\n```\ngitdir: ../../.git/modules/cookbooks/ssmtp-cookbook\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\nimport os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'Called from outside of the gits. 
Please cd to a git repository.'\n )\n\n\ndef is_in_merge_conflict():\n return (\n os.path.exists(os.path.join('.git', 'MERGE_MSG')) and\n os.path.exists(os.path.join('.git', 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join('.git', 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.exists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n", "path": "pre_commit/git.py"}, {"content": "from __future__ import unicode_literals\n\nimport os\nimport os.path\n\nfrom cached_property import cached_property\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.clientlib.validate_config import load_config\nfrom pre_commit.repository import Repository\nfrom pre_commit.store import Store\n\n\nclass Runner(object):\n \"\"\"A `Runner` represents the execution context of the hooks. 
Notably the\n repository under test.\n \"\"\"\n\n def __init__(self, git_root):\n self.git_root = git_root\n\n @classmethod\n def create(cls):\n \"\"\"Creates a PreCommitRunner by doing the following:\n - Finds the root of the current git repository\n - chdirs to that directory\n \"\"\"\n root = git.get_root()\n os.chdir(root)\n return cls(root)\n\n @cached_property\n def config_file_path(self):\n return os.path.join(self.git_root, C.CONFIG_FILE)\n\n @cached_property\n def repositories(self):\n \"\"\"Returns a tuple of the configured repositories.\"\"\"\n config = load_config(self.config_file_path)\n repositories = tuple(Repository.create(x, self.store) for x in config)\n for repository in repositories:\n repository.require_installed()\n return repositories\n\n def get_hook_path(self, hook_type):\n return os.path.join(self.git_root, '.git', 'hooks', hook_type)\n\n @cached_property\n def pre_commit_path(self):\n return self.get_hook_path('pre-commit')\n\n @cached_property\n def pre_push_path(self):\n return self.get_hook_path('pre-push')\n\n @cached_property\n def cmd_runner(self):\n # TODO: remove this and inline runner.store.cmd_runner\n return self.store.cmd_runner\n\n @cached_property\n def store(self):\n return Store()\n", "path": "pre_commit/runner.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. 
We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n# https://github.com/pre-commit/pre-commit/issues/300\n# In git 2.6.3 (maybe others), git exports this while running pre-commit hooks\nos.environ.pop('GIT_WORK_TREE', None)\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {0}'.format(\n pkg_resources.get_distribution('pre-commit').version\n )\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n subparsers.add_parser('clean', help='Clean out pre-commit files.')\n\n subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--color', default='auto', type=color.use_color,\n help='Whether to use color in output. Defaults to `auto`',\n )\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help='The origin branch\\'s commit_id when using `git push`',\n )\n run_parser.add_argument(\n '--source', '-s',\n help='The remote branch\\'s commit_id when using `git push`',\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help='Allow an unstaged config to be present. Note that this will'\n 'be stashed before parsing unless --no-stash is specified'\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. commit or push',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. 
Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', help='Specific filenames to run hooks on.',\n )\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.'\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n runner = Runner.create()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n )\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner)\n elif args.command == 'run':\n return run(runner, args)\n else:\n raise NotImplementedError(\n 'Command {0} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {0} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport functools\nimport logging\nimport os\nimport os.path\nimport re\n\nfrom pre_commit.errors import FatalError\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import memoize_by_cwd\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef get_root():\n try:\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n except CalledProcessError:\n raise FatalError(\n 'Called from outside of the gits. 
Please cd to a git repository.'\n )\n\n\ndef get_git_dir(git_root):\n return os.path.normpath(os.path.join(\n git_root,\n cmd_output('git', 'rev-parse', '--git-dir', cwd=git_root)[1].strip(),\n ))\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip('#').strip()\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith(('\\t', '#\\t'))\n ]\n\n\n@memoize_by_cwd\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n merge_msg = open(os.path.join(get_git_dir('.'), 'MERGE_MSG')).read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = cmd_output(\n 'git', 'diff', '-m', tree_hash, 'HEAD', 'MERGE_HEAD', '--name-only',\n )[1].splitlines()\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\n@memoize_by_cwd\ndef get_staged_files():\n return cmd_output('git', 'diff', '--staged', '--name-only')[1].splitlines()\n\n\n@memoize_by_cwd\ndef get_all_files():\n return cmd_output('git', 'ls-files')[1].splitlines()\n\n\ndef get_files_matching(all_file_list_strategy):\n @functools.wraps(all_file_list_strategy)\n @memoize_by_cwd\n def wrapper(include_expr, exclude_expr):\n include_regex = re.compile(include_expr)\n exclude_regex = re.compile(exclude_expr)\n return set(\n filename\n for filename in all_file_list_strategy()\n if (\n include_regex.search(filename) and\n not exclude_regex.search(filename) and\n os.path.exists(filename)\n )\n )\n return wrapper\n\n\nget_staged_files_matching = get_files_matching(get_staged_files)\nget_all_files_matching = get_files_matching(get_all_files)\nget_conflicted_files_matching = get_files_matching(get_conflicted_files)\n", "path": "pre_commit/git.py"}, {"content": "from __future__ import unicode_literals\n\nimport os\nimport os.path\n\nfrom cached_property import cached_property\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.clientlib.validate_config import load_config\nfrom pre_commit.repository import Repository\nfrom pre_commit.store import Store\n\n\nclass Runner(object):\n \"\"\"A `Runner` represents the execution context of the hooks. 
Notably the\n repository under test.\n \"\"\"\n\n def __init__(self, git_root):\n self.git_root = git_root\n\n @classmethod\n def create(cls):\n \"\"\"Creates a PreCommitRunner by doing the following:\n - Finds the root of the current git repository\n - chdirs to that directory\n \"\"\"\n root = git.get_root()\n os.chdir(root)\n return cls(root)\n\n @cached_property\n def git_dir(self):\n return git.get_git_dir(self.git_root)\n\n @cached_property\n def config_file_path(self):\n return os.path.join(self.git_root, C.CONFIG_FILE)\n\n @cached_property\n def repositories(self):\n \"\"\"Returns a tuple of the configured repositories.\"\"\"\n config = load_config(self.config_file_path)\n repositories = tuple(Repository.create(x, self.store) for x in config)\n for repository in repositories:\n repository.require_installed()\n return repositories\n\n def get_hook_path(self, hook_type):\n return os.path.join(self.git_dir, 'hooks', hook_type)\n\n @cached_property\n def pre_commit_path(self):\n return self.get_hook_path('pre-commit')\n\n @cached_property\n def pre_push_path(self):\n return self.get_hook_path('pre-push')\n\n @cached_property\n def cmd_runner(self):\n # TODO: remove this and inline runner.store.cmd_runner\n return self.store.cmd_runner\n\n @cached_property\n def store(self):\n return Store()\n", "path": "pre_commit/runner.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport os\nimport sys\n\nimport pkg_resources\n\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.run import run\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.runner import Runner\n\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n# https://github.com/pre-commit/pre-commit/issues/300\n# In git 2.6.3 (maybe others), git exports this while running pre-commit hooks\nos.environ.pop('GIT_WORK_TREE', None)\n# In git 1.9.1 (maybe others), git exports these while running pre-commit hooks\n# in submodules. In the general case this causes problems.\n# These are covered by test_install_in_submodule_and_run\n# Causes git clone to clone wrong thing\nos.environ.pop('GIT_DIR', None)\n# Causes 'error invalid object ...' 
during commit\nos.environ.pop('GIT_INDEX_FILE', None)\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # http://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {0}'.format(\n pkg_resources.get_distribution('pre-commit').version\n )\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n install_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n uninstall_parser.add_argument(\n '-t', '--hook-type', choices=('pre-commit', 'pre-push'),\n default='pre-commit',\n )\n\n subparsers.add_parser('clean', help='Clean out pre-commit files.')\n\n subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n run_parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n run_parser.add_argument(\n '--color', default='auto', type=color.use_color,\n help='Whether to use color in output. Defaults to `auto`',\n )\n run_parser.add_argument(\n '--no-stash', default=False, action='store_true',\n help='Use this option to prevent auto stashing of unstaged files.',\n )\n run_parser.add_argument(\n '--verbose', '-v', action='store_true', default=False,\n )\n run_parser.add_argument(\n '--origin', '-o',\n help='The origin branch\\'s commit_id when using `git push`',\n )\n run_parser.add_argument(\n '--source', '-s',\n help='The remote branch\\'s commit_id when using `git push`',\n )\n run_parser.add_argument(\n '--allow-unstaged-config', default=False, action='store_true',\n help='Allow an unstaged config to be present. Note that this will'\n 'be stashed before parsing unless --no-stash is specified'\n )\n run_parser.add_argument(\n '--hook-stage', choices=('commit', 'push'), default='commit',\n help='The stage during which the hook is fired e.g. commit or push',\n )\n run_mutex_group = run_parser.add_mutually_exclusive_group(required=False)\n run_mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo. 
Implies --no-stash.',\n )\n run_mutex_group.add_argument(\n '--files', nargs='*', help='Specific filenames to run hooks on.',\n )\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.'\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # Argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n\n if args.command == 'help':\n if args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n else:\n parser.parse_args(['--help'])\n\n with error_handler():\n runner = Runner.create()\n\n if args.command == 'install':\n return install(\n runner, overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n )\n elif args.command == 'uninstall':\n return uninstall(runner, hook_type=args.hook_type)\n elif args.command == 'clean':\n return clean(runner)\n elif args.command == 'autoupdate':\n return autoupdate(runner)\n elif args.command == 'run':\n return run(runner, args)\n else:\n raise NotImplementedError(\n 'Command {0} not implemented.'.format(args.command)\n )\n\n raise AssertionError(\n 'Command {0} failed to exit with a returncode'.format(args.command)\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}]} | 3,411 | 682 |
gh_patches_debug_9704 | rasdani/github-patches | git_diff | Textualize__textual-441 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[textual][bug] CSS rule parsing fails when the name of the colour we pass contains a digit
So while this is working correctly:
```css
#my_widget {
background: dark_cyan;
}
```
...this fails:
```css
#my_widget {
background: turquoise4;
}
```
...with the following error:
```
• failed to parse color 'turquoise';
• failed to parse 'turquoise' as a color;
```
(maybe just a regex that doesn't take into account the fact that colour names can include numbers?)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/css/tokenize.py`
Content:
```
1 from __future__ import annotations
2
3 import re
4 from typing import Iterable
5
6 from textual.css.tokenizer import Expect, Tokenizer, Token
7
8 COMMENT_START = r"\/\*"
9 SCALAR = r"\-?\d+\.?\d*(?:fr|%|w|h|vw|vh)"
10 DURATION = r"\d+\.?\d*(?:ms|s)"
11 NUMBER = r"\-?\d+\.?\d*"
12 COLOR = r"\#[0-9a-fA-F]{8}|\#[0-9a-fA-F]{6}|rgb\(\-?\d+\.?\d*,\-?\d+\.?\d*,\-?\d+\.?\d*\)|rgba\(\-?\d+\.?\d*,\-?\d+\.?\d*,\-?\d+\.?\d*,\-?\d+\.?\d*\)"
13 KEY_VALUE = r"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\-\/]+"
14 TOKEN = "[a-zA-Z_-]+"
15 STRING = r"\".*?\""
16 VARIABLE_REF = r"\$[a-zA-Z0-9_\-]+"
17
18 # Values permitted in variable and rule declarations.
19 DECLARATION_VALUES = {
20 "scalar": SCALAR,
21 "duration": DURATION,
22 "number": NUMBER,
23 "color": COLOR,
24 "key_value": KEY_VALUE,
25 "token": TOKEN,
26 "string": STRING,
27 "variable_ref": VARIABLE_REF,
28 }
29
30 # The tokenizers "expectation" while at the root/highest level of scope
31 # in the CSS file. At this level we might expect to see selectors, comments,
32 # variable definitions etc.
33 expect_root_scope = Expect(
34 whitespace=r"\s+",
35 comment_start=COMMENT_START,
36 selector_start_id=r"\#[a-zA-Z_\-][a-zA-Z0-9_\-]*",
37 selector_start_class=r"\.[a-zA-Z_\-][a-zA-Z0-9_\-]*",
38 selector_start_universal=r"\*",
39 selector_start=r"[a-zA-Z_\-]+",
40 variable_name=rf"{VARIABLE_REF}:",
41 ).expect_eof(True)
42
43 # After a variable declaration e.g. "$warning-text: TOKENS;"
44 # for tokenizing variable value ------^~~~~~~^
45 expect_variable_name_continue = Expect(
46 variable_value_end=r"\n|;",
47 whitespace=r"\s+",
48 comment_start=COMMENT_START,
49 **DECLARATION_VALUES,
50 ).expect_eof(True)
51
52 expect_comment_end = Expect(
53 comment_end=re.escape("*/"),
54 )
55
56 # After we come across a selector in CSS e.g. ".my-class", we may
57 # find other selectors, pseudo-classes... e.g. ".my-class :hover"
58 expect_selector_continue = Expect(
59 whitespace=r"\s+",
60 comment_start=COMMENT_START,
61 pseudo_class=r"\:[a-zA-Z_-]+",
62 selector_id=r"\#[a-zA-Z_\-][a-zA-Z0-9_\-]*",
63 selector_class=r"\.[a-zA-Z_\-][a-zA-Z0-9_\-]*",
64 selector_universal=r"\*",
65 selector=r"[a-zA-Z_\-]+",
66 combinator_child=">",
67 new_selector=r",",
68 declaration_set_start=r"\{",
69 )
70
71 # A rule declaration e.g. "text: red;"
72 # ^---^
73 expect_declaration = Expect(
74 whitespace=r"\s+",
75 comment_start=COMMENT_START,
76 declaration_name=r"[a-zA-Z_\-]+\:",
77 declaration_set_end=r"\}",
78 )
79
80 expect_declaration_solo = Expect(
81 whitespace=r"\s+",
82 comment_start=COMMENT_START,
83 declaration_name=r"[a-zA-Z_\-]+\:",
84 declaration_set_end=r"\}",
85 ).expect_eof(True)
86
87 # The value(s)/content from a rule declaration e.g. "text: red;"
88 # ^---^
89 expect_declaration_content = Expect(
90 declaration_end=r";",
91 whitespace=r"\s+",
92 comment_start=COMMENT_START,
93 **DECLARATION_VALUES,
94 important=r"\!important",
95 comma=",",
96 declaration_set_end=r"\}",
97 )
98
99 expect_declaration_content_solo = Expect(
100 declaration_end=r";",
101 whitespace=r"\s+",
102 comment_start=COMMENT_START,
103 **DECLARATION_VALUES,
104 important=r"\!important",
105 comma=",",
106 declaration_set_end=r"\}",
107 ).expect_eof(True)
108
109
110 class TokenizerState:
111 """State machine for the tokenizer.
112
113 Attributes:
114 EXPECT: The initial expectation of the tokenizer. Since we start tokenizing
115 at the root scope, we might expect to see either a variable or selector, for example.
116 STATE_MAP: Maps token names to Expects, defines the sets of valid tokens
117 that we'd expect to see next, given the current token. For example, if
118 we've just processed a variable declaration name, we next expect to see
119 the value of that variable.
120 """
121
122 EXPECT = expect_root_scope
123 STATE_MAP = {
124 "variable_name": expect_variable_name_continue,
125 "variable_value_end": expect_root_scope,
126 "selector_start": expect_selector_continue,
127 "selector_start_id": expect_selector_continue,
128 "selector_start_class": expect_selector_continue,
129 "selector_start_universal": expect_selector_continue,
130 "selector_id": expect_selector_continue,
131 "selector_class": expect_selector_continue,
132 "selector_universal": expect_selector_continue,
133 "declaration_set_start": expect_declaration,
134 "declaration_name": expect_declaration_content,
135 "declaration_end": expect_declaration,
136 "declaration_set_end": expect_root_scope,
137 }
138
139 def __call__(self, code: str, path: str) -> Iterable[Token]:
140 tokenizer = Tokenizer(code, path=path)
141 expect = self.EXPECT
142 get_token = tokenizer.get_token
143 get_state = self.STATE_MAP.get
144 while True:
145 token = get_token(expect)
146 name = token.name
147 if name == "comment_start":
148 tokenizer.skip_to(expect_comment_end)
149 continue
150 elif name == "eof":
151 break
152 expect = get_state(name, expect)
153 yield token
154
155
156 class DeclarationTokenizerState(TokenizerState):
157 EXPECT = expect_declaration_solo
158 STATE_MAP = {
159 "declaration_name": expect_declaration_content,
160 "declaration_end": expect_declaration_solo,
161 }
162
163
164 class ValueTokenizerState(TokenizerState):
165 EXPECT = expect_declaration_content_solo
166
167
168 tokenize = TokenizerState()
169 tokenize_declarations = DeclarationTokenizerState()
170 tokenize_value = ValueTokenizerState()
171
172
173 def tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:
174 """Tokens the values in a dict of strings.
175
176 Args:
177 values (dict[str, str]): A mapping of CSS variable name on to a value, to be
178 added to the CSS context.
179
180 Returns:
181 dict[str, list[Token]]: A mapping of name on to a list of tokens,
182 """
183 value_tokens = {
184 name: list(tokenize_value(value, "__name__")) for name, value in values.items()
185 }
186 return value_tokens
187
188
189 if __name__ == "__main__":
190 from rich import print
191
192 css = """#something {
193
194 color: rgb(10,12,23)
195 }
196 """
197 # transition: offset 500 in_out_cubic;
198 tokens = tokenize(css, __name__)
199 print(list(tokens))
200
201 print(tokenize_values({"primary": "rgb(10,20,30)", "secondary": "#ff00ff"}))
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py
--- a/src/textual/css/tokenize.py
+++ b/src/textual/css/tokenize.py
@@ -11,7 +11,7 @@
NUMBER = r"\-?\d+\.?\d*"
COLOR = r"\#[0-9a-fA-F]{8}|\#[0-9a-fA-F]{6}|rgb\(\-?\d+\.?\d*,\-?\d+\.?\d*,\-?\d+\.?\d*\)|rgba\(\-?\d+\.?\d*,\-?\d+\.?\d*,\-?\d+\.?\d*,\-?\d+\.?\d*\)"
KEY_VALUE = r"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\-\/]+"
-TOKEN = "[a-zA-Z_-]+"
+TOKEN = "[a-zA-Z][a-zA-Z0-9_-]*"
STRING = r"\".*?\""
VARIABLE_REF = r"\$[a-zA-Z0-9_\-]+"
| {"golden_diff": "diff --git a/src/textual/css/tokenize.py b/src/textual/css/tokenize.py\n--- a/src/textual/css/tokenize.py\n+++ b/src/textual/css/tokenize.py\n@@ -11,7 +11,7 @@\n NUMBER = r\"\\-?\\d+\\.?\\d*\"\n COLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|rgb\\(\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*\\)|rgba\\(\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*\\)\"\n KEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\n-TOKEN = \"[a-zA-Z_-]+\"\n+TOKEN = \"[a-zA-Z][a-zA-Z0-9_-]*\"\n STRING = r\"\\\".*?\\\"\"\n VARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n", "issue": "[textual][bug] CSS rule parsing fails when the name of the colour we pass contains a digit\nSo while this is working correctly:\r\n```css\r\n#my_widget {\r\n background: dark_cyan;\r\n}\r\n```\r\n\r\n...this fails:\r\n```css\r\n#my_widget {\r\n background: turquoise4;\r\n}\r\n```\r\n...with the following error:\r\n```\r\n \u2022 failed to parse color 'turquoise'; \r\n \u2022 failed to parse 'turquoise' as a color; \r\n```\r\n(maybe just a regex that doesn't take into account the fact that colour names can include numbers?)\n", "before_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import Iterable\n\nfrom textual.css.tokenizer import Expect, Tokenizer, Token\n\nCOMMENT_START = r\"\\/\\*\"\nSCALAR = r\"\\-?\\d+\\.?\\d*(?:fr|%|w|h|vw|vh)\"\nDURATION = r\"\\d+\\.?\\d*(?:ms|s)\"\nNUMBER = r\"\\-?\\d+\\.?\\d*\"\nCOLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|rgb\\(\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*\\)|rgba\\(\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*\\)\"\nKEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\nTOKEN = \"[a-zA-Z_-]+\"\nSTRING = r\"\\\".*?\\\"\"\nVARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n\n# Values permitted in variable and rule declarations.\nDECLARATION_VALUES = {\n \"scalar\": SCALAR,\n \"duration\": DURATION,\n \"number\": NUMBER,\n \"color\": COLOR,\n \"key_value\": KEY_VALUE,\n \"token\": TOKEN,\n \"string\": STRING,\n \"variable_ref\": VARIABLE_REF,\n}\n\n# The tokenizers \"expectation\" while at the root/highest level of scope\n# in the CSS file. At this level we might expect to see selectors, comments,\n# variable definitions etc.\nexpect_root_scope = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n selector_start_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_start_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_start_universal=r\"\\*\",\n selector_start=r\"[a-zA-Z_\\-]+\",\n variable_name=rf\"{VARIABLE_REF}:\",\n).expect_eof(True)\n\n# After a variable declaration e.g. \"$warning-text: TOKENS;\"\n# for tokenizing variable value ------^~~~~~~^\nexpect_variable_name_continue = Expect(\n variable_value_end=r\"\\n|;\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n).expect_eof(True)\n\nexpect_comment_end = Expect(\n comment_end=re.escape(\"*/\"),\n)\n\n# After we come across a selector in CSS e.g. \".my-class\", we may\n# find other selectors, pseudo-classes... e.g. \".my-class :hover\"\nexpect_selector_continue = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n pseudo_class=r\"\\:[a-zA-Z_-]+\",\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n selector=r\"[a-zA-Z_\\-]+\",\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n)\n\n# A rule declaration e.g. 
\"text: red;\"\n# ^---^\nexpect_declaration = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_solo = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n# The value(s)/content from a rule declaration e.g. \"text: red;\"\n# ^---^\nexpect_declaration_content = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_content_solo = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n\nclass TokenizerState:\n \"\"\"State machine for the tokenizer.\n\n Attributes:\n EXPECT: The initial expectation of the tokenizer. Since we start tokenizing\n at the root scope, we might expect to see either a variable or selector, for example.\n STATE_MAP: Maps token names to Expects, defines the sets of valid tokens\n that we'd expect to see next, given the current token. For example, if\n we've just processed a variable declaration name, we next expect to see\n the value of that variable.\n \"\"\"\n\n EXPECT = expect_root_scope\n STATE_MAP = {\n \"variable_name\": expect_variable_name_continue,\n \"variable_value_end\": expect_root_scope,\n \"selector_start\": expect_selector_continue,\n \"selector_start_id\": expect_selector_continue,\n \"selector_start_class\": expect_selector_continue,\n \"selector_start_universal\": expect_selector_continue,\n \"selector_id\": expect_selector_continue,\n \"selector_class\": expect_selector_continue,\n \"selector_universal\": expect_selector_continue,\n \"declaration_set_start\": expect_declaration,\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration,\n \"declaration_set_end\": expect_root_scope,\n }\n\n def __call__(self, code: str, path: str) -> Iterable[Token]:\n tokenizer = Tokenizer(code, path=path)\n expect = self.EXPECT\n get_token = tokenizer.get_token\n get_state = self.STATE_MAP.get\n while True:\n token = get_token(expect)\n name = token.name\n if name == \"comment_start\":\n tokenizer.skip_to(expect_comment_end)\n continue\n elif name == \"eof\":\n break\n expect = get_state(name, expect)\n yield token\n\n\nclass DeclarationTokenizerState(TokenizerState):\n EXPECT = expect_declaration_solo\n STATE_MAP = {\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration_solo,\n }\n\n\nclass ValueTokenizerState(TokenizerState):\n EXPECT = expect_declaration_content_solo\n\n\ntokenize = TokenizerState()\ntokenize_declarations = DeclarationTokenizerState()\ntokenize_value = ValueTokenizerState()\n\n\ndef tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:\n \"\"\"Tokens the values in a dict of strings.\n\n Args:\n values (dict[str, str]): A mapping of CSS variable name on to a value, to be\n added to the CSS context.\n\n Returns:\n dict[str, list[Token]]: A mapping of name on to a list of tokens,\n \"\"\"\n value_tokens = {\n name: list(tokenize_value(value, \"__name__\")) for name, value in values.items()\n }\n return value_tokens\n\n\nif __name__ == \"__main__\":\n from rich import print\n\n css = \"\"\"#something {\n\n color: rgb(10,12,23)\n }\n 
\"\"\"\n # transition: offset 500 in_out_cubic;\n tokens = tokenize(css, __name__)\n print(list(tokens))\n\n print(tokenize_values({\"primary\": \"rgb(10,20,30)\", \"secondary\": \"#ff00ff\"}))\n", "path": "src/textual/css/tokenize.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport re\nfrom typing import Iterable\n\nfrom textual.css.tokenizer import Expect, Tokenizer, Token\n\nCOMMENT_START = r\"\\/\\*\"\nSCALAR = r\"\\-?\\d+\\.?\\d*(?:fr|%|w|h|vw|vh)\"\nDURATION = r\"\\d+\\.?\\d*(?:ms|s)\"\nNUMBER = r\"\\-?\\d+\\.?\\d*\"\nCOLOR = r\"\\#[0-9a-fA-F]{8}|\\#[0-9a-fA-F]{6}|rgb\\(\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*\\)|rgba\\(\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*,\\-?\\d+\\.?\\d*\\)\"\nKEY_VALUE = r\"[a-zA-Z_-][a-zA-Z0-9_-]*=[0-9a-zA-Z_\\-\\/]+\"\nTOKEN = \"[a-zA-Z][a-zA-Z0-9_-]*\"\nSTRING = r\"\\\".*?\\\"\"\nVARIABLE_REF = r\"\\$[a-zA-Z0-9_\\-]+\"\n\n# Values permitted in variable and rule declarations.\nDECLARATION_VALUES = {\n \"scalar\": SCALAR,\n \"duration\": DURATION,\n \"number\": NUMBER,\n \"color\": COLOR,\n \"key_value\": KEY_VALUE,\n \"token\": TOKEN,\n \"string\": STRING,\n \"variable_ref\": VARIABLE_REF,\n}\n\n# The tokenizers \"expectation\" while at the root/highest level of scope\n# in the CSS file. At this level we might expect to see selectors, comments,\n# variable definitions etc.\nexpect_root_scope = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n selector_start_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_start_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_start_universal=r\"\\*\",\n selector_start=r\"[a-zA-Z_\\-]+\",\n variable_name=rf\"{VARIABLE_REF}:\",\n).expect_eof(True)\n\n# After a variable declaration e.g. \"$warning-text: TOKENS;\"\n# for tokenizing variable value ------^~~~~~~^\nexpect_variable_name_continue = Expect(\n variable_value_end=r\"\\n|;\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n).expect_eof(True)\n\nexpect_comment_end = Expect(\n comment_end=re.escape(\"*/\"),\n)\n\n# After we come across a selector in CSS e.g. \".my-class\", we may\n# find other selectors, pseudo-classes... e.g. \".my-class :hover\"\nexpect_selector_continue = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n pseudo_class=r\"\\:[a-zA-Z_-]+\",\n selector_id=r\"\\#[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_class=r\"\\.[a-zA-Z_\\-][a-zA-Z0-9_\\-]*\",\n selector_universal=r\"\\*\",\n selector=r\"[a-zA-Z_\\-]+\",\n combinator_child=\">\",\n new_selector=r\",\",\n declaration_set_start=r\"\\{\",\n)\n\n# A rule declaration e.g. \"text: red;\"\n# ^---^\nexpect_declaration = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_solo = Expect(\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n declaration_name=r\"[a-zA-Z_\\-]+\\:\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n# The value(s)/content from a rule declaration e.g. 
\"text: red;\"\n# ^---^\nexpect_declaration_content = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n)\n\nexpect_declaration_content_solo = Expect(\n declaration_end=r\";\",\n whitespace=r\"\\s+\",\n comment_start=COMMENT_START,\n **DECLARATION_VALUES,\n important=r\"\\!important\",\n comma=\",\",\n declaration_set_end=r\"\\}\",\n).expect_eof(True)\n\n\nclass TokenizerState:\n \"\"\"State machine for the tokenizer.\n\n Attributes:\n EXPECT: The initial expectation of the tokenizer. Since we start tokenizing\n at the root scope, we might expect to see either a variable or selector, for example.\n STATE_MAP: Maps token names to Expects, defines the sets of valid tokens\n that we'd expect to see next, given the current token. For example, if\n we've just processed a variable declaration name, we next expect to see\n the value of that variable.\n \"\"\"\n\n EXPECT = expect_root_scope\n STATE_MAP = {\n \"variable_name\": expect_variable_name_continue,\n \"variable_value_end\": expect_root_scope,\n \"selector_start\": expect_selector_continue,\n \"selector_start_id\": expect_selector_continue,\n \"selector_start_class\": expect_selector_continue,\n \"selector_start_universal\": expect_selector_continue,\n \"selector_id\": expect_selector_continue,\n \"selector_class\": expect_selector_continue,\n \"selector_universal\": expect_selector_continue,\n \"declaration_set_start\": expect_declaration,\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration,\n \"declaration_set_end\": expect_root_scope,\n }\n\n def __call__(self, code: str, path: str) -> Iterable[Token]:\n tokenizer = Tokenizer(code, path=path)\n expect = self.EXPECT\n get_token = tokenizer.get_token\n get_state = self.STATE_MAP.get\n while True:\n token = get_token(expect)\n name = token.name\n if name == \"comment_start\":\n tokenizer.skip_to(expect_comment_end)\n continue\n elif name == \"eof\":\n break\n expect = get_state(name, expect)\n yield token\n\n\nclass DeclarationTokenizerState(TokenizerState):\n EXPECT = expect_declaration_solo\n STATE_MAP = {\n \"declaration_name\": expect_declaration_content,\n \"declaration_end\": expect_declaration_solo,\n }\n\n\nclass ValueTokenizerState(TokenizerState):\n EXPECT = expect_declaration_content_solo\n\n\ntokenize = TokenizerState()\ntokenize_declarations = DeclarationTokenizerState()\ntokenize_value = ValueTokenizerState()\n\n\ndef tokenize_values(values: dict[str, str]) -> dict[str, list[Token]]:\n \"\"\"Tokens the values in a dict of strings.\n\n Args:\n values (dict[str, str]): A mapping of CSS variable name on to a value, to be\n added to the CSS context.\n\n Returns:\n dict[str, list[Token]]: A mapping of name on to a list of tokens,\n \"\"\"\n value_tokens = {\n name: list(tokenize_value(value, \"__name__\")) for name, value in values.items()\n }\n return value_tokens\n\n\nif __name__ == \"__main__\":\n from rich import print\n\n css = \"\"\"#something {\n\n color: rgb(10,12,23)\n }\n \"\"\"\n # transition: offset 500 in_out_cubic;\n tokens = tokenize(css, __name__)\n print(list(tokens))\n\n print(tokenize_values({\"primary\": \"rgb(10,20,30)\", \"secondary\": \"#ff00ff\"}))\n", "path": "src/textual/css/tokenize.py"}]} | 2,502 | 239 |
gh_patches_debug_18348 | rasdani/github-patches | git_diff | elastic__apm-agent-python-1414 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sanic 21.9.2 breaks exception tracking
For yet unknown reasons, Sanic 21.9.2+ broke exception tracking. The changes between 21.9.1 and 21.9.2 are here:
https://github.com/sanic-org/sanic/compare/v21.9.1...v21.9.2
The test failures are here: https://apm-ci.elastic.co/blue/organizations/jenkins/apm-agent-python%2Fapm-agent-python-nightly-mbp%2Fmaster/detail/master/787/tests/
Example:
----------------------------------------------------- Captured log call ------------------------------------------------------INFO sanic.root:testing.py:82 http://127.0.0.1:50003/fallback-value-error
ERROR sanic.error:request.py:193 Exception occurred in one of response middleware handlers
Traceback (most recent call last):
File "/home/user/.local/lib/python3.10/site-packages/sanic_routing/router.py", line 79, in resolve
route, param_basket = self.find_route(
File "", line 24, in find_route
sanic_routing.exceptions.NotFound: Not Found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/.local/lib/python3.10/site-packages/sanic/router.py", line 38, in _get
return self.resolve(
File "/home/user/.local/lib/python3.10/site-packages/sanic_routing/router.py", line 96, in resolve
raise self.exception(str(e), path=path)
sanic_routing.exceptions.NotFound: Not Found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "handle_request", line 26, in handle_request
Any,
File "/home/user/.local/lib/python3.10/site-packages/sanic/router.py", line 66, in get
return self._get(path, method, host)
File "/home/user/.local/lib/python3.10/site-packages/sanic/router.py", line 44, in _get
raise NotFound("Requested URL {} not found".format(e.path))
sanic.exceptions.NotFound: Requested URL /fallback-value-error not found
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/user/.local/lib/python3.10/site-packages/sanic/request.py", line 187, in respond
response = await self.app._run_response_middleware(
File "_run_response_middleware", line 22, in _run_response_middleware
from ssl import Purpose, SSLContext, create_default_context
File "/app/elasticapm/contrib/sanic/__init__.py", line 279, in _instrument_response
await set_context(
File "/app/elasticapm/contrib/asyncio/traces.py", line 93, in set_context
data = await data()
File "/app/elasticapm/contrib/sanic/utils.py", line 121, in get_response_info
if config.capture_body and "octet-stream" not in response.content_type:
TypeError: argument of type 'NoneType' is not iterable
Checking for `response.content_type is not None` in `elasticapm/contrib/sanic/utils.py:121` doesn't resolve the issue.
@ahopkins do you happen to have an idea what could cause these failures?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticapm/contrib/sanic/utils.py`
Content:
```
1 # BSD 3-Clause License
2 #
3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details
4 # Copyright (c) 2019, Elasticsearch BV
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are met:
9 #
10 # * Redistributions of source code must retain the above copyright notice, this
11 # list of conditions and the following disclaimer.
12 #
13 # * Redistributions in binary form must reproduce the above copyright notice,
14 # this list of conditions and the following disclaimer in the documentation
15 # and/or other materials provided with the distribution.
16 #
17 # * Neither the name of the copyright holder nor the names of its
18 # contributors may be used to endorse or promote products derived from
19 # this software without specific prior written permission.
20 #
21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30
31 from typing import Dict
32
33 from sanic import Sanic
34 from sanic import __version__ as version
35 from sanic.cookies import CookieJar
36 from sanic.request import Request
37 from sanic.response import HTTPResponse
38
39 from elasticapm.base import Client
40 from elasticapm.conf import Config, constants
41 from elasticapm.contrib.sanic.sanic_types import EnvInfoType
42 from elasticapm.utils import compat, get_url_dict
43
44
45 class SanicAPMConfig(dict):
46 def __init__(self, app: Sanic):
47 super(SanicAPMConfig, self).__init__()
48 for _key, _v in app.config.items():
49 if _key.startswith("ELASTIC_APM_"):
50 self[_key.replace("ELASTIC_APM_", "")] = _v
51
52
53 def get_env(request: Request) -> EnvInfoType:
54 """
55 Extract Server Environment Information from the current Request's context
56 :param request: Inbound HTTP Request
57 :return: A tuple containing the attribute and it's corresponding value for the current Application ENV
58 """
59 for _attr in ("server_name", "server_port", "version"):
60 if hasattr(request, _attr):
61 yield _attr, getattr(request, _attr)
62
63
64 # noinspection PyBroadException
65 async def get_request_info(config: Config, request: Request) -> Dict[str, str]:
66 """
67 Generate a traceable context information from the inbound HTTP request
68
69 :param config: Application Configuration used to tune the way the data is captured
70 :param request: Inbound HTTP request
71 :return: A dictionary containing the context information of the ongoing transaction
72 """
73 env = dict(get_env(request=request))
74 env.update(dict(request.app.config))
75 result = {
76 "env": env,
77 "method": request.method,
78 "socket": {
79 "remote_address": _get_client_ip(request=request),
80 "encrypted": request.scheme in ["https", "wss"],
81 },
82 "cookies": request.cookies,
83 "http_version": request.version,
84 }
85 if config.capture_headers:
86 result["headers"] = dict(request.headers)
87
88 if request.method in constants.HTTP_WITH_BODY and config.capture_body:
89 if request.content_type.startswith("multipart") or "octet-stream" in request.content_type:
90 result["body"] = "[DISCARDED]"
91 try:
92 result["body"] = request.body.decode("utf-8")
93 except Exception:
94 pass
95
96 if "body" not in result:
97 result["body"] = "[REDACTED]"
98 result["url"] = get_url_dict(request.url)
99 return result
100
101
102 async def get_response_info(config: Config, response: HTTPResponse) -> Dict[str, str]:
103 """
104 Generate a traceable context information from the inbound HTTP Response
105
106 :param config: Application Configuration used to tune the way the data is captured
107 :param response: outbound HTTP Response
108 :return: A dictionary containing the context information of the ongoing transaction
109 """
110 result = {
111 "cookies": _transform_response_cookie(cookies=response.cookies),
112 "finished": True,
113 "headers_sent": True,
114 }
115 if isinstance(response.status, compat.integer_types):
116 result["status_code"] = response.status
117
118 if config.capture_headers:
119 result["headers"] = dict(response.headers)
120
121 if config.capture_body and "octet-stream" not in response.content_type:
122 result["body"] = response.body.decode("utf-8")
123 else:
124 result["body"] = "[REDACTED]"
125
126 return result
127
128
129 def _get_client_ip(request: Request) -> str:
130 """Extract Client IP Address Information"""
131 try:
132 return request.ip or request.socket[0] or request.remote_addr
133 except IndexError:
134 return request.remote_addr
135
136
137 def make_client(client_cls=Client, **defaults) -> Client:
138 if "framework_name" not in defaults:
139 defaults["framework_name"] = "sanic"
140 defaults["framework_version"] = version
141
142 return client_cls(**defaults)
143
144
145 def _transform_response_cookie(cookies: CookieJar) -> Dict[str, str]:
146 """Transform the Sanic's CookieJar instance into a Normal dictionary to build the context"""
147 return {k: {"value": v.value, "path": v["path"]} for k, v in cookies.items()}
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/elasticapm/contrib/sanic/utils.py b/elasticapm/contrib/sanic/utils.py
--- a/elasticapm/contrib/sanic/utils.py
+++ b/elasticapm/contrib/sanic/utils.py
@@ -28,6 +28,7 @@
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+from string import ascii_uppercase
from typing import Dict
from sanic import Sanic
@@ -71,7 +72,8 @@
:return: A dictionary containing the context information of the ongoing transaction
"""
env = dict(get_env(request=request))
- env.update(dict(request.app.config))
+ app_config = {k: v for k, v in dict(request.app.config).items() if all(letter in ascii_uppercase for letter in k)}
+ env.update(app_config)
result = {
"env": env,
"method": request.method,
| {"golden_diff": "diff --git a/elasticapm/contrib/sanic/utils.py b/elasticapm/contrib/sanic/utils.py\n--- a/elasticapm/contrib/sanic/utils.py\n+++ b/elasticapm/contrib/sanic/utils.py\n@@ -28,6 +28,7 @@\n # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n \n+from string import ascii_uppercase\n from typing import Dict\n \n from sanic import Sanic\n@@ -71,7 +72,8 @@\n :return: A dictionary containing the context information of the ongoing transaction\n \"\"\"\n env = dict(get_env(request=request))\n- env.update(dict(request.app.config))\n+ app_config = {k: v for k, v in dict(request.app.config).items() if all(letter in ascii_uppercase for letter in k)}\n+ env.update(app_config)\n result = {\n \"env\": env,\n \"method\": request.method,\n", "issue": "Sanic 21.9.2 breaks exception tracking\nFor yet unknown reasons, Sanic 21.9.2+ broke exception tracking. The changes between 21.9.1 and 21.9.2 are here:\r\n\r\nhttps://github.com/sanic-org/sanic/compare/v21.9.1...v21.9.2\r\n\r\nThe test failures are here: https://apm-ci.elastic.co/blue/organizations/jenkins/apm-agent-python%2Fapm-agent-python-nightly-mbp%2Fmaster/detail/master/787/tests/\r\n\r\nExample:\r\n\r\n ----------------------------------------------------- Captured log call ------------------------------------------------------INFO sanic.root:testing.py:82 http://127.0.0.1:50003/fallback-value-error\r\n ERROR sanic.error:request.py:193 Exception occurred in one of response middleware handlers\r\n Traceback (most recent call last):\r\n File \"/home/user/.local/lib/python3.10/site-packages/sanic_routing/router.py\", line 79, in resolve\r\n route, param_basket = self.find_route(\r\n File \"\", line 24, in find_route\r\n sanic_routing.exceptions.NotFound: Not Found\r\n\r\n During handling of the above exception, another exception occurred:\r\n\r\n Traceback (most recent call last):\r\n File \"/home/user/.local/lib/python3.10/site-packages/sanic/router.py\", line 38, in _get\r\n return self.resolve(\r\n File \"/home/user/.local/lib/python3.10/site-packages/sanic_routing/router.py\", line 96, in resolve\r\n raise self.exception(str(e), path=path)\r\n sanic_routing.exceptions.NotFound: Not Found\r\n\r\n During handling of the above exception, another exception occurred:\r\n\r\n Traceback (most recent call last):\r\n File \"handle_request\", line 26, in handle_request\r\n Any,\r\n File \"/home/user/.local/lib/python3.10/site-packages/sanic/router.py\", line 66, in get\r\n return self._get(path, method, host)\r\n File \"/home/user/.local/lib/python3.10/site-packages/sanic/router.py\", line 44, in _get\r\n raise NotFound(\"Requested URL {} not found\".format(e.path))\r\n sanic.exceptions.NotFound: Requested URL /fallback-value-error not found\r\n\r\n During handling of the above exception, another exception occurred:\r\n\r\n Traceback (most recent call last):\r\n File \"/home/user/.local/lib/python3.10/site-packages/sanic/request.py\", line 187, in respond\r\n response = await self.app._run_response_middleware(\r\n File \"_run_response_middleware\", line 22, in _run_response_middleware\r\n from ssl import Purpose, SSLContext, create_default_context\r\n File \"/app/elasticapm/contrib/sanic/__init__.py\", line 279, in _instrument_response\r\n await set_context(\r\n File \"/app/elasticapm/contrib/asyncio/traces.py\", line 93, in set_context\r\n data = await data()\r\n File \"/app/elasticapm/contrib/sanic/utils.py\", line 121, in 
get_response_info\r\n if config.capture_body and \"octet-stream\" not in response.content_type:\r\n TypeError: argument of type 'NoneType' is not iterable\r\n\r\nChecking for `response.content_type is not None` in `elasticapm/contrib/sanic/utils.py:121` doesn't resolve the issue.\r\n\r\n@ahopkins do you happen to have an idea what could cause these failures?\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nfrom typing import Dict\n\nfrom sanic import Sanic\nfrom sanic import __version__ as version\nfrom sanic.cookies import CookieJar\nfrom sanic.request import Request\nfrom sanic.response import HTTPResponse\n\nfrom elasticapm.base import Client\nfrom elasticapm.conf import Config, constants\nfrom elasticapm.contrib.sanic.sanic_types import EnvInfoType\nfrom elasticapm.utils import compat, get_url_dict\n\n\nclass SanicAPMConfig(dict):\n def __init__(self, app: Sanic):\n super(SanicAPMConfig, self).__init__()\n for _key, _v in app.config.items():\n if _key.startswith(\"ELASTIC_APM_\"):\n self[_key.replace(\"ELASTIC_APM_\", \"\")] = _v\n\n\ndef get_env(request: Request) -> EnvInfoType:\n \"\"\"\n Extract Server Environment Information from the current Request's context\n :param request: Inbound HTTP Request\n :return: A tuple containing the attribute and it's corresponding value for the current Application ENV\n \"\"\"\n for _attr in (\"server_name\", \"server_port\", \"version\"):\n if hasattr(request, _attr):\n yield _attr, getattr(request, _attr)\n\n\n# noinspection PyBroadException\nasync def get_request_info(config: Config, request: Request) -> Dict[str, str]:\n \"\"\"\n Generate a traceable context information from the inbound HTTP request\n\n :param config: Application Configuration used to tune the way the data is captured\n :param request: Inbound HTTP request\n :return: A dictionary containing the context information of the ongoing transaction\n \"\"\"\n env = dict(get_env(request=request))\n env.update(dict(request.app.config))\n result = 
{\n \"env\": env,\n \"method\": request.method,\n \"socket\": {\n \"remote_address\": _get_client_ip(request=request),\n \"encrypted\": request.scheme in [\"https\", \"wss\"],\n },\n \"cookies\": request.cookies,\n \"http_version\": request.version,\n }\n if config.capture_headers:\n result[\"headers\"] = dict(request.headers)\n\n if request.method in constants.HTTP_WITH_BODY and config.capture_body:\n if request.content_type.startswith(\"multipart\") or \"octet-stream\" in request.content_type:\n result[\"body\"] = \"[DISCARDED]\"\n try:\n result[\"body\"] = request.body.decode(\"utf-8\")\n except Exception:\n pass\n\n if \"body\" not in result:\n result[\"body\"] = \"[REDACTED]\"\n result[\"url\"] = get_url_dict(request.url)\n return result\n\n\nasync def get_response_info(config: Config, response: HTTPResponse) -> Dict[str, str]:\n \"\"\"\n Generate a traceable context information from the inbound HTTP Response\n\n :param config: Application Configuration used to tune the way the data is captured\n :param response: outbound HTTP Response\n :return: A dictionary containing the context information of the ongoing transaction\n \"\"\"\n result = {\n \"cookies\": _transform_response_cookie(cookies=response.cookies),\n \"finished\": True,\n \"headers_sent\": True,\n }\n if isinstance(response.status, compat.integer_types):\n result[\"status_code\"] = response.status\n\n if config.capture_headers:\n result[\"headers\"] = dict(response.headers)\n\n if config.capture_body and \"octet-stream\" not in response.content_type:\n result[\"body\"] = response.body.decode(\"utf-8\")\n else:\n result[\"body\"] = \"[REDACTED]\"\n\n return result\n\n\ndef _get_client_ip(request: Request) -> str:\n \"\"\"Extract Client IP Address Information\"\"\"\n try:\n return request.ip or request.socket[0] or request.remote_addr\n except IndexError:\n return request.remote_addr\n\n\ndef make_client(client_cls=Client, **defaults) -> Client:\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"sanic\"\n defaults[\"framework_version\"] = version\n\n return client_cls(**defaults)\n\n\ndef _transform_response_cookie(cookies: CookieJar) -> Dict[str, str]:\n \"\"\"Transform the Sanic's CookieJar instance into a Normal dictionary to build the context\"\"\"\n return {k: {\"value\": v.value, \"path\": v[\"path\"]} for k, v in cookies.items()}\n", "path": "elasticapm/contrib/sanic/utils.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 
ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\nfrom string import ascii_uppercase\nfrom typing import Dict\n\nfrom sanic import Sanic\nfrom sanic import __version__ as version\nfrom sanic.cookies import CookieJar\nfrom sanic.request import Request\nfrom sanic.response import HTTPResponse\n\nfrom elasticapm.base import Client\nfrom elasticapm.conf import Config, constants\nfrom elasticapm.contrib.sanic.sanic_types import EnvInfoType\nfrom elasticapm.utils import compat, get_url_dict\n\n\nclass SanicAPMConfig(dict):\n def __init__(self, app: Sanic):\n super(SanicAPMConfig, self).__init__()\n for _key, _v in app.config.items():\n if _key.startswith(\"ELASTIC_APM_\"):\n self[_key.replace(\"ELASTIC_APM_\", \"\")] = _v\n\n\ndef get_env(request: Request) -> EnvInfoType:\n \"\"\"\n Extract Server Environment Information from the current Request's context\n :param request: Inbound HTTP Request\n :return: A tuple containing the attribute and it's corresponding value for the current Application ENV\n \"\"\"\n for _attr in (\"server_name\", \"server_port\", \"version\"):\n if hasattr(request, _attr):\n yield _attr, getattr(request, _attr)\n\n\n# noinspection PyBroadException\nasync def get_request_info(config: Config, request: Request) -> Dict[str, str]:\n \"\"\"\n Generate a traceable context information from the inbound HTTP request\n\n :param config: Application Configuration used to tune the way the data is captured\n :param request: Inbound HTTP request\n :return: A dictionary containing the context information of the ongoing transaction\n \"\"\"\n env = dict(get_env(request=request))\n app_config = {k: v for k, v in dict(request.app.config).items() if all(letter in ascii_uppercase for letter in k)}\n env.update(app_config)\n result = {\n \"env\": env,\n \"method\": request.method,\n \"socket\": {\n \"remote_address\": _get_client_ip(request=request),\n \"encrypted\": request.scheme in [\"https\", \"wss\"],\n },\n \"cookies\": request.cookies,\n \"http_version\": request.version,\n }\n if config.capture_headers:\n result[\"headers\"] = dict(request.headers)\n\n if request.method in constants.HTTP_WITH_BODY and config.capture_body:\n if request.content_type.startswith(\"multipart\") or \"octet-stream\" in request.content_type:\n result[\"body\"] = \"[DISCARDED]\"\n try:\n result[\"body\"] = request.body.decode(\"utf-8\")\n except Exception:\n pass\n\n if \"body\" not in result:\n result[\"body\"] = \"[REDACTED]\"\n result[\"url\"] = get_url_dict(request.url)\n return result\n\n\nasync def get_response_info(config: Config, response: HTTPResponse) -> Dict[str, str]:\n \"\"\"\n Generate a traceable context information from the inbound HTTP Response\n\n :param config: Application Configuration used to tune the way the data is captured\n :param response: outbound HTTP Response\n :return: A dictionary containing the context information of the ongoing transaction\n \"\"\"\n result = {\n \"cookies\": _transform_response_cookie(cookies=response.cookies),\n \"finished\": True,\n \"headers_sent\": True,\n }\n if isinstance(response.status, compat.integer_types):\n 
result[\"status_code\"] = response.status\n\n if config.capture_headers:\n result[\"headers\"] = dict(response.headers)\n\n if config.capture_body and \"octet-stream\" not in response.content_type:\n result[\"body\"] = response.body.decode(\"utf-8\")\n else:\n result[\"body\"] = \"[REDACTED]\"\n\n return result\n\n\ndef _get_client_ip(request: Request) -> str:\n \"\"\"Extract Client IP Address Information\"\"\"\n try:\n return request.ip or request.socket[0] or request.remote_addr\n except IndexError:\n return request.remote_addr\n\n\ndef make_client(client_cls=Client, **defaults) -> Client:\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"sanic\"\n defaults[\"framework_version\"] = version\n\n return client_cls(**defaults)\n\n\ndef _transform_response_cookie(cookies: CookieJar) -> Dict[str, str]:\n \"\"\"Transform the Sanic's CookieJar instance into a Normal dictionary to build the context\"\"\"\n return {k: {\"value\": v.value, \"path\": v[\"path\"]} for k, v in cookies.items()}\n", "path": "elasticapm/contrib/sanic/utils.py"}]} | 2,643 | 224 |
gh_patches_debug_27127 | rasdani/github-patches | git_diff | mindsdb__mindsdb-1011 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Endpoint to disable telemtry
fairly self explainatory, add and an endpoint to the HTTP API that can be used to disable/enable the telemtry.
Endpoint to disable telemtry
fairly self explainatory, add and an endpoint to the HTTP API that can be used to disable/enable the telemtry.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mindsdb/api/http/namespaces/util.py`
Content:
```
1 from flask import request
2 from flask_restx import Resource, abort
3 from flask import current_app as ca
4
5 from mindsdb.api.http.namespaces.configs.util import ns_conf
6 from mindsdb import __about__
7
8 @ns_conf.route('/ping')
9 class Ping(Resource):
10 @ns_conf.doc('get_ping')
11 def get(self):
12 '''Checks server avaliable'''
13 return {'status': 'ok'}
14
15 @ns_conf.route('/report_uuid')
16 class ReportUUID(Resource):
17 @ns_conf.doc('get_report_uuid')
18 def get(self):
19 metamodel_name = '___monitroing_metamodel___'
20 predictor = ca.mindsdb_native.create(metamodel_name)
21 return {
22 'report_uuid': predictor.report_uuid
23 }
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py
--- a/mindsdb/api/http/namespaces/util.py
+++ b/mindsdb/api/http/namespaces/util.py
@@ -1,3 +1,4 @@
+import os
from flask import request
from flask_restx import Resource, abort
from flask import current_app as ca
@@ -5,6 +6,8 @@
from mindsdb.api.http.namespaces.configs.util import ns_conf
from mindsdb import __about__
+TELEMETRY_FILE = 'telemetry.lock'
+
@ns_conf.route('/ping')
class Ping(Resource):
@ns_conf.doc('get_ping')
@@ -21,3 +24,34 @@
return {
'report_uuid': predictor.report_uuid
}
+
+@ns_conf.route('/telemetry')
+class Telemetry(Resource):
+ @ns_conf.doc('get_telemetry_status')
+ def get(self):
+ status = "enabled" if is_telemetry_active() else "disabled"
+ return {"status": status}
+
+ @ns_conf.doc('set_telemetry')
+ def post(self):
+ data = request.json
+ action = data['action']
+ if str(action).lower() in ["true", "enable", "on"]:
+ enable_telemetry()
+ else:
+ disable_telemetry()
+
+
+def enable_telemetry():
+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)
+ if os.path.exists(path):
+ os.remove(path)
+
+def disable_telemetry():
+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)
+ with open(path, 'w') as _:
+ pass
+
+def is_telemetry_active():
+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)
+ return not os.path.exists(path)
| {"golden_diff": "diff --git a/mindsdb/api/http/namespaces/util.py b/mindsdb/api/http/namespaces/util.py\n--- a/mindsdb/api/http/namespaces/util.py\n+++ b/mindsdb/api/http/namespaces/util.py\n@@ -1,3 +1,4 @@\n+import os\n from flask import request\n from flask_restx import Resource, abort\n from flask import current_app as ca\n@@ -5,6 +6,8 @@\n from mindsdb.api.http.namespaces.configs.util import ns_conf\n from mindsdb import __about__\n \n+TELEMETRY_FILE = 'telemetry.lock'\n+\n @ns_conf.route('/ping')\n class Ping(Resource):\n @ns_conf.doc('get_ping')\n@@ -21,3 +24,34 @@\n return {\n 'report_uuid': predictor.report_uuid\n }\n+\n+@ns_conf.route('/telemetry')\n+class Telemetry(Resource):\n+ @ns_conf.doc('get_telemetry_status')\n+ def get(self):\n+ status = \"enabled\" if is_telemetry_active() else \"disabled\"\n+ return {\"status\": status}\n+\n+ @ns_conf.doc('set_telemetry')\n+ def post(self):\n+ data = request.json\n+ action = data['action']\n+ if str(action).lower() in [\"true\", \"enable\", \"on\"]:\n+ enable_telemetry()\n+ else:\n+ disable_telemetry()\n+\n+\n+def enable_telemetry():\n+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n+ if os.path.exists(path):\n+ os.remove(path)\n+\n+def disable_telemetry():\n+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n+ with open(path, 'w') as _:\n+ pass\n+\n+def is_telemetry_active():\n+ path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n+ return not os.path.exists(path)\n", "issue": "Endpoint to disable telemtry\nfairly self explainatory, add and an endpoint to the HTTP API that can be used to disable/enable the telemtry.\r\n\nEndpoint to disable telemtry\nfairly self explainatory, add and an endpoint to the HTTP API that can be used to disable/enable the telemtry.\r\n\n", "before_files": [{"content": "from flask import request\nfrom flask_restx import Resource, abort\nfrom flask import current_app as ca\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\nfrom mindsdb import __about__\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n@ns_conf.route('/report_uuid')\nclass ReportUUID(Resource):\n @ns_conf.doc('get_report_uuid')\n def get(self):\n metamodel_name = '___monitroing_metamodel___'\n predictor = ca.mindsdb_native.create(metamodel_name)\n return {\n 'report_uuid': predictor.report_uuid\n }\n", "path": "mindsdb/api/http/namespaces/util.py"}], "after_files": [{"content": "import os\nfrom flask import request\nfrom flask_restx import Resource, abort\nfrom flask import current_app as ca\n\nfrom mindsdb.api.http.namespaces.configs.util import ns_conf\nfrom mindsdb import __about__\n\nTELEMETRY_FILE = 'telemetry.lock'\n\n@ns_conf.route('/ping')\nclass Ping(Resource):\n @ns_conf.doc('get_ping')\n def get(self):\n '''Checks server avaliable'''\n return {'status': 'ok'}\n\n@ns_conf.route('/report_uuid')\nclass ReportUUID(Resource):\n @ns_conf.doc('get_report_uuid')\n def get(self):\n metamodel_name = '___monitroing_metamodel___'\n predictor = ca.mindsdb_native.create(metamodel_name)\n return {\n 'report_uuid': predictor.report_uuid\n }\n\n@ns_conf.route('/telemetry')\nclass Telemetry(Resource):\n @ns_conf.doc('get_telemetry_status')\n def get(self):\n status = \"enabled\" if is_telemetry_active() else \"disabled\"\n return {\"status\": status}\n\n @ns_conf.doc('set_telemetry')\n def post(self):\n data = request.json\n action = data['action']\n if str(action).lower() in 
[\"true\", \"enable\", \"on\"]:\n enable_telemetry()\n else:\n disable_telemetry()\n\n\ndef enable_telemetry():\n path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n if os.path.exists(path):\n os.remove(path)\n\ndef disable_telemetry():\n path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n with open(path, 'w') as _:\n pass\n\ndef is_telemetry_active():\n path = os.path.join(ca.config_obj['storage_dir'], TELEMETRY_FILE)\n return not os.path.exists(path)\n", "path": "mindsdb/api/http/namespaces/util.py"}]} | 531 | 433 |
gh_patches_debug_67113 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-5120 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
403 error after a search in the library
> Hello,
>
> I run into a 403 error, "Vous n’avez pas les droits suffisants pour accéder à cette page." (you do not have sufficient rights to access this page), after searching the courses available in the library.
> Here is how it appeared:
>
> - Arrived on the site while logged out
> - I went to "Développement web" from the banner at the top of the site
> - From there I logged in to my account (same tab, regular button), which brought me back to the page
> - Then I searched for "PHP" in the search bar, which took me to [this link](https://zestedesavoir.com/rechercher/?q=PHP&models=content&from_library=on&category=informatique&subcategory=site-web)
>
> The 403 error occurs when I check 1 to 3 of the boxes (below the search bar) to filter the results and then click the "search" button in the bar again.
>
> Here is [an example link](https://zestedesavoir.com/rechercher/?q=PHP&category=informatique&subcategory=site-web&from_library=on&models=) that triggers a 403 error on my side.
>
> Bye
Topic: https://zestedesavoir.com/forums/sujet/11609/erreur-403-apres-recherche-dans-la-bibliotheque/
*Sent from Zeste de Savoir*
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/searchv2/forms.py`
Content:
```
1 import os
2 import random
3
4 from django import forms
5 from django.conf import settings
6 from django.utils.translation import ugettext_lazy as _
7
8 from crispy_forms.bootstrap import StrictButton
9 from crispy_forms.helper import FormHelper
10 from crispy_forms.layout import Layout, Field
11 from django.core.urlresolvers import reverse
12
13
14 class SearchForm(forms.Form):
15 q = forms.CharField(
16 label=_('Recherche'),
17 max_length=150,
18 required=False,
19 widget=forms.TextInput(
20 attrs={
21 'type': 'search',
22 'required': 'required'
23 }
24 )
25 )
26
27 choices = sorted(
28 [(k, v[0]) for k, v in settings.ZDS_APP['search']['search_groups'].items()],
29 key=lambda pair: pair[1]
30 )
31
32 models = forms.MultipleChoiceField(
33 label='',
34 widget=forms.CheckboxSelectMultiple,
35 required=False,
36 choices=choices
37 )
38
39 category = forms.CharField(widget=forms.HiddenInput, required=False)
40 subcategory = forms.CharField(widget=forms.HiddenInput, required=False)
41 from_library = forms.CharField(widget=forms.HiddenInput, required=False)
42
43 def __init__(self, *args, **kwargs):
44
45 super(SearchForm, self).__init__(*args, **kwargs)
46
47 self.helper = FormHelper()
48 self.helper.form_id = 'search-form'
49 self.helper.form_class = 'clearfix'
50 self.helper.form_method = 'get'
51 self.helper.form_action = reverse('search:query')
52
53 try:
54 with open(os.path.join(settings.BASE_DIR, 'suggestions.txt'), 'r') as suggestions_file:
55 suggestions = ', '.join(random.sample(suggestions_file.readlines(), 5)) + '…'
56 except OSError:
57 suggestions = _('Mathématiques, Droit, UDK, Langues, Python…')
58
59 self.fields['q'].widget.attrs['placeholder'] = suggestions
60
61 self.helper.layout = Layout(
62 Field('q'),
63 StrictButton('', type='submit', css_class='ico-after ico-search', title=_('Rechercher')),
64 Field('category'),
65 Field('subcategory'),
66 Field('from_library')
67 )
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/searchv2/forms.py b/zds/searchv2/forms.py
--- a/zds/searchv2/forms.py
+++ b/zds/searchv2/forms.py
@@ -31,7 +31,12 @@
models = forms.MultipleChoiceField(
label='',
- widget=forms.CheckboxSelectMultiple,
+ widget=forms.CheckboxSelectMultiple(
+ attrs={
+ 'class': 'search-filters',
+ 'form': 'search-form'
+ }
+ ),
required=False,
choices=choices
)
| {"golden_diff": "diff --git a/zds/searchv2/forms.py b/zds/searchv2/forms.py\n--- a/zds/searchv2/forms.py\n+++ b/zds/searchv2/forms.py\n@@ -31,7 +31,12 @@\n \n models = forms.MultipleChoiceField(\n label='',\n- widget=forms.CheckboxSelectMultiple,\n+ widget=forms.CheckboxSelectMultiple(\n+ attrs={\n+ 'class': 'search-filters',\n+ 'form': 'search-form'\n+ }\n+ ),\n required=False,\n choices=choices\n )\n", "issue": "Erreur 403 apr\u00e8s recherche dans la biblioth\u00e8que\n> Bonjour,\r\n> \r\n> Je tombe sur une erreur 403 \"Vous n\u2019avez pas les droits suffisants pour acc\u00e9der \u00e0 cette page.\" apr\u00e8s une recherche dans les cours pr\u00e9sents dans la biblioth\u00e8que.\r\nVoil\u00e0 comment elle est apparue :\r\n> \r\n> - Arriv\u00e9 sur le site d\u00e9connect\u00e9\r\n> - Je suis all\u00e9 dans \"D\u00e9veloppement web\" depuis le bandeau en haut du site\r\n> - A partir de l\u00e0 je me suis connect\u00e9 \u00e0 mon compte (m\u00eame onglet, bouton classique), ce qui m'a ramen\u00e9 sur la page\r\n> - Puis j'ai fait une recherche \"PHP\" dans la barre de rechercher, ce qui m'a amen\u00e9 sur [ce lien](https://zestedesavoir.com/rechercher/?q=PHP&models=content&from_library=on&category=informatique&subcategory=site-web)\r\n> \r\n> L'erreur 403 se produit lorsque je coche 1 \u00e0 3 cases (sous la barre de recherche), pour filtrer les r\u00e9sultats, et que je clique \u00e0 nouveau sur le bouton \"rechercher\" dans la barre.\r\n> \r\n> Voil\u00e0 [un exemple de lien](https://zestedesavoir.com/rechercher/?q=PHP&category=informatique&subcategory=site-web&from_library=on&models=) provoquant une erreur 403 de mon c\u00f4t\u00e9.\r\n> \r\n> Bye\r\n\r\nSujet : https://zestedesavoir.com/forums/sujet/11609/erreur-403-apres-recherche-dans-la-bibliotheque/\r\n*Envoy\u00e9 depuis Zeste de Savoir*\n", "before_files": [{"content": "import os\nimport random\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Field\nfrom django.core.urlresolvers import reverse\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(\n label=_('Recherche'),\n max_length=150,\n required=False,\n widget=forms.TextInput(\n attrs={\n 'type': 'search',\n 'required': 'required'\n }\n )\n )\n\n choices = sorted(\n [(k, v[0]) for k, v in settings.ZDS_APP['search']['search_groups'].items()],\n key=lambda pair: pair[1]\n )\n\n models = forms.MultipleChoiceField(\n label='',\n widget=forms.CheckboxSelectMultiple,\n required=False,\n choices=choices\n )\n\n category = forms.CharField(widget=forms.HiddenInput, required=False)\n subcategory = forms.CharField(widget=forms.HiddenInput, required=False)\n from_library = forms.CharField(widget=forms.HiddenInput, required=False)\n\n def __init__(self, *args, **kwargs):\n\n super(SearchForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_id = 'search-form'\n self.helper.form_class = 'clearfix'\n self.helper.form_method = 'get'\n self.helper.form_action = reverse('search:query')\n\n try:\n with open(os.path.join(settings.BASE_DIR, 'suggestions.txt'), 'r') as suggestions_file:\n suggestions = ', '.join(random.sample(suggestions_file.readlines(), 5)) + '\u2026'\n except OSError:\n suggestions = _('Math\u00e9matiques, Droit, UDK, Langues, Python\u2026')\n\n self.fields['q'].widget.attrs['placeholder'] = suggestions\n\n self.helper.layout = 
Layout(\n Field('q'),\n StrictButton('', type='submit', css_class='ico-after ico-search', title=_('Rechercher')),\n Field('category'),\n Field('subcategory'),\n Field('from_library')\n )\n", "path": "zds/searchv2/forms.py"}], "after_files": [{"content": "import os\nimport random\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom crispy_forms.bootstrap import StrictButton\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Field\nfrom django.core.urlresolvers import reverse\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(\n label=_('Recherche'),\n max_length=150,\n required=False,\n widget=forms.TextInput(\n attrs={\n 'type': 'search',\n 'required': 'required'\n }\n )\n )\n\n choices = sorted(\n [(k, v[0]) for k, v in settings.ZDS_APP['search']['search_groups'].items()],\n key=lambda pair: pair[1]\n )\n\n models = forms.MultipleChoiceField(\n label='',\n widget=forms.CheckboxSelectMultiple(\n attrs={\n 'class': 'search-filters',\n 'form': 'search-form'\n }\n ),\n required=False,\n choices=choices\n )\n\n category = forms.CharField(widget=forms.HiddenInput, required=False)\n subcategory = forms.CharField(widget=forms.HiddenInput, required=False)\n from_library = forms.CharField(widget=forms.HiddenInput, required=False)\n\n def __init__(self, *args, **kwargs):\n\n super(SearchForm, self).__init__(*args, **kwargs)\n\n self.helper = FormHelper()\n self.helper.form_id = 'search-form'\n self.helper.form_class = 'clearfix'\n self.helper.form_method = 'get'\n self.helper.form_action = reverse('search:query')\n\n try:\n with open(os.path.join(settings.BASE_DIR, 'suggestions.txt'), 'r') as suggestions_file:\n suggestions = ', '.join(random.sample(suggestions_file.readlines(), 5)) + '\u2026'\n except OSError:\n suggestions = _('Math\u00e9matiques, Droit, UDK, Langues, Python\u2026')\n\n self.fields['q'].widget.attrs['placeholder'] = suggestions\n\n self.helper.layout = Layout(\n Field('q'),\n StrictButton('', type='submit', css_class='ico-after ico-search', title=_('Rechercher')),\n Field('category'),\n Field('subcategory'),\n Field('from_library')\n )\n", "path": "zds/searchv2/forms.py"}]} | 1,229 | 124 |
gh_patches_debug_34184 | rasdani/github-patches | git_diff | goauthentik__authentik-5163 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Outpost cannot be created in restricted namespaces
**Describe the bug**
Creating outposts in restricted namespaces does not work due to securityContext settings not getting copied over from main deployment.
**To Reproduce**
Steps to reproduce the behavior:
1. Create a restricted namespace:
* kubectl create ns test-authentik
* kubectl label ns test-authentik pod-security.kubernetes.io/enforce=baseline --overwrite
2. Create a new Outpost, in configuration do:
kubernetes_namespace=test-authentik
**Expected behavior**
Outpost should be created successfully.
**Logs**
```bash
kubectl describe -n authentik replicasets.apps ak-outpost-default-ldap-outpost
...
Warning FailedCreate 6m51s replicaset-controller Error creating: pods "ak-outpost-default-ldap-outpost-7f4d464f8c-7g76k" is forbidden: violates PodSecurity "restricted:latest": allowPrivilegeEscalation != false (container "ldap" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container "ldap" must set securityContext.capabilities.drop=["ALL"]), runAsNonRoot != true (pod or container "ldap" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container "ldap" must set securityContext.seccompProfile.type to "RuntimeDefault" or "Localhost")
```
**Version and Deployment (please complete the following information):**
- authentik version: 23.3.1
- Deployment: helm
**Workaround**
Edit deployment to use the same securityContext settings from main pod and container in new deployments.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/outposts/controllers/k8s/deployment.py`
Content:
```
1 """Kubernetes Deployment Reconciler"""
2 from typing import TYPE_CHECKING
3
4 from django.utils.text import slugify
5 from kubernetes.client import (
6 AppsV1Api,
7 V1Container,
8 V1ContainerPort,
9 V1Deployment,
10 V1DeploymentSpec,
11 V1EnvVar,
12 V1EnvVarSource,
13 V1LabelSelector,
14 V1ObjectMeta,
15 V1ObjectReference,
16 V1PodSpec,
17 V1PodTemplateSpec,
18 V1SecretKeySelector,
19 )
20
21 from authentik import __version__, get_full_version
22 from authentik.outposts.controllers.base import FIELD_MANAGER
23 from authentik.outposts.controllers.k8s.base import KubernetesObjectReconciler
24 from authentik.outposts.controllers.k8s.triggers import NeedsUpdate
25 from authentik.outposts.controllers.k8s.utils import compare_ports
26 from authentik.outposts.models import Outpost
27
28 if TYPE_CHECKING:
29 from authentik.outposts.controllers.kubernetes import KubernetesController
30
31
32 class DeploymentReconciler(KubernetesObjectReconciler[V1Deployment]):
33 """Kubernetes Deployment Reconciler"""
34
35 outpost: Outpost
36
37 def __init__(self, controller: "KubernetesController") -> None:
38 super().__init__(controller)
39 self.api = AppsV1Api(controller.client)
40 self.outpost = self.controller.outpost
41
42 def reconcile(self, current: V1Deployment, reference: V1Deployment):
43 compare_ports(
44 current.spec.template.spec.containers[0].ports,
45 reference.spec.template.spec.containers[0].ports,
46 )
47 if current.spec.replicas != reference.spec.replicas:
48 raise NeedsUpdate()
49 if (
50 current.spec.template.spec.containers[0].image
51 != reference.spec.template.spec.containers[0].image
52 ):
53 raise NeedsUpdate()
54 super().reconcile(current, reference)
55
56 def get_pod_meta(self, **kwargs) -> dict[str, str]:
57 """Get common object metadata"""
58 kwargs.update(
59 {
60 "app.kubernetes.io/name": f"authentik-outpost-{self.outpost.type}",
61 "app.kubernetes.io/managed-by": "goauthentik.io",
62 "goauthentik.io/outpost-uuid": self.controller.outpost.uuid.hex,
63 "goauthentik.io/outpost-name": slugify(self.controller.outpost.name),
64 "goauthentik.io/outpost-type": str(self.controller.outpost.type),
65 }
66 )
67 return kwargs
68
69 def get_reference_object(self) -> V1Deployment:
70 """Get deployment object for outpost"""
71 # Generate V1ContainerPort objects
72 container_ports = []
73 for port in self.controller.deployment_ports:
74 container_ports.append(
75 V1ContainerPort(
76 container_port=port.inner_port or port.port,
77 name=port.name,
78 protocol=port.protocol.upper(),
79 )
80 )
81 meta = self.get_object_meta(name=self.name)
82 image_name = self.controller.get_container_image()
83 image_pull_secrets = self.outpost.config.kubernetes_image_pull_secrets
84 version = get_full_version()
85 return V1Deployment(
86 metadata=meta,
87 spec=V1DeploymentSpec(
88 replicas=self.outpost.config.kubernetes_replicas,
89 selector=V1LabelSelector(match_labels=self.get_pod_meta()),
90 template=V1PodTemplateSpec(
91 metadata=V1ObjectMeta(
92 labels=self.get_pod_meta(
93 **{
94 # Support istio-specific labels, but also use the standard k8s
95 # recommendations
96 "app.kubernetes.io/version": version,
97 "app": "authentik-outpost",
98 "version": version,
99 }
100 )
101 ),
102 spec=V1PodSpec(
103 image_pull_secrets=[
104 V1ObjectReference(name=secret) for secret in image_pull_secrets
105 ],
106 containers=[
107 V1Container(
108 name=str(self.outpost.type),
109 image=image_name,
110 ports=container_ports,
111 env=[
112 V1EnvVar(
113 name="AUTHENTIK_HOST",
114 value_from=V1EnvVarSource(
115 secret_key_ref=V1SecretKeySelector(
116 name=self.name,
117 key="authentik_host",
118 )
119 ),
120 ),
121 V1EnvVar(
122 name="AUTHENTIK_HOST_BROWSER",
123 value_from=V1EnvVarSource(
124 secret_key_ref=V1SecretKeySelector(
125 name=self.name,
126 key="authentik_host_browser",
127 )
128 ),
129 ),
130 V1EnvVar(
131 name="AUTHENTIK_TOKEN",
132 value_from=V1EnvVarSource(
133 secret_key_ref=V1SecretKeySelector(
134 name=self.name,
135 key="token",
136 )
137 ),
138 ),
139 V1EnvVar(
140 name="AUTHENTIK_INSECURE",
141 value_from=V1EnvVarSource(
142 secret_key_ref=V1SecretKeySelector(
143 name=self.name,
144 key="authentik_host_insecure",
145 )
146 ),
147 ),
148 ],
149 )
150 ],
151 ),
152 ),
153 ),
154 )
155
156 def create(self, reference: V1Deployment):
157 return self.api.create_namespaced_deployment(
158 self.namespace, reference, field_manager=FIELD_MANAGER
159 )
160
161 def delete(self, reference: V1Deployment):
162 return self.api.delete_namespaced_deployment(reference.metadata.name, self.namespace)
163
164 def retrieve(self) -> V1Deployment:
165 return self.api.read_namespaced_deployment(self.name, self.namespace)
166
167 def update(self, current: V1Deployment, reference: V1Deployment):
168 return self.api.patch_namespaced_deployment(
169 current.metadata.name,
170 self.namespace,
171 reference,
172 field_manager=FIELD_MANAGER,
173 )
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/outposts/controllers/k8s/deployment.py b/authentik/outposts/controllers/k8s/deployment.py
--- a/authentik/outposts/controllers/k8s/deployment.py
+++ b/authentik/outposts/controllers/k8s/deployment.py
@@ -4,6 +4,7 @@
from django.utils.text import slugify
from kubernetes.client import (
AppsV1Api,
+ V1Capabilities,
V1Container,
V1ContainerPort,
V1Deployment,
@@ -13,9 +14,12 @@
V1LabelSelector,
V1ObjectMeta,
V1ObjectReference,
+ V1PodSecurityContext,
V1PodSpec,
V1PodTemplateSpec,
+ V1SeccompProfile,
V1SecretKeySelector,
+ V1SecurityContext,
)
from authentik import __version__, get_full_version
@@ -103,6 +107,12 @@
image_pull_secrets=[
V1ObjectReference(name=secret) for secret in image_pull_secrets
],
+ security_context=V1PodSecurityContext(
+ run_as_non_root=True,
+ seccomp_profile=V1SeccompProfile(
+ type="RuntimeDefault",
+ ),
+ ),
containers=[
V1Container(
name=str(self.outpost.type),
@@ -146,6 +156,12 @@
),
),
],
+ security_context=V1SecurityContext(
+ allow_privilege_escalation=False,
+ capabilities=V1Capabilities(
+ drop=["ALL"],
+ ),
+ ),
)
],
),
| {"golden_diff": "diff --git a/authentik/outposts/controllers/k8s/deployment.py b/authentik/outposts/controllers/k8s/deployment.py\n--- a/authentik/outposts/controllers/k8s/deployment.py\n+++ b/authentik/outposts/controllers/k8s/deployment.py\n@@ -4,6 +4,7 @@\n from django.utils.text import slugify\n from kubernetes.client import (\n AppsV1Api,\n+ V1Capabilities,\n V1Container,\n V1ContainerPort,\n V1Deployment,\n@@ -13,9 +14,12 @@\n V1LabelSelector,\n V1ObjectMeta,\n V1ObjectReference,\n+ V1PodSecurityContext,\n V1PodSpec,\n V1PodTemplateSpec,\n+ V1SeccompProfile,\n V1SecretKeySelector,\n+ V1SecurityContext,\n )\n \n from authentik import __version__, get_full_version\n@@ -103,6 +107,12 @@\n image_pull_secrets=[\n V1ObjectReference(name=secret) for secret in image_pull_secrets\n ],\n+ security_context=V1PodSecurityContext(\n+ run_as_non_root=True,\n+ seccomp_profile=V1SeccompProfile(\n+ type=\"RuntimeDefault\",\n+ ),\n+ ),\n containers=[\n V1Container(\n name=str(self.outpost.type),\n@@ -146,6 +156,12 @@\n ),\n ),\n ],\n+ security_context=V1SecurityContext(\n+ allow_privilege_escalation=False,\n+ capabilities=V1Capabilities(\n+ drop=[\"ALL\"],\n+ ),\n+ ),\n )\n ],\n ),\n", "issue": "Outpost cannot be created in restricted namespaces\n**Describe the bug**\r\nCreating outposts in restricted namespaces does not work due to securityContext settings not getting copied over from main deployment.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a restricted namespace:\r\n* kubectl create ns test-authentik\r\n* kubectl label ns test-authentikpod-security.kubernetes.io/enforce=baseline --overwrite\r\n2. Create a new Outpost, in configuration do:\r\nkubernetes_namespace=test-authentik\r\n\r\n**Expected behavior**\r\nOutpost should be created successfully.\r\n\r\n**Logs**\r\n```bash\r\nkubectl describe -n authentik replicasets.apps ak-outpost-default-ldap-outpost \r\n ...\r\n Warning FailedCreate 6m51s replicaset-controller Error creating: pods \"ak-outpost-default-ldap-outpost-7f4d464f8c-7g76k\" is forbidden: violates PodSecurity \"restricted:latest\": allowPrivilegeEscalation != false (container \"ldap\" must set securityContext.allowPrivilegeEscalation=false), unrestricted capabilities (container \"ldap\" must set securityContext.capabilities.drop=[\"ALL\"]), runAsNonRoot != true (pod or container \"ldap\" must set securityContext.runAsNonRoot=true), seccompProfile (pod or container \"ldap\" must set securityContext.seccompProfile.type to \"RuntimeDefault\" or \"Localhost\")\r\n ```\r\n\r\n**Version and Deployment (please complete the following information):**\r\n - authentik version: 23.3.1\r\n - Deployment: helm\r\n\r\n**Workaround**\r\nEdit deployment to use the same securityContext settings from main pod and container in new deployments.\r\n\n", "before_files": [{"content": "\"\"\"Kubernetes Deployment Reconciler\"\"\"\nfrom typing import TYPE_CHECKING\n\nfrom django.utils.text import slugify\nfrom kubernetes.client import (\n AppsV1Api,\n V1Container,\n V1ContainerPort,\n V1Deployment,\n V1DeploymentSpec,\n V1EnvVar,\n V1EnvVarSource,\n V1LabelSelector,\n V1ObjectMeta,\n V1ObjectReference,\n V1PodSpec,\n V1PodTemplateSpec,\n V1SecretKeySelector,\n)\n\nfrom authentik import __version__, get_full_version\nfrom authentik.outposts.controllers.base import FIELD_MANAGER\nfrom authentik.outposts.controllers.k8s.base import KubernetesObjectReconciler\nfrom authentik.outposts.controllers.k8s.triggers import NeedsUpdate\nfrom authentik.outposts.controllers.k8s.utils import 
compare_ports\nfrom authentik.outposts.models import Outpost\n\nif TYPE_CHECKING:\n from authentik.outposts.controllers.kubernetes import KubernetesController\n\n\nclass DeploymentReconciler(KubernetesObjectReconciler[V1Deployment]):\n \"\"\"Kubernetes Deployment Reconciler\"\"\"\n\n outpost: Outpost\n\n def __init__(self, controller: \"KubernetesController\") -> None:\n super().__init__(controller)\n self.api = AppsV1Api(controller.client)\n self.outpost = self.controller.outpost\n\n def reconcile(self, current: V1Deployment, reference: V1Deployment):\n compare_ports(\n current.spec.template.spec.containers[0].ports,\n reference.spec.template.spec.containers[0].ports,\n )\n if current.spec.replicas != reference.spec.replicas:\n raise NeedsUpdate()\n if (\n current.spec.template.spec.containers[0].image\n != reference.spec.template.spec.containers[0].image\n ):\n raise NeedsUpdate()\n super().reconcile(current, reference)\n\n def get_pod_meta(self, **kwargs) -> dict[str, str]:\n \"\"\"Get common object metadata\"\"\"\n kwargs.update(\n {\n \"app.kubernetes.io/name\": f\"authentik-outpost-{self.outpost.type}\",\n \"app.kubernetes.io/managed-by\": \"goauthentik.io\",\n \"goauthentik.io/outpost-uuid\": self.controller.outpost.uuid.hex,\n \"goauthentik.io/outpost-name\": slugify(self.controller.outpost.name),\n \"goauthentik.io/outpost-type\": str(self.controller.outpost.type),\n }\n )\n return kwargs\n\n def get_reference_object(self) -> V1Deployment:\n \"\"\"Get deployment object for outpost\"\"\"\n # Generate V1ContainerPort objects\n container_ports = []\n for port in self.controller.deployment_ports:\n container_ports.append(\n V1ContainerPort(\n container_port=port.inner_port or port.port,\n name=port.name,\n protocol=port.protocol.upper(),\n )\n )\n meta = self.get_object_meta(name=self.name)\n image_name = self.controller.get_container_image()\n image_pull_secrets = self.outpost.config.kubernetes_image_pull_secrets\n version = get_full_version()\n return V1Deployment(\n metadata=meta,\n spec=V1DeploymentSpec(\n replicas=self.outpost.config.kubernetes_replicas,\n selector=V1LabelSelector(match_labels=self.get_pod_meta()),\n template=V1PodTemplateSpec(\n metadata=V1ObjectMeta(\n labels=self.get_pod_meta(\n **{\n # Support istio-specific labels, but also use the standard k8s\n # recommendations\n \"app.kubernetes.io/version\": version,\n \"app\": \"authentik-outpost\",\n \"version\": version,\n }\n )\n ),\n spec=V1PodSpec(\n image_pull_secrets=[\n V1ObjectReference(name=secret) for secret in image_pull_secrets\n ],\n containers=[\n V1Container(\n name=str(self.outpost.type),\n image=image_name,\n ports=container_ports,\n env=[\n V1EnvVar(\n name=\"AUTHENTIK_HOST\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"authentik_host\",\n )\n ),\n ),\n V1EnvVar(\n name=\"AUTHENTIK_HOST_BROWSER\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"authentik_host_browser\",\n )\n ),\n ),\n V1EnvVar(\n name=\"AUTHENTIK_TOKEN\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"token\",\n )\n ),\n ),\n V1EnvVar(\n name=\"AUTHENTIK_INSECURE\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"authentik_host_insecure\",\n )\n ),\n ),\n ],\n )\n ],\n ),\n ),\n ),\n )\n\n def create(self, reference: V1Deployment):\n return self.api.create_namespaced_deployment(\n self.namespace, reference, field_manager=FIELD_MANAGER\n )\n\n def 
delete(self, reference: V1Deployment):\n return self.api.delete_namespaced_deployment(reference.metadata.name, self.namespace)\n\n def retrieve(self) -> V1Deployment:\n return self.api.read_namespaced_deployment(self.name, self.namespace)\n\n def update(self, current: V1Deployment, reference: V1Deployment):\n return self.api.patch_namespaced_deployment(\n current.metadata.name,\n self.namespace,\n reference,\n field_manager=FIELD_MANAGER,\n )\n", "path": "authentik/outposts/controllers/k8s/deployment.py"}], "after_files": [{"content": "\"\"\"Kubernetes Deployment Reconciler\"\"\"\nfrom typing import TYPE_CHECKING\n\nfrom django.utils.text import slugify\nfrom kubernetes.client import (\n AppsV1Api,\n V1Capabilities,\n V1Container,\n V1ContainerPort,\n V1Deployment,\n V1DeploymentSpec,\n V1EnvVar,\n V1EnvVarSource,\n V1LabelSelector,\n V1ObjectMeta,\n V1ObjectReference,\n V1PodSecurityContext,\n V1PodSpec,\n V1PodTemplateSpec,\n V1SeccompProfile,\n V1SecretKeySelector,\n V1SecurityContext,\n)\n\nfrom authentik import __version__, get_full_version\nfrom authentik.outposts.controllers.base import FIELD_MANAGER\nfrom authentik.outposts.controllers.k8s.base import KubernetesObjectReconciler\nfrom authentik.outposts.controllers.k8s.triggers import NeedsUpdate\nfrom authentik.outposts.controllers.k8s.utils import compare_ports\nfrom authentik.outposts.models import Outpost\n\nif TYPE_CHECKING:\n from authentik.outposts.controllers.kubernetes import KubernetesController\n\n\nclass DeploymentReconciler(KubernetesObjectReconciler[V1Deployment]):\n \"\"\"Kubernetes Deployment Reconciler\"\"\"\n\n outpost: Outpost\n\n def __init__(self, controller: \"KubernetesController\") -> None:\n super().__init__(controller)\n self.api = AppsV1Api(controller.client)\n self.outpost = self.controller.outpost\n\n def reconcile(self, current: V1Deployment, reference: V1Deployment):\n compare_ports(\n current.spec.template.spec.containers[0].ports,\n reference.spec.template.spec.containers[0].ports,\n )\n if current.spec.replicas != reference.spec.replicas:\n raise NeedsUpdate()\n if (\n current.spec.template.spec.containers[0].image\n != reference.spec.template.spec.containers[0].image\n ):\n raise NeedsUpdate()\n super().reconcile(current, reference)\n\n def get_pod_meta(self, **kwargs) -> dict[str, str]:\n \"\"\"Get common object metadata\"\"\"\n kwargs.update(\n {\n \"app.kubernetes.io/name\": f\"authentik-outpost-{self.outpost.type}\",\n \"app.kubernetes.io/managed-by\": \"goauthentik.io\",\n \"goauthentik.io/outpost-uuid\": self.controller.outpost.uuid.hex,\n \"goauthentik.io/outpost-name\": slugify(self.controller.outpost.name),\n \"goauthentik.io/outpost-type\": str(self.controller.outpost.type),\n }\n )\n return kwargs\n\n def get_reference_object(self) -> V1Deployment:\n \"\"\"Get deployment object for outpost\"\"\"\n # Generate V1ContainerPort objects\n container_ports = []\n for port in self.controller.deployment_ports:\n container_ports.append(\n V1ContainerPort(\n container_port=port.inner_port or port.port,\n name=port.name,\n protocol=port.protocol.upper(),\n )\n )\n meta = self.get_object_meta(name=self.name)\n image_name = self.controller.get_container_image()\n image_pull_secrets = self.outpost.config.kubernetes_image_pull_secrets\n version = get_full_version()\n return V1Deployment(\n metadata=meta,\n spec=V1DeploymentSpec(\n replicas=self.outpost.config.kubernetes_replicas,\n selector=V1LabelSelector(match_labels=self.get_pod_meta()),\n template=V1PodTemplateSpec(\n metadata=V1ObjectMeta(\n 
labels=self.get_pod_meta(\n **{\n # Support istio-specific labels, but also use the standard k8s\n # recommendations\n \"app.kubernetes.io/version\": version,\n \"app\": \"authentik-outpost\",\n \"version\": version,\n }\n )\n ),\n spec=V1PodSpec(\n image_pull_secrets=[\n V1ObjectReference(name=secret) for secret in image_pull_secrets\n ],\n security_context=V1PodSecurityContext(\n run_as_non_root=True,\n seccomp_profile=V1SeccompProfile(\n type=\"RuntimeDefault\",\n ),\n ),\n containers=[\n V1Container(\n name=str(self.outpost.type),\n image=image_name,\n ports=container_ports,\n env=[\n V1EnvVar(\n name=\"AUTHENTIK_HOST\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"authentik_host\",\n )\n ),\n ),\n V1EnvVar(\n name=\"AUTHENTIK_HOST_BROWSER\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"authentik_host_browser\",\n )\n ),\n ),\n V1EnvVar(\n name=\"AUTHENTIK_TOKEN\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"token\",\n )\n ),\n ),\n V1EnvVar(\n name=\"AUTHENTIK_INSECURE\",\n value_from=V1EnvVarSource(\n secret_key_ref=V1SecretKeySelector(\n name=self.name,\n key=\"authentik_host_insecure\",\n )\n ),\n ),\n ],\n security_context=V1SecurityContext(\n allow_privilege_escalation=False,\n capabilities=V1Capabilities(\n drop=[\"ALL\"],\n ),\n ),\n )\n ],\n ),\n ),\n ),\n )\n\n def create(self, reference: V1Deployment):\n return self.api.create_namespaced_deployment(\n self.namespace, reference, field_manager=FIELD_MANAGER\n )\n\n def delete(self, reference: V1Deployment):\n return self.api.delete_namespaced_deployment(reference.metadata.name, self.namespace)\n\n def retrieve(self) -> V1Deployment:\n return self.api.read_namespaced_deployment(self.name, self.namespace)\n\n def update(self, current: V1Deployment, reference: V1Deployment):\n return self.api.patch_namespaced_deployment(\n current.metadata.name,\n self.namespace,\n reference,\n field_manager=FIELD_MANAGER,\n )\n", "path": "authentik/outposts/controllers/k8s/deployment.py"}]} | 2,258 | 370 |
gh_patches_debug_2761 | rasdani/github-patches | git_diff | napari__napari-1088 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ListModel.append does not check type
## 🐛 Bug
in working on layer groups, I found a strange lack of type checking when appending to a `ListModel` (which inherits from `TypedList`). [`ListModel.append`](https://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L59) jumps right over `TypedList.append`
https://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L58-L60
... and if you try to append something that is not a `Layer` to a `LayerList`, it works fine up until throwing an error (unrelated to typing) in `components.layerlist._add`. Is that supposed to be `TypedList.append(self, obj)`? or was that intentional?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `napari/utils/list/_model.py`
Content:
```
1 from ...utils.event import EmitterGroup
2
3 from ._multi import MultiIndexList
4 from ._typed import TypedList
5
6
7 class ListModel(MultiIndexList, TypedList):
8 """List with events, tuple-indexing, typing, and filtering.
9
10 Parameters
11 ----------
12 basetype : type
13 Type of the elements in the list.
14 iterable : iterable, optional
15 Elements to initialize the list with.
16 lookup : dict of type : function(object, ``basetype``) -> bool
17 Functions that determine if an object is a reference to an
18 element of the list.
19
20 Attributes
21 ----------
22 events : vispy.util.event.EmitterGroup
23 Group of events for adding, removing, and reordering elements
24 within the list.
25 """
26
27 def __init__(self, basetype, iterable=(), lookup=None):
28 super().__init__(basetype, iterable, lookup)
29 self.events = EmitterGroup(
30 source=self,
31 auto_connect=True,
32 added=None,
33 removed=None,
34 reordered=None,
35 changed=None,
36 )
37 self.events.added.connect(self.events.changed)
38 self.events.removed.connect(self.events.changed)
39 self.events.reordered.connect(self.events.changed)
40
41 def __setitem__(self, query, values):
42 indices = tuple(self.__prsitem__(query))
43 new_indices = tuple(values)
44
45 if sorted(indices) != sorted(self.index(v) for v in new_indices):
46 raise TypeError(
47 'must be a reordering of indices; '
48 'setting of list items not allowed'
49 )
50
51 super().__setitem__(indices, new_indices)
52 self.events.reordered()
53
54 def insert(self, index, obj):
55 super().insert(index, obj)
56 self.events.added(item=obj, index=self.__locitem__(index))
57
58 def append(self, obj):
59 super(TypedList, self).append(obj)
60 self.events.added(item=obj, index=len(self) - 1)
61
62 def pop(self, key):
63 obj = super().pop(key)
64 self.events.removed(item=obj, index=key)
65 return obj
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/napari/utils/list/_model.py b/napari/utils/list/_model.py
--- a/napari/utils/list/_model.py
+++ b/napari/utils/list/_model.py
@@ -56,7 +56,7 @@
self.events.added(item=obj, index=self.__locitem__(index))
def append(self, obj):
- super(TypedList, self).append(obj)
+ TypedList.append(self, obj)
self.events.added(item=obj, index=len(self) - 1)
def pop(self, key):
| {"golden_diff": "diff --git a/napari/utils/list/_model.py b/napari/utils/list/_model.py\n--- a/napari/utils/list/_model.py\n+++ b/napari/utils/list/_model.py\n@@ -56,7 +56,7 @@\n self.events.added(item=obj, index=self.__locitem__(index))\n \n def append(self, obj):\n- super(TypedList, self).append(obj)\n+ TypedList.append(self, obj)\n self.events.added(item=obj, index=len(self) - 1)\n \n def pop(self, key):\n", "issue": "ListModel.append does not check type\n## \ud83d\udc1b Bug\r\nin working on layer groups, I found a strange lack of type checking when appending to a `ListModel` (which inherits from `TypedList`). [`ListModel.append`](https://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L59) jumps right over `TypedList.append`\r\nhttps://github.com/napari/napari/blob/59ed366e9d492a2389c451468fd8b9f96508b4e2/napari/utils/list/_model.py#L58-L60\r\n\r\n... and if you try to something that is not a `Layer` to a `LayerList`, it works fine up until throwing an error (unrelated to typing) in `components.layerlist._add`. Is that supposed to be `TypedList.append(self, obj)`? or was that intentional?\n", "before_files": [{"content": "from ...utils.event import EmitterGroup\n\nfrom ._multi import MultiIndexList\nfrom ._typed import TypedList\n\n\nclass ListModel(MultiIndexList, TypedList):\n \"\"\"List with events, tuple-indexing, typing, and filtering.\n\n Parameters\n ----------\n basetype : type\n Type of the elements in the list.\n iterable : iterable, optional\n Elements to initialize the list with.\n lookup : dict of type : function(object, ``basetype``) -> bool\n Functions that determine if an object is a reference to an\n element of the list.\n\n Attributes\n ----------\n events : vispy.util.event.EmitterGroup\n Group of events for adding, removing, and reordering elements\n within the list.\n \"\"\"\n\n def __init__(self, basetype, iterable=(), lookup=None):\n super().__init__(basetype, iterable, lookup)\n self.events = EmitterGroup(\n source=self,\n auto_connect=True,\n added=None,\n removed=None,\n reordered=None,\n changed=None,\n )\n self.events.added.connect(self.events.changed)\n self.events.removed.connect(self.events.changed)\n self.events.reordered.connect(self.events.changed)\n\n def __setitem__(self, query, values):\n indices = tuple(self.__prsitem__(query))\n new_indices = tuple(values)\n\n if sorted(indices) != sorted(self.index(v) for v in new_indices):\n raise TypeError(\n 'must be a reordering of indices; '\n 'setting of list items not allowed'\n )\n\n super().__setitem__(indices, new_indices)\n self.events.reordered()\n\n def insert(self, index, obj):\n super().insert(index, obj)\n self.events.added(item=obj, index=self.__locitem__(index))\n\n def append(self, obj):\n super(TypedList, self).append(obj)\n self.events.added(item=obj, index=len(self) - 1)\n\n def pop(self, key):\n obj = super().pop(key)\n self.events.removed(item=obj, index=key)\n return obj\n", "path": "napari/utils/list/_model.py"}], "after_files": [{"content": "from ...utils.event import EmitterGroup\n\nfrom ._multi import MultiIndexList\nfrom ._typed import TypedList\n\n\nclass ListModel(MultiIndexList, TypedList):\n \"\"\"List with events, tuple-indexing, typing, and filtering.\n\n Parameters\n ----------\n basetype : type\n Type of the elements in the list.\n iterable : iterable, optional\n Elements to initialize the list with.\n lookup : dict of type : function(object, ``basetype``) -> bool\n Functions that determine if an object is a reference to 
an\n element of the list.\n\n Attributes\n ----------\n events : vispy.util.event.EmitterGroup\n Group of events for adding, removing, and reordering elements\n within the list.\n \"\"\"\n\n def __init__(self, basetype, iterable=(), lookup=None):\n super().__init__(basetype, iterable, lookup)\n self.events = EmitterGroup(\n source=self,\n auto_connect=True,\n added=None,\n removed=None,\n reordered=None,\n changed=None,\n )\n self.events.added.connect(self.events.changed)\n self.events.removed.connect(self.events.changed)\n self.events.reordered.connect(self.events.changed)\n\n def __setitem__(self, query, values):\n indices = tuple(self.__prsitem__(query))\n new_indices = tuple(values)\n\n if sorted(indices) != sorted(self.index(v) for v in new_indices):\n raise TypeError(\n 'must be a reordering of indices; '\n 'setting of list items not allowed'\n )\n\n super().__setitem__(indices, new_indices)\n self.events.reordered()\n\n def insert(self, index, obj):\n super().insert(index, obj)\n self.events.added(item=obj, index=self.__locitem__(index))\n\n def append(self, obj):\n TypedList.append(self, obj)\n self.events.added(item=obj, index=len(self) - 1)\n\n def pop(self, key):\n obj = super().pop(key)\n self.events.removed(item=obj, index=key)\n return obj\n", "path": "napari/utils/list/_model.py"}]} | 1,085 | 125 |
gh_patches_debug_14048 | rasdani/github-patches | git_diff | ibis-project__ibis-8094 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: non-string Categorical from pandas crash memtable()
### What happened?
```python
import ibis
import pandas as pd
df = pd.DataFrame(
{
"cat_string": pd.Series(["a", "b", "c"], dtype="category"),
"cat_int": pd.Series([1, 2, 3], dtype="category"),
}
)
ibis.memtable(df).execute()
# ArrowTypeError: ('Expected a string or bytes dtype, got int64', 'Conversion failed for column cat_int with type category')
```
### What version of ibis are you using?
main (5998143f09add788bb569b9e3ea091240ba00a26, Jan 22, 2024)
### What backend(s) are you using, if any?
duckdb
### Relevant log output
_No response_
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/formats/pandas.py`
Content:
```
1 from __future__ import annotations
2
3 import contextlib
4 import datetime
5 import warnings
6
7 import numpy as np
8 import pandas as pd
9 import pandas.api.types as pdt
10 import pyarrow as pa
11
12 import ibis.expr.datatypes as dt
13 import ibis.expr.schema as sch
14 from ibis.common.temporal import normalize_timezone
15 from ibis.formats import DataMapper, SchemaMapper, TableProxy
16 from ibis.formats.numpy import NumpyType
17 from ibis.formats.pyarrow import PyArrowData, PyArrowSchema, PyArrowType
18
19 _has_arrow_dtype = hasattr(pd, "ArrowDtype")
20
21 if not _has_arrow_dtype:
22 warnings.warn(
23 f"The `ArrowDtype` class is not available in pandas {pd.__version__}. "
24 "Install pandas >= 1.5.0 for interop with pandas and arrow dtype support"
25 )
26
27
28 class PandasType(NumpyType):
29 @classmethod
30 def to_ibis(cls, typ, nullable=True):
31 if isinstance(typ, pdt.DatetimeTZDtype):
32 return dt.Timestamp(timezone=str(typ.tz), nullable=nullable)
33 elif pdt.is_datetime64_dtype(typ):
34 return dt.Timestamp(nullable=nullable)
35 elif isinstance(typ, pdt.CategoricalDtype):
36 return dt.String(nullable=nullable)
37 elif pdt.is_extension_array_dtype(typ):
38 if _has_arrow_dtype and isinstance(typ, pd.ArrowDtype):
39 return PyArrowType.to_ibis(typ.pyarrow_dtype, nullable=nullable)
40 else:
41 name = typ.__class__.__name__.replace("Dtype", "")
42 klass = getattr(dt, name)
43 return klass(nullable=nullable)
44 else:
45 return super().to_ibis(typ, nullable=nullable)
46
47 @classmethod
48 def from_ibis(cls, dtype):
49 if dtype.is_timestamp() and dtype.timezone:
50 return pdt.DatetimeTZDtype("ns", dtype.timezone)
51 elif dtype.is_interval():
52 return np.dtype(f"timedelta64[{dtype.unit.short}]")
53 else:
54 return super().from_ibis(dtype)
55
56
57 class PandasSchema(SchemaMapper):
58 @classmethod
59 def to_ibis(cls, pandas_schema):
60 if isinstance(pandas_schema, pd.Series):
61 pandas_schema = pandas_schema.to_list()
62
63 fields = {name: PandasType.to_ibis(t) for name, t in pandas_schema}
64
65 return sch.Schema(fields)
66
67 @classmethod
68 def from_ibis(cls, schema):
69 names = schema.names
70 types = [PandasType.from_ibis(t) for t in schema.types]
71 return list(zip(names, types))
72
73
74 class PandasData(DataMapper):
75 @classmethod
76 def infer_scalar(cls, s):
77 return PyArrowData.infer_scalar(s)
78
79 @classmethod
80 def infer_column(cls, s):
81 return PyArrowData.infer_column(s)
82
83 @classmethod
84 def infer_table(cls, df, schema=None):
85 schema = schema if schema is not None else {}
86
87 pairs = []
88 for column_name in df.dtypes.keys():
89 if not isinstance(column_name, str):
90 raise TypeError(
91 "Column names must be strings to use the pandas backend"
92 )
93
94 if column_name in schema:
95 ibis_dtype = schema[column_name]
96 else:
97 pandas_column = df[column_name]
98 pandas_dtype = pandas_column.dtype
99 if pandas_dtype == np.object_:
100 ibis_dtype = cls.infer_column(pandas_column)
101 else:
102 ibis_dtype = PandasType.to_ibis(pandas_dtype)
103
104 pairs.append((column_name, ibis_dtype))
105
106 return sch.Schema.from_tuples(pairs)
107
108 @classmethod
109 def convert_table(cls, df, schema):
110 if len(schema) != len(df.columns):
111 raise ValueError(
112 "schema column count does not match input data column count"
113 )
114
115 for (name, series), dtype in zip(df.items(), schema.types):
116 df[name] = cls.convert_column(series, dtype)
117
118 # return data with the schema's columns which may be different than the
119 # input columns
120 df.columns = schema.names
121 return df
122
123 @classmethod
124 def convert_column(cls, obj, dtype):
125 pandas_type = PandasType.from_ibis(dtype)
126
127 if obj.dtype == pandas_type and dtype.is_primitive():
128 return obj
129
130 method_name = f"convert_{dtype.__class__.__name__}"
131 convert_method = getattr(cls, method_name, cls.convert_default)
132
133 result = convert_method(obj, dtype, pandas_type)
134 assert not isinstance(result, np.ndarray), f"{convert_method} -> {type(result)}"
135 return result
136
137 @classmethod
138 def convert_scalar(cls, obj, dtype):
139 df = PandasData.convert_table(obj, sch.Schema({obj.columns[0]: dtype}))
140 return df.iat[0, 0]
141
142 @classmethod
143 def convert_GeoSpatial(cls, s, dtype, pandas_type):
144 return s
145
146 convert_Point = (
147 convert_LineString
148 ) = (
149 convert_Polygon
150 ) = (
151 convert_MultiLineString
152 ) = convert_MultiPoint = convert_MultiPolygon = convert_GeoSpatial
153
154 @classmethod
155 def convert_default(cls, s, dtype, pandas_type):
156 try:
157 return s.astype(pandas_type)
158 except Exception: # noqa: BLE001
159 return s
160
161 @classmethod
162 def convert_Boolean(cls, s, dtype, pandas_type):
163 if s.empty:
164 return s.astype(pandas_type)
165 elif pdt.is_object_dtype(s.dtype):
166 return s
167 elif s.dtype != pandas_type:
168 return s.map(bool, na_action="ignore")
169 else:
170 return s
171
172 @classmethod
173 def convert_Timestamp(cls, s, dtype, pandas_type):
174 if isinstance(dtype, pd.DatetimeTZDtype):
175 return s.dt.tz_convert(dtype.timezone)
176 elif pdt.is_datetime64_dtype(s.dtype):
177 return s.dt.tz_localize(dtype.timezone)
178 else:
179 try:
180 return s.astype(pandas_type)
181 except pd.errors.OutOfBoundsDatetime: # uncovered
182 try:
183 from dateutil.parser import parse as date_parse
184
185 return s.map(date_parse, na_action="ignore")
186 except TypeError:
187 return s
188 except TypeError:
189 try:
190 return pd.to_datetime(s).dt.tz_convert(dtype.timezone)
191 except TypeError:
192 return pd.to_datetime(s).dt.tz_localize(dtype.timezone)
193
194 @classmethod
195 def convert_Date(cls, s, dtype, pandas_type):
196 if isinstance(s.dtype, pd.DatetimeTZDtype):
197 s = s.dt.tz_convert("UTC").dt.tz_localize(None)
198 try:
199 return s.astype(pandas_type).dt.date
200 except (TypeError, pd._libs.tslibs.OutOfBoundsDatetime):
201
202 def try_date(v):
203 if isinstance(v, datetime.datetime):
204 return v.date()
205 elif isinstance(v, str):
206 return datetime.date.fromisoformat(v)
207 else:
208 return v
209
210 return s.map(try_date, na_action="ignore")
211
212 @classmethod
213 def convert_Interval(cls, s, dtype, pandas_type):
214 values = s.values
215 try:
216 result = values.astype(pandas_type)
217 except ValueError: # can happen when `column` is DateOffsets # uncovered
218 result = s
219 else:
220 result = s.__class__(result, index=s.index, name=s.name)
221 return result
222
223 @classmethod
224 def convert_String(cls, s, dtype, pandas_type):
225 return s.astype(pandas_type, errors="ignore")
226
227 @classmethod
228 def convert_UUID(cls, s, dtype, pandas_type):
229 return s.map(cls.get_element_converter(dtype), na_action="ignore")
230
231 @classmethod
232 def convert_Struct(cls, s, dtype, pandas_type):
233 return s.map(cls.get_element_converter(dtype), na_action="ignore")
234
235 @classmethod
236 def convert_Array(cls, s, dtype, pandas_type):
237 return s.map(cls.get_element_converter(dtype), na_action="ignore")
238
239 @classmethod
240 def convert_Map(cls, s, dtype, pandas_type):
241 return s.map(cls.get_element_converter(dtype), na_action="ignore")
242
243 @classmethod
244 def convert_JSON(cls, s, dtype, pandas_type):
245 return s.map(cls.get_element_converter(dtype), na_action="ignore").astype(
246 "object"
247 )
248
249 @classmethod
250 def get_element_converter(cls, dtype):
251 name = f"convert_{type(dtype).__name__}_element"
252 funcgen = getattr(cls, name, lambda _: lambda x: x)
253 return funcgen(dtype)
254
255 @classmethod
256 def convert_Struct_element(cls, dtype):
257 converters = tuple(map(cls.get_element_converter, dtype.types))
258
259 def convert(values, names=dtype.names, converters=converters):
260 if values is None:
261 return values
262
263 items = values.items() if isinstance(values, dict) else zip(names, values)
264 return {
265 k: converter(v) if v is not None else v
266 for converter, (k, v) in zip(converters, items)
267 }
268
269 return convert
270
271 @classmethod
272 def convert_JSON_element(cls, _):
273 import json
274
275 def convert(value):
276 if value is None:
277 return value
278 try:
279 return json.loads(value)
280 except (TypeError, json.JSONDecodeError):
281 return value
282
283 return convert
284
285 @classmethod
286 def convert_Timestamp_element(cls, dtype):
287 def converter(value, dtype=dtype):
288 if value is None:
289 return value
290
291 with contextlib.suppress(AttributeError):
292 value = value.item()
293
294 if isinstance(value, int):
295 # this can only mean a numpy or pandas timestamp because they
296 # both support nanosecond precision
297 #
298 # when the precision is less than or equal to the value
299 # supported by Python datetime.dateimte a call to .item() will
300 # return a datetime.datetime but when the precision is higher
301 # than the value supported by Python the value is an integer
302 #
303 # TODO: can we do better than implicit truncation to microseconds?
304 value = datetime.datetime.utcfromtimestamp(value / 1e9)
305
306 if (tz := dtype.timezone) is not None:
307 return value.astimezone(normalize_timezone(tz))
308
309 return value.replace(tzinfo=None)
310
311 return converter
312
313 @classmethod
314 def convert_Array_element(cls, dtype):
315 convert_value = cls.get_element_converter(dtype.value_type)
316
317 def convert(values):
318 if values is None:
319 return values
320
321 return [
322 convert_value(value) if value is not None else value for value in values
323 ]
324
325 return convert
326
327 @classmethod
328 def convert_Map_element(cls, dtype):
329 convert_key = cls.get_element_converter(dtype.key_type)
330 convert_value = cls.get_element_converter(dtype.value_type)
331
332 def convert(raw_row):
333 if raw_row is None:
334 return raw_row
335
336 row = dict(raw_row)
337 return dict(
338 zip(map(convert_key, row.keys()), map(convert_value, row.values()))
339 )
340
341 return convert
342
343 @classmethod
344 def convert_UUID_element(cls, _):
345 from uuid import UUID
346
347 def convert(value):
348 if value is None:
349 return value
350 elif isinstance(value, UUID):
351 return value
352 return UUID(value)
353
354 return convert
355
356
357 class DaskData(PandasData):
358 @classmethod
359 def infer_column(cls, s):
360 return PyArrowData.infer_column(s.compute())
361
362
363 class PandasDataFrameProxy(TableProxy[pd.DataFrame]):
364 def to_frame(self) -> pd.DataFrame:
365 return self.obj
366
367 def to_pyarrow(self, schema: sch.Schema) -> pa.Table:
368 pyarrow_schema = PyArrowSchema.from_ibis(schema)
369 return pa.Table.from_pandas(self.obj, schema=pyarrow_schema)
370
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ibis/formats/pandas.py b/ibis/formats/pandas.py
--- a/ibis/formats/pandas.py
+++ b/ibis/formats/pandas.py
@@ -33,7 +33,9 @@
elif pdt.is_datetime64_dtype(typ):
return dt.Timestamp(nullable=nullable)
elif isinstance(typ, pdt.CategoricalDtype):
- return dt.String(nullable=nullable)
+ if typ.categories is None or pdt.is_string_dtype(typ.categories):
+ return dt.String(nullable=nullable)
+ return cls.to_ibis(typ.categories.dtype, nullable=nullable)
elif pdt.is_extension_array_dtype(typ):
if _has_arrow_dtype and isinstance(typ, pd.ArrowDtype):
return PyArrowType.to_ibis(typ.pyarrow_dtype, nullable=nullable)
| {"golden_diff": "diff --git a/ibis/formats/pandas.py b/ibis/formats/pandas.py\n--- a/ibis/formats/pandas.py\n+++ b/ibis/formats/pandas.py\n@@ -33,7 +33,9 @@\n elif pdt.is_datetime64_dtype(typ):\n return dt.Timestamp(nullable=nullable)\n elif isinstance(typ, pdt.CategoricalDtype):\n- return dt.String(nullable=nullable)\n+ if typ.categories is None or pdt.is_string_dtype(typ.categories):\n+ return dt.String(nullable=nullable)\n+ return cls.to_ibis(typ.categories.dtype, nullable=nullable)\n elif pdt.is_extension_array_dtype(typ):\n if _has_arrow_dtype and isinstance(typ, pd.ArrowDtype):\n return PyArrowType.to_ibis(typ.pyarrow_dtype, nullable=nullable)\n", "issue": "bug: non-string Categorical from pandas crash memtable()\n### What happened?\n\n```python\r\nimport ibis\r\nimport pandas as pd\r\n\r\ndf = pd.DataFrame(\r\n {\r\n \"cat_string\": pd.Series([\"a\", \"b\", \"c\"], dtype=\"category\"),\r\n \"cat_int\": pd.Series([1, 2, 3], dtype=\"category\"),\r\n }\r\n)\r\nibis.memtable(df).execute()\r\n# ArrowTypeError: ('Expected a string or bytes dtype, got int64', 'Conversion failed for column cat_int with type category')\r\n```\n\n### What version of ibis are you using?\n\nmain (5998143f09add788bb569b9e3ea091240ba00a26, Jan 22, 2024)\n\n### What backend(s) are you using, if any?\n\nduckdb\n\n### Relevant log output\n\n_No response_\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport datetime\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pandas.api.types as pdt\nimport pyarrow as pa\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.schema as sch\nfrom ibis.common.temporal import normalize_timezone\nfrom ibis.formats import DataMapper, SchemaMapper, TableProxy\nfrom ibis.formats.numpy import NumpyType\nfrom ibis.formats.pyarrow import PyArrowData, PyArrowSchema, PyArrowType\n\n_has_arrow_dtype = hasattr(pd, \"ArrowDtype\")\n\nif not _has_arrow_dtype:\n warnings.warn(\n f\"The `ArrowDtype` class is not available in pandas {pd.__version__}. 
\"\n \"Install pandas >= 1.5.0 for interop with pandas and arrow dtype support\"\n )\n\n\nclass PandasType(NumpyType):\n @classmethod\n def to_ibis(cls, typ, nullable=True):\n if isinstance(typ, pdt.DatetimeTZDtype):\n return dt.Timestamp(timezone=str(typ.tz), nullable=nullable)\n elif pdt.is_datetime64_dtype(typ):\n return dt.Timestamp(nullable=nullable)\n elif isinstance(typ, pdt.CategoricalDtype):\n return dt.String(nullable=nullable)\n elif pdt.is_extension_array_dtype(typ):\n if _has_arrow_dtype and isinstance(typ, pd.ArrowDtype):\n return PyArrowType.to_ibis(typ.pyarrow_dtype, nullable=nullable)\n else:\n name = typ.__class__.__name__.replace(\"Dtype\", \"\")\n klass = getattr(dt, name)\n return klass(nullable=nullable)\n else:\n return super().to_ibis(typ, nullable=nullable)\n\n @classmethod\n def from_ibis(cls, dtype):\n if dtype.is_timestamp() and dtype.timezone:\n return pdt.DatetimeTZDtype(\"ns\", dtype.timezone)\n elif dtype.is_interval():\n return np.dtype(f\"timedelta64[{dtype.unit.short}]\")\n else:\n return super().from_ibis(dtype)\n\n\nclass PandasSchema(SchemaMapper):\n @classmethod\n def to_ibis(cls, pandas_schema):\n if isinstance(pandas_schema, pd.Series):\n pandas_schema = pandas_schema.to_list()\n\n fields = {name: PandasType.to_ibis(t) for name, t in pandas_schema}\n\n return sch.Schema(fields)\n\n @classmethod\n def from_ibis(cls, schema):\n names = schema.names\n types = [PandasType.from_ibis(t) for t in schema.types]\n return list(zip(names, types))\n\n\nclass PandasData(DataMapper):\n @classmethod\n def infer_scalar(cls, s):\n return PyArrowData.infer_scalar(s)\n\n @classmethod\n def infer_column(cls, s):\n return PyArrowData.infer_column(s)\n\n @classmethod\n def infer_table(cls, df, schema=None):\n schema = schema if schema is not None else {}\n\n pairs = []\n for column_name in df.dtypes.keys():\n if not isinstance(column_name, str):\n raise TypeError(\n \"Column names must be strings to use the pandas backend\"\n )\n\n if column_name in schema:\n ibis_dtype = schema[column_name]\n else:\n pandas_column = df[column_name]\n pandas_dtype = pandas_column.dtype\n if pandas_dtype == np.object_:\n ibis_dtype = cls.infer_column(pandas_column)\n else:\n ibis_dtype = PandasType.to_ibis(pandas_dtype)\n\n pairs.append((column_name, ibis_dtype))\n\n return sch.Schema.from_tuples(pairs)\n\n @classmethod\n def convert_table(cls, df, schema):\n if len(schema) != len(df.columns):\n raise ValueError(\n \"schema column count does not match input data column count\"\n )\n\n for (name, series), dtype in zip(df.items(), schema.types):\n df[name] = cls.convert_column(series, dtype)\n\n # return data with the schema's columns which may be different than the\n # input columns\n df.columns = schema.names\n return df\n\n @classmethod\n def convert_column(cls, obj, dtype):\n pandas_type = PandasType.from_ibis(dtype)\n\n if obj.dtype == pandas_type and dtype.is_primitive():\n return obj\n\n method_name = f\"convert_{dtype.__class__.__name__}\"\n convert_method = getattr(cls, method_name, cls.convert_default)\n\n result = convert_method(obj, dtype, pandas_type)\n assert not isinstance(result, np.ndarray), f\"{convert_method} -> {type(result)}\"\n return result\n\n @classmethod\n def convert_scalar(cls, obj, dtype):\n df = PandasData.convert_table(obj, sch.Schema({obj.columns[0]: dtype}))\n return df.iat[0, 0]\n\n @classmethod\n def convert_GeoSpatial(cls, s, dtype, pandas_type):\n return s\n\n convert_Point = (\n convert_LineString\n ) = (\n convert_Polygon\n ) = (\n 
convert_MultiLineString\n ) = convert_MultiPoint = convert_MultiPolygon = convert_GeoSpatial\n\n @classmethod\n def convert_default(cls, s, dtype, pandas_type):\n try:\n return s.astype(pandas_type)\n except Exception: # noqa: BLE001\n return s\n\n @classmethod\n def convert_Boolean(cls, s, dtype, pandas_type):\n if s.empty:\n return s.astype(pandas_type)\n elif pdt.is_object_dtype(s.dtype):\n return s\n elif s.dtype != pandas_type:\n return s.map(bool, na_action=\"ignore\")\n else:\n return s\n\n @classmethod\n def convert_Timestamp(cls, s, dtype, pandas_type):\n if isinstance(dtype, pd.DatetimeTZDtype):\n return s.dt.tz_convert(dtype.timezone)\n elif pdt.is_datetime64_dtype(s.dtype):\n return s.dt.tz_localize(dtype.timezone)\n else:\n try:\n return s.astype(pandas_type)\n except pd.errors.OutOfBoundsDatetime: # uncovered\n try:\n from dateutil.parser import parse as date_parse\n\n return s.map(date_parse, na_action=\"ignore\")\n except TypeError:\n return s\n except TypeError:\n try:\n return pd.to_datetime(s).dt.tz_convert(dtype.timezone)\n except TypeError:\n return pd.to_datetime(s).dt.tz_localize(dtype.timezone)\n\n @classmethod\n def convert_Date(cls, s, dtype, pandas_type):\n if isinstance(s.dtype, pd.DatetimeTZDtype):\n s = s.dt.tz_convert(\"UTC\").dt.tz_localize(None)\n try:\n return s.astype(pandas_type).dt.date\n except (TypeError, pd._libs.tslibs.OutOfBoundsDatetime):\n\n def try_date(v):\n if isinstance(v, datetime.datetime):\n return v.date()\n elif isinstance(v, str):\n return datetime.date.fromisoformat(v)\n else:\n return v\n\n return s.map(try_date, na_action=\"ignore\")\n\n @classmethod\n def convert_Interval(cls, s, dtype, pandas_type):\n values = s.values\n try:\n result = values.astype(pandas_type)\n except ValueError: # can happen when `column` is DateOffsets # uncovered\n result = s\n else:\n result = s.__class__(result, index=s.index, name=s.name)\n return result\n\n @classmethod\n def convert_String(cls, s, dtype, pandas_type):\n return s.astype(pandas_type, errors=\"ignore\")\n\n @classmethod\n def convert_UUID(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_Struct(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_Array(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_Map(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_JSON(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\").astype(\n \"object\"\n )\n\n @classmethod\n def get_element_converter(cls, dtype):\n name = f\"convert_{type(dtype).__name__}_element\"\n funcgen = getattr(cls, name, lambda _: lambda x: x)\n return funcgen(dtype)\n\n @classmethod\n def convert_Struct_element(cls, dtype):\n converters = tuple(map(cls.get_element_converter, dtype.types))\n\n def convert(values, names=dtype.names, converters=converters):\n if values is None:\n return values\n\n items = values.items() if isinstance(values, dict) else zip(names, values)\n return {\n k: converter(v) if v is not None else v\n for converter, (k, v) in zip(converters, items)\n }\n\n return convert\n\n @classmethod\n def convert_JSON_element(cls, _):\n import json\n\n def convert(value):\n if value is None:\n return value\n try:\n return json.loads(value)\n except (TypeError, 
json.JSONDecodeError):\n return value\n\n return convert\n\n @classmethod\n def convert_Timestamp_element(cls, dtype):\n def converter(value, dtype=dtype):\n if value is None:\n return value\n\n with contextlib.suppress(AttributeError):\n value = value.item()\n\n if isinstance(value, int):\n # this can only mean a numpy or pandas timestamp because they\n # both support nanosecond precision\n #\n # when the precision is less than or equal to the value\n # supported by Python datetime.dateimte a call to .item() will\n # return a datetime.datetime but when the precision is higher\n # than the value supported by Python the value is an integer\n #\n # TODO: can we do better than implicit truncation to microseconds?\n value = datetime.datetime.utcfromtimestamp(value / 1e9)\n\n if (tz := dtype.timezone) is not None:\n return value.astimezone(normalize_timezone(tz))\n\n return value.replace(tzinfo=None)\n\n return converter\n\n @classmethod\n def convert_Array_element(cls, dtype):\n convert_value = cls.get_element_converter(dtype.value_type)\n\n def convert(values):\n if values is None:\n return values\n\n return [\n convert_value(value) if value is not None else value for value in values\n ]\n\n return convert\n\n @classmethod\n def convert_Map_element(cls, dtype):\n convert_key = cls.get_element_converter(dtype.key_type)\n convert_value = cls.get_element_converter(dtype.value_type)\n\n def convert(raw_row):\n if raw_row is None:\n return raw_row\n\n row = dict(raw_row)\n return dict(\n zip(map(convert_key, row.keys()), map(convert_value, row.values()))\n )\n\n return convert\n\n @classmethod\n def convert_UUID_element(cls, _):\n from uuid import UUID\n\n def convert(value):\n if value is None:\n return value\n elif isinstance(value, UUID):\n return value\n return UUID(value)\n\n return convert\n\n\nclass DaskData(PandasData):\n @classmethod\n def infer_column(cls, s):\n return PyArrowData.infer_column(s.compute())\n\n\nclass PandasDataFrameProxy(TableProxy[pd.DataFrame]):\n def to_frame(self) -> pd.DataFrame:\n return self.obj\n\n def to_pyarrow(self, schema: sch.Schema) -> pa.Table:\n pyarrow_schema = PyArrowSchema.from_ibis(schema)\n return pa.Table.from_pandas(self.obj, schema=pyarrow_schema)\n", "path": "ibis/formats/pandas.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport contextlib\nimport datetime\nimport warnings\n\nimport numpy as np\nimport pandas as pd\nimport pandas.api.types as pdt\nimport pyarrow as pa\n\nimport ibis.expr.datatypes as dt\nimport ibis.expr.schema as sch\nfrom ibis.common.temporal import normalize_timezone\nfrom ibis.formats import DataMapper, SchemaMapper, TableProxy\nfrom ibis.formats.numpy import NumpyType\nfrom ibis.formats.pyarrow import PyArrowData, PyArrowSchema, PyArrowType\n\n_has_arrow_dtype = hasattr(pd, \"ArrowDtype\")\n\nif not _has_arrow_dtype:\n warnings.warn(\n f\"The `ArrowDtype` class is not available in pandas {pd.__version__}. 
\"\n \"Install pandas >= 1.5.0 for interop with pandas and arrow dtype support\"\n )\n\n\nclass PandasType(NumpyType):\n @classmethod\n def to_ibis(cls, typ, nullable=True):\n if isinstance(typ, pdt.DatetimeTZDtype):\n return dt.Timestamp(timezone=str(typ.tz), nullable=nullable)\n elif pdt.is_datetime64_dtype(typ):\n return dt.Timestamp(nullable=nullable)\n elif isinstance(typ, pdt.CategoricalDtype):\n if typ.categories is None or pdt.is_string_dtype(typ.categories):\n return dt.String(nullable=nullable)\n return cls.to_ibis(typ.categories.dtype, nullable=nullable)\n elif pdt.is_extension_array_dtype(typ):\n if _has_arrow_dtype and isinstance(typ, pd.ArrowDtype):\n return PyArrowType.to_ibis(typ.pyarrow_dtype, nullable=nullable)\n else:\n name = typ.__class__.__name__.replace(\"Dtype\", \"\")\n klass = getattr(dt, name)\n return klass(nullable=nullable)\n else:\n return super().to_ibis(typ, nullable=nullable)\n\n @classmethod\n def from_ibis(cls, dtype):\n if dtype.is_timestamp() and dtype.timezone:\n return pdt.DatetimeTZDtype(\"ns\", dtype.timezone)\n elif dtype.is_interval():\n return np.dtype(f\"timedelta64[{dtype.unit.short}]\")\n else:\n return super().from_ibis(dtype)\n\n\nclass PandasSchema(SchemaMapper):\n @classmethod\n def to_ibis(cls, pandas_schema):\n if isinstance(pandas_schema, pd.Series):\n pandas_schema = pandas_schema.to_list()\n\n fields = {name: PandasType.to_ibis(t) for name, t in pandas_schema}\n\n return sch.Schema(fields)\n\n @classmethod\n def from_ibis(cls, schema):\n names = schema.names\n types = [PandasType.from_ibis(t) for t in schema.types]\n return list(zip(names, types))\n\n\nclass PandasData(DataMapper):\n @classmethod\n def infer_scalar(cls, s):\n return PyArrowData.infer_scalar(s)\n\n @classmethod\n def infer_column(cls, s):\n return PyArrowData.infer_column(s)\n\n @classmethod\n def infer_table(cls, df, schema=None):\n schema = schema if schema is not None else {}\n\n pairs = []\n for column_name in df.dtypes.keys():\n if not isinstance(column_name, str):\n raise TypeError(\n \"Column names must be strings to use the pandas backend\"\n )\n\n if column_name in schema:\n ibis_dtype = schema[column_name]\n else:\n pandas_column = df[column_name]\n pandas_dtype = pandas_column.dtype\n if pandas_dtype == np.object_:\n ibis_dtype = cls.infer_column(pandas_column)\n else:\n ibis_dtype = PandasType.to_ibis(pandas_dtype)\n\n pairs.append((column_name, ibis_dtype))\n\n return sch.Schema.from_tuples(pairs)\n\n @classmethod\n def convert_table(cls, df, schema):\n if len(schema) != len(df.columns):\n raise ValueError(\n \"schema column count does not match input data column count\"\n )\n\n for (name, series), dtype in zip(df.items(), schema.types):\n df[name] = cls.convert_column(series, dtype)\n\n # return data with the schema's columns which may be different than the\n # input columns\n df.columns = schema.names\n return df\n\n @classmethod\n def convert_column(cls, obj, dtype):\n pandas_type = PandasType.from_ibis(dtype)\n\n if obj.dtype == pandas_type and dtype.is_primitive():\n return obj\n\n method_name = f\"convert_{dtype.__class__.__name__}\"\n convert_method = getattr(cls, method_name, cls.convert_default)\n\n result = convert_method(obj, dtype, pandas_type)\n assert not isinstance(result, np.ndarray), f\"{convert_method} -> {type(result)}\"\n return result\n\n @classmethod\n def convert_scalar(cls, obj, dtype):\n df = PandasData.convert_table(obj, sch.Schema({obj.columns[0]: dtype}))\n return df.iat[0, 0]\n\n @classmethod\n def convert_GeoSpatial(cls, s, 
dtype, pandas_type):\n return s\n\n convert_Point = (\n convert_LineString\n ) = (\n convert_Polygon\n ) = (\n convert_MultiLineString\n ) = convert_MultiPoint = convert_MultiPolygon = convert_GeoSpatial\n\n @classmethod\n def convert_default(cls, s, dtype, pandas_type):\n try:\n return s.astype(pandas_type)\n except Exception: # noqa: BLE001\n return s\n\n @classmethod\n def convert_Boolean(cls, s, dtype, pandas_type):\n if s.empty:\n return s.astype(pandas_type)\n elif pdt.is_object_dtype(s.dtype):\n return s\n elif s.dtype != pandas_type:\n return s.map(bool, na_action=\"ignore\")\n else:\n return s\n\n @classmethod\n def convert_Timestamp(cls, s, dtype, pandas_type):\n if isinstance(dtype, pd.DatetimeTZDtype):\n return s.dt.tz_convert(dtype.timezone)\n elif pdt.is_datetime64_dtype(s.dtype):\n return s.dt.tz_localize(dtype.timezone)\n else:\n try:\n return s.astype(pandas_type)\n except pd.errors.OutOfBoundsDatetime: # uncovered\n try:\n from dateutil.parser import parse as date_parse\n\n return s.map(date_parse, na_action=\"ignore\")\n except TypeError:\n return s\n except TypeError:\n try:\n return pd.to_datetime(s).dt.tz_convert(dtype.timezone)\n except TypeError:\n return pd.to_datetime(s).dt.tz_localize(dtype.timezone)\n\n @classmethod\n def convert_Date(cls, s, dtype, pandas_type):\n if isinstance(s.dtype, pd.DatetimeTZDtype):\n s = s.dt.tz_convert(\"UTC\").dt.tz_localize(None)\n try:\n return s.astype(pandas_type).dt.date\n except (TypeError, pd._libs.tslibs.OutOfBoundsDatetime):\n\n def try_date(v):\n if isinstance(v, datetime.datetime):\n return v.date()\n elif isinstance(v, str):\n return datetime.date.fromisoformat(v)\n else:\n return v\n\n return s.map(try_date, na_action=\"ignore\")\n\n @classmethod\n def convert_Interval(cls, s, dtype, pandas_type):\n values = s.values\n try:\n result = values.astype(pandas_type)\n except ValueError: # can happen when `column` is DateOffsets # uncovered\n result = s\n else:\n result = s.__class__(result, index=s.index, name=s.name)\n return result\n\n @classmethod\n def convert_String(cls, s, dtype, pandas_type):\n return s.astype(pandas_type, errors=\"ignore\")\n\n @classmethod\n def convert_UUID(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_Struct(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_Array(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_Map(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\")\n\n @classmethod\n def convert_JSON(cls, s, dtype, pandas_type):\n return s.map(cls.get_element_converter(dtype), na_action=\"ignore\").astype(\n \"object\"\n )\n\n @classmethod\n def get_element_converter(cls, dtype):\n name = f\"convert_{type(dtype).__name__}_element\"\n funcgen = getattr(cls, name, lambda _: lambda x: x)\n return funcgen(dtype)\n\n @classmethod\n def convert_Struct_element(cls, dtype):\n converters = tuple(map(cls.get_element_converter, dtype.types))\n\n def convert(values, names=dtype.names, converters=converters):\n if values is None:\n return values\n\n items = values.items() if isinstance(values, dict) else zip(names, values)\n return {\n k: converter(v) if v is not None else v\n for converter, (k, v) in zip(converters, items)\n }\n\n return convert\n\n @classmethod\n def convert_JSON_element(cls, _):\n import json\n\n def 
convert(value):\n if value is None:\n return value\n try:\n return json.loads(value)\n except (TypeError, json.JSONDecodeError):\n return value\n\n return convert\n\n @classmethod\n def convert_Timestamp_element(cls, dtype):\n def converter(value, dtype=dtype):\n if value is None:\n return value\n\n with contextlib.suppress(AttributeError):\n value = value.item()\n\n if isinstance(value, int):\n # this can only mean a numpy or pandas timestamp because they\n # both support nanosecond precision\n #\n # when the precision is less than or equal to the value\n # supported by Python datetime.dateimte a call to .item() will\n # return a datetime.datetime but when the precision is higher\n # than the value supported by Python the value is an integer\n #\n # TODO: can we do better than implicit truncation to microseconds?\n value = datetime.datetime.utcfromtimestamp(value / 1e9)\n\n if (tz := dtype.timezone) is not None:\n return value.astimezone(normalize_timezone(tz))\n\n return value.replace(tzinfo=None)\n\n return converter\n\n @classmethod\n def convert_Array_element(cls, dtype):\n convert_value = cls.get_element_converter(dtype.value_type)\n\n def convert(values):\n if values is None:\n return values\n\n return [\n convert_value(value) if value is not None else value for value in values\n ]\n\n return convert\n\n @classmethod\n def convert_Map_element(cls, dtype):\n convert_key = cls.get_element_converter(dtype.key_type)\n convert_value = cls.get_element_converter(dtype.value_type)\n\n def convert(raw_row):\n if raw_row is None:\n return raw_row\n\n row = dict(raw_row)\n return dict(\n zip(map(convert_key, row.keys()), map(convert_value, row.values()))\n )\n\n return convert\n\n @classmethod\n def convert_UUID_element(cls, _):\n from uuid import UUID\n\n def convert(value):\n if value is None:\n return value\n elif isinstance(value, UUID):\n return value\n return UUID(value)\n\n return convert\n\n\nclass DaskData(PandasData):\n @classmethod\n def infer_column(cls, s):\n return PyArrowData.infer_column(s.compute())\n\n\nclass PandasDataFrameProxy(TableProxy[pd.DataFrame]):\n def to_frame(self) -> pd.DataFrame:\n return self.obj\n\n def to_pyarrow(self, schema: sch.Schema) -> pa.Table:\n pyarrow_schema = PyArrowSchema.from_ibis(schema)\n return pa.Table.from_pandas(self.obj, schema=pyarrow_schema)\n", "path": "ibis/formats/pandas.py"}]} | 4,071 | 179 |
gh_patches_debug_40266 | rasdani/github-patches | git_diff | ultrabug__py3status-1093 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mpd_status creates a new socket connection every second
I'm not sure if this should be a bug report or a feature request, but I've been bitten by this, erm, _feature_: I use `mopidy` (mpd with spotify integration) and that gets confused when a request comes in from `mpd_status` while it is processing an mpd command block. 

This also keeps a lot of abandoned connections in `TIME_WAIT` state - 60 currently on my system. 

I've had a quick look at the code for this module, and the connection is established in `current_track()`. I do not know if it is possible, but the best approach (IMHO) would be to `connect()` once and keep the socket alive until `py3status` gets reloaded. 
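A minimal sketch of that persistent-connection idea, assuming the `python-mpd2` client (the wrapper class and its names are illustrative, not part of the module):

```python
import socket

from mpd import MPDClient, ConnectionError


class PersistentMPD(object):
    """Keep one MPD connection alive instead of reconnecting on every poll."""

    def __init__(self, host='localhost', port=6600, password=None):
        self.host, self.port, self.password = host, port, password
        self._client = None

    def _connect(self):
        client = MPDClient()
        client.connect(self.host, self.port)
        if self.password:
            client.password(self.password)
        return client

    def status(self):
        if self._client is None:
            self._client = self._connect()
        try:
            return self._client.status()
        except (ConnectionError, socket.error):
            # The old socket went stale: drop it and retry once.
            self._client = None
            self._client = self._connect()
            return self._client.status()
```

A single socket stays open between polls (no pile-up of `TIME_WAIT` connections) and is only re-opened after an error.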
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `py3status/modules/mpd_status.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Display song currently playing in mpd.
4
5 Configuration parameters:
6 cache_timeout: how often we refresh this module in seconds (default 2)
7 format: template string (see below)
8 (default '{state} [[[{artist}] - {title}]|[{file}]]')
9 hide_when_paused: hide the status if state is paused (default False)
10 hide_when_stopped: hide the status if state is stopped (default True)
11 host: mpd host (default 'localhost')
12 max_width: maximum status length (default 120)
13 password: mpd password (default None)
14 port: mpd port (default '6600')
15 state_pause: label to display for "paused" state (default '[pause]')
16 state_play: label to display for "playing" state (default '[play]')
17 state_stop: label to display for "stopped" state (default '[stop]')
18
19 Color options:
20 color_pause: Paused, default color_degraded
21 color_play: Playing, default color_good
22 color_stop: Stopped, default color_bad
23
24 Format placeholders:
25 {state} state (paused, playing. stopped) can be defined via `state_..`
26 configuration parameters
27 Refer to the mpc(1) manual page for the list of available placeholders to
28 be used in the format. Placeholders should use braces `{}` rather than
29 percent `%%` eg `{artist}`.
30 Every placeholder can also be prefixed with
31 `next_` to retrieve the data for the song following the one currently
32 playing.
33
34 Requires:
35 python-mpd2: (NOT python2-mpd2)
36 ```
37 # pip install python-mpd2
38 ```
39
40 Note: previously formats using %field% where allowed for this module, but
41 standard placeholders should be used.
42
43 Examples of `format`
44 ```
45 # Show state and (artist -) title, if no title fallback to file:
46 {state} [[[{artist} - ]{title}]|[{file}]]
47
48 # Show state, [duration], title (or file) and next song title (or file):
49 {state} \[{time}\] [{title}|{file}] → [{next_title}|{next_file}]
50 ```
51
52 @author shadowprince, zopieux
53 @license Eclipse Public License
54
55 SAMPLE OUTPUT
56 {'color': '#00ff00', 'full_text': '[play] Music For Programming - Idol Eyes'}
57
58 paused
59 {'color': '#ffff00', 'full_text': '[pause] Music For Programming - Idol Eyes'}
60
61 stopped
62 {'color': '#ff0000', 'full_text': '[stop] Music For Programming - Idol Eyes'}
63 """
64
65 import datetime
66 import re
67 import socket
68 from mpd import MPDClient, CommandError
69
70
71 def song_attr(song, attr):
72 def parse_mtime(date_str):
73 return datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')
74
75 if attr == 'time':
76 try:
77 duration = int(song['time'])
78 if duration > 0:
79 minutes, seconds = divmod(duration, 60)
80 return '{:d}:{:02d}'.format(minutes, seconds)
81 raise ValueError
82 except (KeyError, ValueError):
83 return ''
84 elif attr == 'position':
85 try:
86 return '{}'.format(int(song['pos']) + 1)
87 except (KeyError, ValueError):
88 return ''
89 elif attr == 'mtime':
90 return parse_mtime(song['last-modified']).strftime('%c')
91 elif attr == 'mdate':
92 return parse_mtime(song['last-modified']).strftime('%x')
93
94 return song.get(attr, '')
95
96
97 class Py3status:
98 """
99 """
100 # available configuration parameters
101 cache_timeout = 2
102 format = '{state} [[[{artist}] - {title}]|[{file}]]'
103 hide_when_paused = False
104 hide_when_stopped = True
105 host = 'localhost'
106 max_width = 120
107 password = None
108 port = '6600'
109 state_pause = '[pause]'
110 state_play = '[play]'
111 state_stop = '[stop]'
112
113 def post_config_hook(self):
114 # Convert from %placeholder% to {placeholder}
115 # This is not perfect but should be good enough
116 if not self.py3.get_placeholders_list(self.format) and '%' in self.format:
117 self.format = re.sub('%([a-z]+)%', r'{\1}', self.format)
118 self.py3.log('Old % style format DEPRECATED use { style format')
119
120 def _state_character(self, state):
121 if state == 'play':
122 return self.state_play
123 elif state == 'pause':
124 return self.state_pause
125 elif state == 'stop':
126 return self.state_stop
127 return '?'
128
129 def current_track(self):
130 try:
131 c = MPDClient()
132 c.connect(host=self.host, port=self.port)
133 if self.password:
134 c.password(self.password)
135
136 status = c.status()
137 song = int(status.get('song', 0))
138 next_song = int(status.get('nextsong', 0))
139
140 state = status.get('state')
141
142 if ((state == 'pause' and self.hide_when_paused) or
143 (state == 'stop' and self.hide_when_stopped)):
144 text = ''
145
146 else:
147 playlist_info = c.playlistinfo()
148 try:
149 song = playlist_info[song]
150 except IndexError:
151 song = {}
152 try:
153 next_song = playlist_info[next_song]
154 except IndexError:
155 next_song = {}
156
157 song['state'] = next_song['state'] \
158 = self._state_character(state)
159
160 def attr_getter(attr):
161 if attr.startswith('next_'):
162 return song_attr(next_song, attr[5:])
163 return song_attr(song, attr)
164
165 text = self.py3.safe_format(self.format, attr_getter=attr_getter)
166
167 except socket.error:
168 text = "Failed to connect to mpd!"
169 state = None
170 except CommandError:
171 text = "Failed to authenticate to mpd!"
172 state = None
173 c.disconnect()
174 else:
175 c.disconnect()
176
177 if len(text) > self.max_width:
178 text = u'{}...'.format(text[:self.max_width - 3])
179
180 response = {
181 'cached_until': self.py3.time_in(self.cache_timeout),
182 'full_text': text,
183 }
184
185 if state:
186 if state == 'play':
187 response['color'] = self.py3.COLOR_PLAY or self.py3.COLOR_GOOD
188 elif state == 'pause':
189 response['color'] = (self.py3.COLOR_PAUSE or
190 self.py3.COLOR_DEGRADED)
191 elif state == 'stop':
192 response['color'] = self.py3.COLOR_STOP or self.py3.COLOR_BAD
193
194 return response
195
196
197 if __name__ == "__main__":
198 """
199 Run module in test mode.
200 """
201 from py3status.module_test import module_test
202 module_test(Py3status)
203
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/py3status/modules/mpd_status.py b/py3status/modules/mpd_status.py
--- a/py3status/modules/mpd_status.py
+++ b/py3status/modules/mpd_status.py
@@ -65,7 +65,7 @@
import datetime
import re
import socket
-from mpd import MPDClient, CommandError
+from mpd import MPDClient, CommandError, ConnectionError
def song_attr(song, attr):
@@ -116,6 +116,27 @@
if not self.py3.get_placeholders_list(self.format) and '%' in self.format:
self.format = re.sub('%([a-z]+)%', r'{\1}', self.format)
self.py3.log('Old % style format DEPRECATED use { style format')
+ # class variables:
+ self.client = None
+
+ def _get_mpd(self, disconnect=False):
+ if disconnect:
+ try:
+ self.client.disconnect()
+ finally:
+ self.client = None
+ return
+
+ try:
+ if self.client is None:
+ self.client = MPDClient()
+ self.client.connect(host=self.host, port=self.port)
+ if self.password:
+ self.client.password(self.password)
+ return self.client
+ except (socket.error, ConnectionError, CommandError) as e:
+ self.client = None
+ raise e
def _state_character(self, state):
if state == 'play':
@@ -128,12 +149,7 @@
def current_track(self):
try:
- c = MPDClient()
- c.connect(host=self.host, port=self.port)
- if self.password:
- c.password(self.password)
-
- status = c.status()
+ status = self._get_mpd().status()
song = int(status.get('song', 0))
next_song = int(status.get('nextsong', 0))
@@ -144,7 +160,7 @@
text = ''
else:
- playlist_info = c.playlistinfo()
+ playlist_info = self._get_mpd().playlistinfo()
try:
song = playlist_info[song]
except IndexError:
@@ -167,12 +183,14 @@
except socket.error:
text = "Failed to connect to mpd!"
state = None
+ except ConnectionError:
+ text = "Error while connecting to mpd!"
+ state = None
+ self._get_mpd(disconnect=True)
except CommandError:
text = "Failed to authenticate to mpd!"
state = None
- c.disconnect()
- else:
- c.disconnect()
+ self._get_mpd(disconnect=True)
if len(text) > self.max_width:
text = u'{}...'.format(text[:self.max_width - 3])
@@ -193,6 +211,9 @@
return response
+ def kill(self):
+ self._get_mpd(disconnect=True)
+
if __name__ == "__main__":
"""
| {"golden_diff": "diff --git a/py3status/modules/mpd_status.py b/py3status/modules/mpd_status.py\n--- a/py3status/modules/mpd_status.py\n+++ b/py3status/modules/mpd_status.py\n@@ -65,7 +65,7 @@\n import datetime\n import re\n import socket\n-from mpd import MPDClient, CommandError\n+from mpd import MPDClient, CommandError, ConnectionError\n \n \n def song_attr(song, attr):\n@@ -116,6 +116,27 @@\n if not self.py3.get_placeholders_list(self.format) and '%' in self.format:\n self.format = re.sub('%([a-z]+)%', r'{\\1}', self.format)\n self.py3.log('Old % style format DEPRECATED use { style format')\n+ # class variables:\n+ self.client = None\n+\n+ def _get_mpd(self, disconnect=False):\n+ if disconnect:\n+ try:\n+ self.client.disconnect()\n+ finally:\n+ self.client = None\n+ return\n+\n+ try:\n+ if self.client is None:\n+ self.client = MPDClient()\n+ self.client.connect(host=self.host, port=self.port)\n+ if self.password:\n+ self.client.password(self.password)\n+ return self.client\n+ except (socket.error, ConnectionError, CommandError) as e:\n+ self.client = None\n+ raise e\n \n def _state_character(self, state):\n if state == 'play':\n@@ -128,12 +149,7 @@\n \n def current_track(self):\n try:\n- c = MPDClient()\n- c.connect(host=self.host, port=self.port)\n- if self.password:\n- c.password(self.password)\n-\n- status = c.status()\n+ status = self._get_mpd().status()\n song = int(status.get('song', 0))\n next_song = int(status.get('nextsong', 0))\n \n@@ -144,7 +160,7 @@\n text = ''\n \n else:\n- playlist_info = c.playlistinfo()\n+ playlist_info = self._get_mpd().playlistinfo()\n try:\n song = playlist_info[song]\n except IndexError:\n@@ -167,12 +183,14 @@\n except socket.error:\n text = \"Failed to connect to mpd!\"\n state = None\n+ except ConnectionError:\n+ text = \"Error while connecting to mpd!\"\n+ state = None\n+ self._get_mpd(disconnect=True)\n except CommandError:\n text = \"Failed to authenticate to mpd!\"\n state = None\n- c.disconnect()\n- else:\n- c.disconnect()\n+ self._get_mpd(disconnect=True)\n \n if len(text) > self.max_width:\n text = u'{}...'.format(text[:self.max_width - 3])\n@@ -193,6 +211,9 @@\n \n return response\n \n+ def kill(self):\n+ self._get_mpd(disconnect=True)\n+\n \n if __name__ == \"__main__\":\n \"\"\"\n", "issue": "mpd_status creates a new socket connection every second\nI'm not sure if this should be a bug report or a feature request, but I've been bitten by this, erm, _feature_: I use `mopidy` (mpd with spotify integration) and that gets confused when a request comes in from `mpd_status` while it processes a mpd-command-block. \r\n\r\nThis also keeps a lot of abandoned connections in `TIME_WAIT` state - 60 currently at my system. \r\n\r\nI've had a quick look at the code for this module, and the connection i s esatablished in `current_track()`. I do not know if it is possible, but the best apporach (IMHO) would be to `connect()` once and keep the socket alive until `py3status` gets reloaded. \nmpd_status creates a new socket connection every second\nI'm not sure if this should be a bug report or a feature request, but I've been bitten by this, erm, _feature_: I use `mopidy` (mpd with spotify integration) and that gets confused when a request comes in from `mpd_status` while it processes a mpd-command-block. \r\n\r\nThis also keeps a lot of abandoned connections in `TIME_WAIT` state - 60 currently at my system. \r\n\r\nI've had a quick look at the code for this module, and the connection i s esatablished in `current_track()`. 
I do not know if it is possible, but the best apporach (IMHO) would be to `connect()` once and keep the socket alive until `py3status` gets reloaded. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay song currently playing in mpd.\n\nConfiguration parameters:\n cache_timeout: how often we refresh this module in seconds (default 2)\n format: template string (see below)\n (default '{state} [[[{artist}] - {title}]|[{file}]]')\n hide_when_paused: hide the status if state is paused (default False)\n hide_when_stopped: hide the status if state is stopped (default True)\n host: mpd host (default 'localhost')\n max_width: maximum status length (default 120)\n password: mpd password (default None)\n port: mpd port (default '6600')\n state_pause: label to display for \"paused\" state (default '[pause]')\n state_play: label to display for \"playing\" state (default '[play]')\n state_stop: label to display for \"stopped\" state (default '[stop]')\n\nColor options:\n color_pause: Paused, default color_degraded\n color_play: Playing, default color_good\n color_stop: Stopped, default color_bad\n\nFormat placeholders:\n {state} state (paused, playing. stopped) can be defined via `state_..`\n configuration parameters\n Refer to the mpc(1) manual page for the list of available placeholders to\n be used in the format. Placeholders should use braces `{}` rather than\n percent `%%` eg `{artist}`.\n Every placeholder can also be prefixed with\n `next_` to retrieve the data for the song following the one currently\n playing.\n\nRequires:\n python-mpd2: (NOT python2-mpd2)\n```\n# pip install python-mpd2\n```\n\nNote: previously formats using %field% where allowed for this module, but\nstandard placeholders should be used.\n\nExamples of `format`\n```\n# Show state and (artist -) title, if no title fallback to file:\n{state} [[[{artist} - ]{title}]|[{file}]]\n\n# Show state, [duration], title (or file) and next song title (or file):\n{state} \\[{time}\\] [{title}|{file}] \u2192 [{next_title}|{next_file}]\n```\n\n@author shadowprince, zopieux\n@license Eclipse Public License\n\nSAMPLE OUTPUT\n{'color': '#00ff00', 'full_text': '[play] Music For Programming - Idol Eyes'}\n\npaused\n{'color': '#ffff00', 'full_text': '[pause] Music For Programming - Idol Eyes'}\n\nstopped\n{'color': '#ff0000', 'full_text': '[stop] Music For Programming - Idol Eyes'}\n\"\"\"\n\nimport datetime\nimport re\nimport socket\nfrom mpd import MPDClient, CommandError\n\n\ndef song_attr(song, attr):\n def parse_mtime(date_str):\n return datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')\n\n if attr == 'time':\n try:\n duration = int(song['time'])\n if duration > 0:\n minutes, seconds = divmod(duration, 60)\n return '{:d}:{:02d}'.format(minutes, seconds)\n raise ValueError\n except (KeyError, ValueError):\n return ''\n elif attr == 'position':\n try:\n return '{}'.format(int(song['pos']) + 1)\n except (KeyError, ValueError):\n return ''\n elif attr == 'mtime':\n return parse_mtime(song['last-modified']).strftime('%c')\n elif attr == 'mdate':\n return parse_mtime(song['last-modified']).strftime('%x')\n\n return song.get(attr, '')\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 2\n format = '{state} [[[{artist}] - {title}]|[{file}]]'\n hide_when_paused = False\n hide_when_stopped = True\n host = 'localhost'\n max_width = 120\n password = None\n port = '6600'\n state_pause = '[pause]'\n state_play = '[play]'\n state_stop = '[stop]'\n\n def post_config_hook(self):\n # 
Convert from %placeholder% to {placeholder}\n # This is not perfect but should be good enough\n if not self.py3.get_placeholders_list(self.format) and '%' in self.format:\n self.format = re.sub('%([a-z]+)%', r'{\\1}', self.format)\n self.py3.log('Old % style format DEPRECATED use { style format')\n\n def _state_character(self, state):\n if state == 'play':\n return self.state_play\n elif state == 'pause':\n return self.state_pause\n elif state == 'stop':\n return self.state_stop\n return '?'\n\n def current_track(self):\n try:\n c = MPDClient()\n c.connect(host=self.host, port=self.port)\n if self.password:\n c.password(self.password)\n\n status = c.status()\n song = int(status.get('song', 0))\n next_song = int(status.get('nextsong', 0))\n\n state = status.get('state')\n\n if ((state == 'pause' and self.hide_when_paused) or\n (state == 'stop' and self.hide_when_stopped)):\n text = ''\n\n else:\n playlist_info = c.playlistinfo()\n try:\n song = playlist_info[song]\n except IndexError:\n song = {}\n try:\n next_song = playlist_info[next_song]\n except IndexError:\n next_song = {}\n\n song['state'] = next_song['state'] \\\n = self._state_character(state)\n\n def attr_getter(attr):\n if attr.startswith('next_'):\n return song_attr(next_song, attr[5:])\n return song_attr(song, attr)\n\n text = self.py3.safe_format(self.format, attr_getter=attr_getter)\n\n except socket.error:\n text = \"Failed to connect to mpd!\"\n state = None\n except CommandError:\n text = \"Failed to authenticate to mpd!\"\n state = None\n c.disconnect()\n else:\n c.disconnect()\n\n if len(text) > self.max_width:\n text = u'{}...'.format(text[:self.max_width - 3])\n\n response = {\n 'cached_until': self.py3.time_in(self.cache_timeout),\n 'full_text': text,\n }\n\n if state:\n if state == 'play':\n response['color'] = self.py3.COLOR_PLAY or self.py3.COLOR_GOOD\n elif state == 'pause':\n response['color'] = (self.py3.COLOR_PAUSE or\n self.py3.COLOR_DEGRADED)\n elif state == 'stop':\n response['color'] = self.py3.COLOR_STOP or self.py3.COLOR_BAD\n\n return response\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/mpd_status.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDisplay song currently playing in mpd.\n\nConfiguration parameters:\n cache_timeout: how often we refresh this module in seconds (default 2)\n format: template string (see below)\n (default '{state} [[[{artist}] - {title}]|[{file}]]')\n hide_when_paused: hide the status if state is paused (default False)\n hide_when_stopped: hide the status if state is stopped (default True)\n host: mpd host (default 'localhost')\n max_width: maximum status length (default 120)\n password: mpd password (default None)\n port: mpd port (default '6600')\n state_pause: label to display for \"paused\" state (default '[pause]')\n state_play: label to display for \"playing\" state (default '[play]')\n state_stop: label to display for \"stopped\" state (default '[stop]')\n\nColor options:\n color_pause: Paused, default color_degraded\n color_play: Playing, default color_good\n color_stop: Stopped, default color_bad\n\nFormat placeholders:\n {state} state (paused, playing. stopped) can be defined via `state_..`\n configuration parameters\n Refer to the mpc(1) manual page for the list of available placeholders to\n be used in the format. 
Placeholders should use braces `{}` rather than\n percent `%%` eg `{artist}`.\n Every placeholder can also be prefixed with\n `next_` to retrieve the data for the song following the one currently\n playing.\n\nRequires:\n python-mpd2: (NOT python2-mpd2)\n```\n# pip install python-mpd2\n```\n\nNote: previously formats using %field% where allowed for this module, but\nstandard placeholders should be used.\n\nExamples of `format`\n```\n# Show state and (artist -) title, if no title fallback to file:\n{state} [[[{artist} - ]{title}]|[{file}]]\n\n# Show state, [duration], title (or file) and next song title (or file):\n{state} \\[{time}\\] [{title}|{file}] \u2192 [{next_title}|{next_file}]\n```\n\n@author shadowprince, zopieux\n@license Eclipse Public License\n\nSAMPLE OUTPUT\n{'color': '#00ff00', 'full_text': '[play] Music For Programming - Idol Eyes'}\n\npaused\n{'color': '#ffff00', 'full_text': '[pause] Music For Programming - Idol Eyes'}\n\nstopped\n{'color': '#ff0000', 'full_text': '[stop] Music For Programming - Idol Eyes'}\n\"\"\"\n\nimport datetime\nimport re\nimport socket\nfrom mpd import MPDClient, CommandError, ConnectionError\n\n\ndef song_attr(song, attr):\n def parse_mtime(date_str):\n return datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%SZ')\n\n if attr == 'time':\n try:\n duration = int(song['time'])\n if duration > 0:\n minutes, seconds = divmod(duration, 60)\n return '{:d}:{:02d}'.format(minutes, seconds)\n raise ValueError\n except (KeyError, ValueError):\n return ''\n elif attr == 'position':\n try:\n return '{}'.format(int(song['pos']) + 1)\n except (KeyError, ValueError):\n return ''\n elif attr == 'mtime':\n return parse_mtime(song['last-modified']).strftime('%c')\n elif attr == 'mdate':\n return parse_mtime(song['last-modified']).strftime('%x')\n\n return song.get(attr, '')\n\n\nclass Py3status:\n \"\"\"\n \"\"\"\n # available configuration parameters\n cache_timeout = 2\n format = '{state} [[[{artist}] - {title}]|[{file}]]'\n hide_when_paused = False\n hide_when_stopped = True\n host = 'localhost'\n max_width = 120\n password = None\n port = '6600'\n state_pause = '[pause]'\n state_play = '[play]'\n state_stop = '[stop]'\n\n def post_config_hook(self):\n # Convert from %placeholder% to {placeholder}\n # This is not perfect but should be good enough\n if not self.py3.get_placeholders_list(self.format) and '%' in self.format:\n self.format = re.sub('%([a-z]+)%', r'{\\1}', self.format)\n self.py3.log('Old % style format DEPRECATED use { style format')\n # class variables:\n self.client = None\n\n def _get_mpd(self, disconnect=False):\n if disconnect:\n try:\n self.client.disconnect()\n finally:\n self.client = None\n return\n\n try:\n if self.client is None:\n self.client = MPDClient()\n self.client.connect(host=self.host, port=self.port)\n if self.password:\n self.client.password(self.password)\n return self.client\n except (socket.error, ConnectionError, CommandError) as e:\n self.client = None\n raise e\n\n def _state_character(self, state):\n if state == 'play':\n return self.state_play\n elif state == 'pause':\n return self.state_pause\n elif state == 'stop':\n return self.state_stop\n return '?'\n\n def current_track(self):\n try:\n status = self._get_mpd().status()\n song = int(status.get('song', 0))\n next_song = int(status.get('nextsong', 0))\n\n state = status.get('state')\n\n if ((state == 'pause' and self.hide_when_paused) or\n (state == 'stop' and self.hide_when_stopped)):\n text = ''\n\n else:\n playlist_info = self._get_mpd().playlistinfo()\n try:\n 
song = playlist_info[song]\n except IndexError:\n song = {}\n try:\n next_song = playlist_info[next_song]\n except IndexError:\n next_song = {}\n\n song['state'] = next_song['state'] \\\n = self._state_character(state)\n\n def attr_getter(attr):\n if attr.startswith('next_'):\n return song_attr(next_song, attr[5:])\n return song_attr(song, attr)\n\n text = self.py3.safe_format(self.format, attr_getter=attr_getter)\n\n except socket.error:\n text = \"Failed to connect to mpd!\"\n state = None\n except ConnectionError:\n text = \"Error while connecting to mpd!\"\n state = None\n self._get_mpd(disconnect=True)\n except CommandError:\n text = \"Failed to authenticate to mpd!\"\n state = None\n self._get_mpd(disconnect=True)\n\n if len(text) > self.max_width:\n text = u'{}...'.format(text[:self.max_width - 3])\n\n response = {\n 'cached_until': self.py3.time_in(self.cache_timeout),\n 'full_text': text,\n }\n\n if state:\n if state == 'play':\n response['color'] = self.py3.COLOR_PLAY or self.py3.COLOR_GOOD\n elif state == 'pause':\n response['color'] = (self.py3.COLOR_PAUSE or\n self.py3.COLOR_DEGRADED)\n elif state == 'stop':\n response['color'] = self.py3.COLOR_STOP or self.py3.COLOR_BAD\n\n return response\n\n def kill(self):\n self._get_mpd(disconnect=True)\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Run module in test mode.\n \"\"\"\n from py3status.module_test import module_test\n module_test(Py3status)\n", "path": "py3status/modules/mpd_status.py"}]} | 2,638 | 686 |
gh_patches_debug_17308 | rasdani/github-patches | git_diff | pre-commit__pre-commit-315 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
:args seems to break with {} in list.
I am working on a repo with some hooks for my company: https://github.com/marick/pre-commit-hooks
There is a hook that works fine with this `.pre-commit-config.yaml`:
``` yaml
- repo: /Users/marick/src/pre-commit-hooks
sha: d6dee96f56bf9290f7ebb852c4252c50b8f6215d
stages: [commit, push]
hooks:
- id: prohibit-suspicious-patterns
args: ["AKIA[[:alnum]]", --]
```
However, if I change the first arg by adding `{1}`:
``` yaml
args: ["AKIA[[:alnum]]{1}", --]
```
... I get this:
```
prohibit suspicious patterns..................................................................
An unexpected error has occurred: IndexError: tuple index out of range
Check the log at ~/.pre-commit/pre-commit.log
```
The contents of `pre-commit.log`:
```
An unexpected error has occurred: IndexError: tuple index out of range
Traceback (most recent call last):
File "/usr/local/lib/python2.7/site-packages/pre_commit/error_handler.py", line 36, in error_handler
yield
File "/usr/local/lib/python2.7/site-packages/pre_commit/main.py", line 150, in main
return run(runner, args)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 212, in run
return _run_hooks(repo_hooks, args, write, environ)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 136, in _run_hooks
retval |= _run_single_hook(hook, repo, args, write, skips)
File "/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py", line 89, in _run_single_hook
retcode, stdout, stderr = repo.run_hook(hook, filenames)
File "/usr/local/lib/python2.7/site-packages/pre_commit/repository.py", line 145, in run_hook
self.cmd_runner, hook, file_args,
File "/usr/local/lib/python2.7/site-packages/pre_commit/languages/script.py", line 23, in run_hook
encoding=None,
File "/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 40, in run
replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
File "/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py", line 11, in _replace_cmd
return [part.format(**kwargs) for part in cmd]
IndexError: tuple index out of range
```
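For a bit more context on that traceback: every hook argument is run through `str.format(**kwargs)` (see `_replace_cmd` in the file below), so a literal `{1}` is parsed as a positional replacement field, and with no positional arguments supplied the lookup raises `IndexError`. A small standalone sketch of the behaviour, plus one possible way to sidestep it by substituting only the `{prefix}` token with `str.replace` so user-supplied braces pass through untouched:

```python
arg = "AKIA[[:alnum]]{1}"

# What the runner effectively does today: {1} becomes a positional format
# field, and no positional arguments are provided to fill it.
try:
    arg.format(prefix='/tmp/foo/')
except IndexError as exc:
    print('format() failed: {}'.format(exc))

# Replacing only the literal {prefix} token leaves every other brace alone.
print("{prefix}hook.sh".replace('{prefix}', '/tmp/foo/'))  # /tmp/foo/hook.sh
print(arg.replace('{prefix}', '/tmp/foo/'))                # AKIA[[:alnum]]{1}
```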
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/prefixed_command_runner.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import os
4 import os.path
5 import subprocess
6
7 from pre_commit.util import cmd_output
8
9
10 def _replace_cmd(cmd, **kwargs):
11 return [part.format(**kwargs) for part in cmd]
12
13
14 class PrefixedCommandRunner(object):
15 """A PrefixedCommandRunner allows you to run subprocess commands with
16 comand substitution.
17
18 For instance:
19 PrefixedCommandRunner('/tmp/foo').run(['{prefix}foo.sh', 'bar', 'baz'])
20
21 will run ['/tmp/foo/foo.sh', 'bar', 'baz']
22 """
23
24 def __init__(
25 self,
26 prefix_dir,
27 popen=subprocess.Popen,
28 makedirs=os.makedirs
29 ):
30 self.prefix_dir = prefix_dir.rstrip(os.sep) + os.sep
31 self.__popen = popen
32 self.__makedirs = makedirs
33
34 def _create_path_if_not_exists(self):
35 if not os.path.exists(self.prefix_dir):
36 self.__makedirs(self.prefix_dir)
37
38 def run(self, cmd, **kwargs):
39 self._create_path_if_not_exists()
40 replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
41 return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
42
43 def path(self, *parts):
44 path = os.path.join(self.prefix_dir, *parts)
45 return os.path.normpath(path)
46
47 def exists(self, *parts):
48 return os.path.exists(self.path(*parts))
49
50 @classmethod
51 def from_command_runner(cls, command_runner, path_end):
52 """Constructs a new command runner from an existing one by appending
53 `path_end` to the command runner's prefix directory.
54 """
55 return cls(
56 command_runner.path(path_end),
57 popen=command_runner.__popen,
58 makedirs=command_runner.__makedirs,
59 )
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pre_commit/prefixed_command_runner.py b/pre_commit/prefixed_command_runner.py
--- a/pre_commit/prefixed_command_runner.py
+++ b/pre_commit/prefixed_command_runner.py
@@ -7,10 +7,6 @@
from pre_commit.util import cmd_output
-def _replace_cmd(cmd, **kwargs):
- return [part.format(**kwargs) for part in cmd]
-
-
class PrefixedCommandRunner(object):
"""A PrefixedCommandRunner allows you to run subprocess commands with
comand substitution.
@@ -37,7 +33,9 @@
def run(self, cmd, **kwargs):
self._create_path_if_not_exists()
- replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)
+ replaced_cmd = [
+ part.replace('{prefix}', self.prefix_dir) for part in cmd
+ ]
return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)
def path(self, *parts):
| {"golden_diff": "diff --git a/pre_commit/prefixed_command_runner.py b/pre_commit/prefixed_command_runner.py\n--- a/pre_commit/prefixed_command_runner.py\n+++ b/pre_commit/prefixed_command_runner.py\n@@ -7,10 +7,6 @@\n from pre_commit.util import cmd_output\n \n \n-def _replace_cmd(cmd, **kwargs):\n- return [part.format(**kwargs) for part in cmd]\n-\n-\n class PrefixedCommandRunner(object):\n \"\"\"A PrefixedCommandRunner allows you to run subprocess commands with\n comand substitution.\n@@ -37,7 +33,9 @@\n \n def run(self, cmd, **kwargs):\n self._create_path_if_not_exists()\n- replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)\n+ replaced_cmd = [\n+ part.replace('{prefix}', self.prefix_dir) for part in cmd\n+ ]\n return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)\n \n def path(self, *parts):\n", "issue": ":args seems to break with {} in list.\nI am working on a repo with some hooks for my company: https://github.com/marick/pre-commit-hooks\n\nThere is a hook that works fine with this `.pre-commit-config.yaml`:\n\n``` yaml\n- repo: /Users/marick/src/pre-commit-hooks\n sha: d6dee96f56bf9290f7ebb852c4252c50b8f6215d\n stages: [commit, push]\n hooks:\n - id: prohibit-suspicious-patterns\n args: [\"AKIA[[:alnum]]\", --]\n```\n\nHowever, it I change the first arg by adding `{1}`:\n\n``` yaml\n args: [\"AKIA[[:alnum]]{1}\", --]\n```\n\n... I get this:\n\n```\nprohibit suspicious patterns..................................................................\nAn unexpected error has occurred: IndexError: tuple index out of range\nCheck the log at ~/.pre-commit/pre-commit.log\n```\n\nThe contents of `pre-commit.log`:\n\n```\nAn unexpected error has occurred: IndexError: tuple index out of range\nTraceback (most recent call last):\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/error_handler.py\", line 36, in error_handler\n yield\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/main.py\", line 150, in main\n return run(runner, args)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 212, in run\n return _run_hooks(repo_hooks, args, write, environ)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 136, in _run_hooks\n retval |= _run_single_hook(hook, repo, args, write, skips)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/commands/run.py\", line 89, in _run_single_hook\n retcode, stdout, stderr = repo.run_hook(hook, filenames)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/repository.py\", line 145, in run_hook\n self.cmd_runner, hook, file_args,\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/languages/script.py\", line 23, in run_hook\n encoding=None,\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py\", line 40, in run\n replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)\n File \"/usr/local/lib/python2.7/site-packages/pre_commit/prefixed_command_runner.py\", line 11, in _replace_cmd\n return [part.format(**kwargs) for part in cmd]\nIndexError: tuple index out of range\n\n```\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport os.path\nimport subprocess\n\nfrom pre_commit.util import cmd_output\n\n\ndef _replace_cmd(cmd, **kwargs):\n return [part.format(**kwargs) for part in cmd]\n\n\nclass PrefixedCommandRunner(object):\n \"\"\"A PrefixedCommandRunner allows you to run subprocess commands with\n comand substitution.\n\n For instance:\n 
PrefixedCommandRunner('/tmp/foo').run(['{prefix}foo.sh', 'bar', 'baz'])\n\n will run ['/tmp/foo/foo.sh', 'bar', 'baz']\n \"\"\"\n\n def __init__(\n self,\n prefix_dir,\n popen=subprocess.Popen,\n makedirs=os.makedirs\n ):\n self.prefix_dir = prefix_dir.rstrip(os.sep) + os.sep\n self.__popen = popen\n self.__makedirs = makedirs\n\n def _create_path_if_not_exists(self):\n if not os.path.exists(self.prefix_dir):\n self.__makedirs(self.prefix_dir)\n\n def run(self, cmd, **kwargs):\n self._create_path_if_not_exists()\n replaced_cmd = _replace_cmd(cmd, prefix=self.prefix_dir)\n return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)\n\n def path(self, *parts):\n path = os.path.join(self.prefix_dir, *parts)\n return os.path.normpath(path)\n\n def exists(self, *parts):\n return os.path.exists(self.path(*parts))\n\n @classmethod\n def from_command_runner(cls, command_runner, path_end):\n \"\"\"Constructs a new command runner from an existing one by appending\n `path_end` to the command runner's prefix directory.\n \"\"\"\n return cls(\n command_runner.path(path_end),\n popen=command_runner.__popen,\n makedirs=command_runner.__makedirs,\n )\n", "path": "pre_commit/prefixed_command_runner.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport os\nimport os.path\nimport subprocess\n\nfrom pre_commit.util import cmd_output\n\n\nclass PrefixedCommandRunner(object):\n \"\"\"A PrefixedCommandRunner allows you to run subprocess commands with\n comand substitution.\n\n For instance:\n PrefixedCommandRunner('/tmp/foo').run(['{prefix}foo.sh', 'bar', 'baz'])\n\n will run ['/tmp/foo/foo.sh', 'bar', 'baz']\n \"\"\"\n\n def __init__(\n self,\n prefix_dir,\n popen=subprocess.Popen,\n makedirs=os.makedirs\n ):\n self.prefix_dir = prefix_dir.rstrip(os.sep) + os.sep\n self.__popen = popen\n self.__makedirs = makedirs\n\n def _create_path_if_not_exists(self):\n if not os.path.exists(self.prefix_dir):\n self.__makedirs(self.prefix_dir)\n\n def run(self, cmd, **kwargs):\n self._create_path_if_not_exists()\n replaced_cmd = [\n part.replace('{prefix}', self.prefix_dir) for part in cmd\n ]\n return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)\n\n def path(self, *parts):\n path = os.path.join(self.prefix_dir, *parts)\n return os.path.normpath(path)\n\n def exists(self, *parts):\n return os.path.exists(self.path(*parts))\n\n @classmethod\n def from_command_runner(cls, command_runner, path_end):\n \"\"\"Constructs a new command runner from an existing one by appending\n `path_end` to the command runner's prefix directory.\n \"\"\"\n return cls(\n command_runner.path(path_end),\n popen=command_runner.__popen,\n makedirs=command_runner.__makedirs,\n )\n", "path": "pre_commit/prefixed_command_runner.py"}]} | 1,392 | 217 |
gh_patches_debug_38600 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Namibia parser should read out data timestamp
Current time in Namibia is 2 April 14:41 but the [Nampower dashboard](http://www.nampower.com.na/Scada.aspx) is stuck on 1 April 11:45:40.
The timestamp of the data is available at http://www.nampower.com.na/gis/images/File_Info.png
We could OCR that file. A quick attempt using the current tesseract methods and upscaling from 600x20 to 1200x40 seems to have worked alright: I got `SCADA File Dene: 01/04/2018 11:45:40` (it should have been "File Date", but it is otherwise correct). There is always the danger of OCR misreading, so maybe only ignore the datapoint if the timestamp can be read and is too far in the past.
Screenshot from our page:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/NA.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """Parser for Namibia."""
4
5 import arrow
6 from bs4 import BeautifulSoup
7 from .lib.validation import validate
8 from logging import getLogger
9 from PIL import Image
10 from pytesseract import image_to_string
11 import re
12 import requests
13
14
15 generation_link = 'http://www.nampower.com.na/gis/images/Gx.png'
16 exchanges_link = 'http://www.nampower.com.na/gis/images/Imports_Exports.png'
17
18 plant_mapping = {"Ruacana": "hydro",
19 "Van Eck": "coal",
20 "Paratus": "oil",
21 "Anixas": "oil",
22 "Solar": "solar",
23 "Wind": "wind"
24 }
25
26 exchange_mapping = {"NA->ZA": "ESKOM",
27 "NA->ZM": "ZESCO"
28 }
29
30
31 def get_text_from_image(link, expected_size, new_size, logger, session=None):
32 """
33 Gets image from link and checks expected size vs actual.
34 Converts to black & white and enlarges to improve OCR accuracy.
35 Performs OCR using tesseract and returns a str.
36 """
37
38 s = session or requests.Session()
39 img = Image.open(s.get(link, stream=True).raw)
40
41 if img.size != expected_size:
42 if (logger):
43 logger.warning("Check Namibia Scada dashboard for {} changes.".format(link),
44 extras={'key': 'NA'})
45 else:
46 print("Check Namibia Scada dashboard for {} changes.".format(link))
47
48 gray = img.convert('L')
49 gray_enlarged = gray.resize(new_size, Image.LANCZOS)
50 text = image_to_string(gray_enlarged, lang='eng')
51
52 return text
53
54
55 def data_processor(text):
56 """
57 Takes text produced from OCR and extracts production.
58 Returns a dictionary.
59 """
60
61 production = {}
62 for k in plant_mapping.keys():
63 pattern = re.escape(k) + r": (\d+\.\d\d)"
64 try:
65 val = re.search(pattern, text).group(1)
66 production[plant_mapping[k]] = production.get(plant_mapping[k], 0.0)+ float(val)
67 except (AttributeError, ValueError) as e:
68 production[plant_mapping[k]] = None
69
70 return production
71
72
73 def fetch_production(zone_key = 'NA', session=None, target_datetime=None, logger=getLogger(__name__)):
74 """
75 Requests the last known production mix (in MW) of a given country
76 Arguments:
77 zone_key (optional) -- used in case a parser is able to fetch multiple countries
78 session (optional) -- request session passed in order to re-use an existing session
79 Return:
80 A dictionary in the form:
81 {
82 'zoneKey': 'FR',
83 'datetime': '2017-01-01T00:00:00Z',
84 'production': {
85 'biomass': 0.0,
86 'coal': 0.0,
87 'gas': 0.0,
88 'hydro': 0.0,
89 'nuclear': null,
90 'oil': 0.0,
91 'solar': 0.0,
92 'wind': 0.0,
93 'geothermal': 0.0,
94 'unknown': 0.0
95 },
96 'storage': {
97 'hydro': -10.0,
98 },
99 'source': 'mysource.com'
100 }
101 """
102 if target_datetime:
103 raise NotImplementedError('This parser is not yet able to parse past dates')
104
105 raw_text = get_text_from_image(session=session, link=generation_link, \
106 expected_size=(400, 245), new_size=(1000,612), \
107 logger=logger)
108
109 production = data_processor(raw_text)
110
111 data = {
112 'zoneKey': zone_key,
113 'datetime': arrow.now('Africa/Windhoek').datetime,
114 'production': production,
115 'storage': {},
116 'source': 'nampower.com.na'
117 }
118
119 data = validate(data, required=['hydro'])
120
121 return data
122
123
124 def exchange_processor(text, exchange, logger):
125 """
126 Takes text produced from OCR and extracts exchange flow.
127 Returns a float or None.
128 """
129
130 utility = exchange_mapping[exchange]
131
132 try:
133 pattern = re.escape(utility) + r"([\D]*?)([-+]?\d+\.\d\d)"
134 val = re.search(pattern, text).group(2)
135 flow = float(val)
136 except (AttributeError, ValueError) as e:
137 logger.warning("""{} regex match failed on the following text.
138 {}""".format(exchange, text))
139 raise Exception("Exchange {} cannot be read.".format(exchange)) from e
140
141 return flow
142
143
144 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=getLogger(__name__)):
145 """Requests the last known power exchange (in MW) between two zones
146 Arguments:
147 zone_key1 -- the first country code
148 zone_key2 -- the second country code; order of the two codes in params doesn't matter
149 session (optional) -- request session passed in order to re-use an existing session
150 Return:
151 A dictionary in the form:
152 {
153 'sortedZoneKeys': 'DK->NO',
154 'datetime': '2017-01-01T00:00:00Z',
155 'netFlow': 0.0,
156 'source': 'mysource.com'
157 }
158 where net flow is from DK into NO
159 """
160 if target_datetime:
161 raise NotImplementedError('This parser is not yet able to parse past dates')
162
163 sorted_codes = "->".join(sorted([zone_key1, zone_key2]))
164
165 raw_text = get_text_from_image(session=session, link=exchanges_link,
166 expected_size=(400, 195), new_size=(1120, 546), \
167 logger=logger)
168
169 if sorted_codes == 'NA->ZA':
170 flow = exchange_processor(raw_text, 'NA->ZA', logger=logger)
171 elif sorted_codes == 'NA->ZM':
172 flow = exchange_processor(raw_text, 'NA->ZM', logger=logger)
173 else:
174 raise NotImplementedError('This exchange pair is not implemented')
175
176 #Import considered positive in data source.
177 if flow is not None:
178 flow = -1 * flow
179
180 exchange = {'sortedZoneKeys': sorted_codes,
181 'datetime': arrow.now('Africa/Windhoek').datetime,
182 'netFlow': flow,
183 'source': 'nampower.com.na'
184 }
185
186 return exchange
187
188
189 if __name__ == '__main__':
190 print('fetch_production() ->')
191 print(fetch_production())
192 print('fetch_exchange(NA, ZA)')
193 print(fetch_exchange('NA', 'ZA'))
194 print('fetch_exchange(NA, ZM)')
195 print(fetch_exchange('NA', 'ZM'))
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/NA.py b/parsers/NA.py
--- a/parsers/NA.py
+++ b/parsers/NA.py
@@ -12,6 +12,7 @@
import requests
+timestamp_link = 'http://www.nampower.com.na/gis/images/File_Info.png'
generation_link = 'http://www.nampower.com.na/gis/images/Gx.png'
exchanges_link = 'http://www.nampower.com.na/gis/images/Imports_Exports.png'
@@ -39,11 +40,8 @@
img = Image.open(s.get(link, stream=True).raw)
if img.size != expected_size:
- if (logger):
- logger.warning("Check Namibia Scada dashboard for {} changes.".format(link),
- extras={'key': 'NA'})
- else:
- print("Check Namibia Scada dashboard for {} changes.".format(link))
+ logger.warning("Check Namibia Scada dashboard for {} changes.".format(link),
+ extras={'key': 'NA'})
gray = img.convert('L')
gray_enlarged = gray.resize(new_size, Image.LANCZOS)
@@ -52,6 +50,35 @@
return text
+def check_timestamp(session=None, logger=None):
+ """
+ Sometimes the Scada Dashboard image stops updating for a while.
+ This function tries to ensure that only data younger than 1 hour
+ is accepted.
+ """
+
+ scada_info = get_text_from_image(session=session, link=timestamp_link,
+ expected_size=(600,20), new_size=(1200,40),
+ logger=logger)
+
+ timestamp = scada_info.split(':', 1)[1]
+
+ try:
+ scada_time = arrow.get(timestamp, ' DD/MM/YYYY HH:mm:ss')
+ except arrow.parser.ParserError as e:
+ logger.warning('Namibia scada timestamp cannot be read, got {}.'.format(timestamp))
+ # The OCR of the Scada dashboard is not very reliable, on failure safer to assume data is good.
+ return
+
+ data_time = scada_time.replace(tzinfo='Africa/Windhoek')
+ current_time = arrow.now('Africa/Windhoek')
+ diff = current_time - data_time
+
+ # Need to be sure we don't get old data if image stops updating.
+ if diff.seconds > 3600:
+ raise ValueError('Namibia scada data is too old to use, data is {} hours old.'.format(diff.seconds/3600))
+
+
def data_processor(text):
"""
Takes text produced from OCR and extracts production.
@@ -107,6 +134,7 @@
logger=logger)
production = data_processor(raw_text)
+ check_timestamp(session=session, logger=logger)
data = {
'zoneKey': zone_key,
@@ -177,6 +205,8 @@
if flow is not None:
flow = -1 * flow
+ check_timestamp(session=session, logger=logger)
+
exchange = {'sortedZoneKeys': sorted_codes,
'datetime': arrow.now('Africa/Windhoek').datetime,
'netFlow': flow,
| {"golden_diff": "diff --git a/parsers/NA.py b/parsers/NA.py\n--- a/parsers/NA.py\n+++ b/parsers/NA.py\n@@ -12,6 +12,7 @@\n import requests\n \n \n+timestamp_link = 'http://www.nampower.com.na/gis/images/File_Info.png'\n generation_link = 'http://www.nampower.com.na/gis/images/Gx.png'\n exchanges_link = 'http://www.nampower.com.na/gis/images/Imports_Exports.png'\n \n@@ -39,11 +40,8 @@\n img = Image.open(s.get(link, stream=True).raw)\n \n if img.size != expected_size:\n- if (logger):\n- logger.warning(\"Check Namibia Scada dashboard for {} changes.\".format(link),\n- extras={'key': 'NA'})\n- else:\n- print(\"Check Namibia Scada dashboard for {} changes.\".format(link))\n+ logger.warning(\"Check Namibia Scada dashboard for {} changes.\".format(link),\n+ extras={'key': 'NA'})\n \n gray = img.convert('L')\n gray_enlarged = gray.resize(new_size, Image.LANCZOS)\n@@ -52,6 +50,35 @@\n return text\n \n \n+def check_timestamp(session=None, logger=None):\n+ \"\"\"\n+ Sometimes the Scada Dashboard image stops updating for a while.\n+ This function tries to ensure that only data younger than 1 hour\n+ is accepted.\n+ \"\"\"\n+\n+ scada_info = get_text_from_image(session=session, link=timestamp_link,\n+ expected_size=(600,20), new_size=(1200,40),\n+ logger=logger)\n+\n+ timestamp = scada_info.split(':', 1)[1]\n+\n+ try:\n+ scada_time = arrow.get(timestamp, ' DD/MM/YYYY HH:mm:ss')\n+ except arrow.parser.ParserError as e:\n+ logger.warning('Namibia scada timestamp cannot be read, got {}.'.format(timestamp))\n+ # The OCR of the Scada dashboard is not very reliable, on failure safer to assume data is good.\n+ return\n+\n+ data_time = scada_time.replace(tzinfo='Africa/Windhoek')\n+ current_time = arrow.now('Africa/Windhoek')\n+ diff = current_time - data_time\n+\n+ # Need to be sure we don't get old data if image stops updating.\n+ if diff.seconds > 3600:\n+ raise ValueError('Namibia scada data is too old to use, data is {} hours old.'.format(diff.seconds/3600))\n+\n+\n def data_processor(text):\n \"\"\"\n Takes text produced from OCR and extracts production.\n@@ -107,6 +134,7 @@\n logger=logger)\n \n production = data_processor(raw_text)\n+ check_timestamp(session=session, logger=logger)\n \n data = {\n 'zoneKey': zone_key,\n@@ -177,6 +205,8 @@\n if flow is not None:\n flow = -1 * flow\n \n+ check_timestamp(session=session, logger=logger)\n+\n exchange = {'sortedZoneKeys': sorted_codes,\n 'datetime': arrow.now('Africa/Windhoek').datetime,\n 'netFlow': flow,\n", "issue": "Namibia parser should read out data timestamp\nCurrent time in Namibia is 2 April 14:41 but the [Nampower dashboard](http://www.nampower.com.na/Scada.aspx) is stuck on 1 April 11:45:40.\r\n\r\nTimestamp of the data is available in http://www.nampower.com.na/gis/images/File_Info.png\r\n\r\nWe could OCR that file. A quick attempt using the current tesseract methods and upscaling from 600x20 to 1200x40 seems to have worked alright, I got `SCADA File Dene: 01/04/2018 11:45:40` (it should have been \"File Date\" but otherwise correct). 
There is always the danger of OCR misreading, so maybe only ignore the datapoint if the timestamp can be read and is too far in the past.\r\n\r\nScreenshot from our page:\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for Namibia.\"\"\"\n\nimport arrow\nfrom bs4 import BeautifulSoup\nfrom .lib.validation import validate\nfrom logging import getLogger\nfrom PIL import Image\nfrom pytesseract import image_to_string\nimport re\nimport requests\n\n\ngeneration_link = 'http://www.nampower.com.na/gis/images/Gx.png'\nexchanges_link = 'http://www.nampower.com.na/gis/images/Imports_Exports.png'\n\nplant_mapping = {\"Ruacana\": \"hydro\",\n \"Van Eck\": \"coal\",\n \"Paratus\": \"oil\",\n \"Anixas\": \"oil\",\n \"Solar\": \"solar\",\n \"Wind\": \"wind\"\n }\n\nexchange_mapping = {\"NA->ZA\": \"ESKOM\",\n \"NA->ZM\": \"ZESCO\"\n }\n\n\ndef get_text_from_image(link, expected_size, new_size, logger, session=None):\n \"\"\"\n Gets image from link and checks expected size vs actual.\n Converts to black & white and enlarges to improve OCR accuracy.\n Performs OCR using tesseract and returns a str.\n \"\"\"\n\n s = session or requests.Session()\n img = Image.open(s.get(link, stream=True).raw)\n\n if img.size != expected_size:\n if (logger):\n logger.warning(\"Check Namibia Scada dashboard for {} changes.\".format(link),\n extras={'key': 'NA'})\n else:\n print(\"Check Namibia Scada dashboard for {} changes.\".format(link))\n\n gray = img.convert('L')\n gray_enlarged = gray.resize(new_size, Image.LANCZOS)\n text = image_to_string(gray_enlarged, lang='eng')\n\n return text\n\n\ndef data_processor(text):\n \"\"\"\n Takes text produced from OCR and extracts production.\n Returns a dictionary.\n \"\"\"\n\n production = {}\n for k in plant_mapping.keys():\n pattern = re.escape(k) + r\": (\\d+\\.\\d\\d)\"\n try:\n val = re.search(pattern, text).group(1)\n production[plant_mapping[k]] = production.get(plant_mapping[k], 0.0)+ float(val)\n except (AttributeError, ValueError) as e:\n production[plant_mapping[k]] = None\n\n return production\n\n\ndef fetch_production(zone_key = 'NA', session=None, target_datetime=None, logger=getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_text = get_text_from_image(session=session, link=generation_link, \\\n expected_size=(400, 245), new_size=(1000,612), \\\n logger=logger)\n\n production = data_processor(raw_text)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': arrow.now('Africa/Windhoek').datetime,\n 'production': production,\n 'storage': {},\n 'source': 'nampower.com.na'\n }\n\n data = validate(data, required=['hydro'])\n\n return data\n\n\ndef exchange_processor(text, exchange, logger):\n \"\"\"\n Takes text produced from OCR and extracts exchange flow.\n Returns a float or None.\n \"\"\"\n\n utility = exchange_mapping[exchange]\n\n try:\n pattern 
= re.escape(utility) + r\"([\\D]*?)([-+]?\\d+\\.\\d\\d)\"\n val = re.search(pattern, text).group(2)\n flow = float(val)\n except (AttributeError, ValueError) as e:\n logger.warning(\"\"\"{} regex match failed on the following text.\n {}\"\"\".format(exchange, text))\n raise Exception(\"Exchange {} cannot be read.\".format(exchange)) from e\n\n return flow\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=getLogger(__name__)):\n \"\"\"Requests the last known power exchange (in MW) between two zones\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_codes = \"->\".join(sorted([zone_key1, zone_key2]))\n\n raw_text = get_text_from_image(session=session, link=exchanges_link,\n expected_size=(400, 195), new_size=(1120, 546), \\\n logger=logger)\n\n if sorted_codes == 'NA->ZA':\n flow = exchange_processor(raw_text, 'NA->ZA', logger=logger)\n elif sorted_codes == 'NA->ZM':\n flow = exchange_processor(raw_text, 'NA->ZM', logger=logger)\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n #Import considered positive in data source.\n if flow is not None:\n flow = -1 * flow\n\n exchange = {'sortedZoneKeys': sorted_codes,\n 'datetime': arrow.now('Africa/Windhoek').datetime,\n 'netFlow': flow,\n 'source': 'nampower.com.na'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(NA, ZA)')\n print(fetch_exchange('NA', 'ZA'))\n print('fetch_exchange(NA, ZM)')\n print(fetch_exchange('NA', 'ZM'))\n", "path": "parsers/NA.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Parser for Namibia.\"\"\"\n\nimport arrow\nfrom bs4 import BeautifulSoup\nfrom .lib.validation import validate\nfrom logging import getLogger\nfrom PIL import Image\nfrom pytesseract import image_to_string\nimport re\nimport requests\n\n\ntimestamp_link = 'http://www.nampower.com.na/gis/images/File_Info.png'\ngeneration_link = 'http://www.nampower.com.na/gis/images/Gx.png'\nexchanges_link = 'http://www.nampower.com.na/gis/images/Imports_Exports.png'\n\nplant_mapping = {\"Ruacana\": \"hydro\",\n \"Van Eck\": \"coal\",\n \"Paratus\": \"oil\",\n \"Anixas\": \"oil\",\n \"Solar\": \"solar\",\n \"Wind\": \"wind\"\n }\n\nexchange_mapping = {\"NA->ZA\": \"ESKOM\",\n \"NA->ZM\": \"ZESCO\"\n }\n\n\ndef get_text_from_image(link, expected_size, new_size, logger, session=None):\n \"\"\"\n Gets image from link and checks expected size vs actual.\n Converts to black & white and enlarges to improve OCR accuracy.\n Performs OCR using tesseract and returns a str.\n \"\"\"\n\n s = session or requests.Session()\n img = Image.open(s.get(link, stream=True).raw)\n\n if img.size != expected_size:\n logger.warning(\"Check Namibia Scada dashboard for {} changes.\".format(link),\n extras={'key': 'NA'})\n\n gray = img.convert('L')\n gray_enlarged = gray.resize(new_size, Image.LANCZOS)\n text = image_to_string(gray_enlarged, lang='eng')\n\n return text\n\n\ndef check_timestamp(session=None, logger=None):\n \"\"\"\n 
Sometimes the Scada Dashboard image stops updating for a while.\n This function tries to ensure that only data younger than 1 hour\n is accepted.\n \"\"\"\n\n scada_info = get_text_from_image(session=session, link=timestamp_link,\n expected_size=(600,20), new_size=(1200,40),\n logger=logger)\n\n timestamp = scada_info.split(':', 1)[1]\n\n try:\n scada_time = arrow.get(timestamp, ' DD/MM/YYYY HH:mm:ss')\n except arrow.parser.ParserError as e:\n logger.warning('Namibia scada timestamp cannot be read, got {}.'.format(timestamp))\n # The OCR of the Scada dashboard is not very reliable, on failure safer to assume data is good.\n return\n\n data_time = scada_time.replace(tzinfo='Africa/Windhoek')\n current_time = arrow.now('Africa/Windhoek')\n diff = current_time - data_time\n\n # Need to be sure we don't get old data if image stops updating.\n if diff.seconds > 3600:\n raise ValueError('Namibia scada data is too old to use, data is {} hours old.'.format(diff.seconds/3600))\n\n\ndef data_processor(text):\n \"\"\"\n Takes text produced from OCR and extracts production.\n Returns a dictionary.\n \"\"\"\n\n production = {}\n for k in plant_mapping.keys():\n pattern = re.escape(k) + r\": (\\d+\\.\\d\\d)\"\n try:\n val = re.search(pattern, text).group(1)\n production[plant_mapping[k]] = production.get(plant_mapping[k], 0.0)+ float(val)\n except (AttributeError, ValueError) as e:\n production[plant_mapping[k]] = None\n\n return production\n\n\ndef fetch_production(zone_key = 'NA', session=None, target_datetime=None, logger=getLogger(__name__)):\n \"\"\"\n Requests the last known production mix (in MW) of a given country\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n raw_text = get_text_from_image(session=session, link=generation_link, \\\n expected_size=(400, 245), new_size=(1000,612), \\\n logger=logger)\n\n production = data_processor(raw_text)\n check_timestamp(session=session, logger=logger)\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': arrow.now('Africa/Windhoek').datetime,\n 'production': production,\n 'storage': {},\n 'source': 'nampower.com.na'\n }\n\n data = validate(data, required=['hydro'])\n\n return data\n\n\ndef exchange_processor(text, exchange, logger):\n \"\"\"\n Takes text produced from OCR and extracts exchange flow.\n Returns a float or None.\n \"\"\"\n\n utility = exchange_mapping[exchange]\n\n try:\n pattern = re.escape(utility) + r\"([\\D]*?)([-+]?\\d+\\.\\d\\d)\"\n val = re.search(pattern, text).group(2)\n flow = float(val)\n except (AttributeError, ValueError) as e:\n logger.warning(\"\"\"{} regex match failed on the following text.\n {}\"\"\".format(exchange, text))\n raise Exception(\"Exchange {} cannot be read.\".format(exchange)) from e\n\n return flow\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=getLogger(__name__)):\n \"\"\"Requests the last known power exchange (in MW) between two zones\n Arguments:\n zone_key1 -- the first 
country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sorted_codes = \"->\".join(sorted([zone_key1, zone_key2]))\n\n raw_text = get_text_from_image(session=session, link=exchanges_link,\n expected_size=(400, 195), new_size=(1120, 546), \\\n logger=logger)\n\n if sorted_codes == 'NA->ZA':\n flow = exchange_processor(raw_text, 'NA->ZA', logger=logger)\n elif sorted_codes == 'NA->ZM':\n flow = exchange_processor(raw_text, 'NA->ZM', logger=logger)\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n #Import considered positive in data source.\n if flow is not None:\n flow = -1 * flow\n\n check_timestamp(session=session, logger=logger)\n\n exchange = {'sortedZoneKeys': sorted_codes,\n 'datetime': arrow.now('Africa/Windhoek').datetime,\n 'netFlow': flow,\n 'source': 'nampower.com.na'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(NA, ZA)')\n print(fetch_exchange('NA', 'ZA'))\n print('fetch_exchange(NA, ZM)')\n print(fetch_exchange('NA', 'ZM'))\n", "path": "parsers/NA.py"}]} | 2,530 | 719 |
gh_patches_debug_31061 | rasdani/github-patches | git_diff | conan-io__conan-center-index-2686 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[package] libunwind/1.3.1 library dependency order is wrong
### Package and Environment Details (include every applicable attribute)
* Package Name/Version: **libunwind/1.3.1**
* Operating System+version: **Linux Ubuntu 18.04**
* Compiler+version: **GCC 7**
* Conan version: **conan 1.21.1**
* Python version: **Python 2.7.17, 3.6.9**
### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)
```
[settings]
os=Linux
os_build=Linux
arch=x86_64
arch_build=x86_64
compiler=gcc
compiler.version=7
compiler.libcxx=libstdc++1+
[options]
[build_requires]
[env]
```
But **an error occurs when linking statically**, that is, with shared=False.
### Steps to reproduce (Include if Applicable)
Soon I will write an example to reproduce.
But there is [an example about libunwind library dependency](https://github.com/daniel-thompson/libunwind-examples/blob/master/Makefile)
* current order : `unwind` `unwind-generic` `unwind-ptrace`
* right order : `unwind-ptrace` `unwind-generic` `unwind`
### Logs (Include/Attach if Applicable)
<details><summary>Click to expand log</summary>
```
Put your log output here
```
</details>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/libunwind/all/conanfile.py`
Content:
```
1 from conans import ConanFile, AutoToolsBuildEnvironment, tools
2 from conans.errors import ConanInvalidConfiguration
3 import os
4 import glob
5
6
7 class LiunwindConan(ConanFile):
8 name = "libunwind"
9 description = "Manipulate the preserved state of each call-frame and resume the execution at any point."
10 topics = ("conan", "libunwind", "unwind", "debuggers", "exception-handling", "introspection", "setjmp")
11 url = "https://github.com/conan-io/conan-center-index"
12 homepage = "https://github.com/libunwind/libunwind"
13 license = "MIT"
14 settings = "os", "arch", "compiler", "build_type"
15 options = {"shared": [True, False], "fPIC": [True, False], "coredump": [True, False], "ptrace": [True, False], "setjmp": [True, False]}
16 default_options = {"shared": False, "fPIC": True, "coredump": True, "ptrace": True, "setjmp": True}
17 requires = "xz_utils/5.2.4"
18 _autotools = None
19
20 @property
21 def _source_subfolder(self):
22 return "source_subfolder"
23
24 def configure(self):
25 if self.settings.os not in ["Linux", "FreeBSD"]:
26 raise ConanInvalidConfiguration("libunwind is only supported on Linux and FreeBSD")
27 del self.settings.compiler.libcxx
28 del self.settings.compiler.cppstd
29
30 def source(self):
31 tools.get(**self.conan_data["sources"][self.version])
32 extracted_dir = self.name + "-" + self.version
33 os.rename(extracted_dir, self._source_subfolder)
34
35 def _configure_autotools(self):
36 if not self._autotools:
37 self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
38 args = [
39 "--enable-shared={}".format("yes" if self.options.shared else "no"),
40 "--enable-static={}".format("no" if self.options.shared else "yes"),
41 "--enable-coredump={}".format("yes" if self.options.coredump else "no"),
42 "--enable-ptrace={}".format("yes" if self.options.ptrace else "no"),
43 "--enable-setjmp={}".format("yes" if self.options.setjmp else "no"),
44 "--disable-tests",
45 "--disable-documentation"
46 ]
47 self._autotools.configure(configure_dir=self._source_subfolder, args=args)
48 return self._autotools
49
50 def build(self):
51 autotools = self._configure_autotools()
52 autotools.make()
53
54 def package(self):
55 self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
56 autotools = self._configure_autotools()
57 autotools.install()
58 tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))
59 with tools.chdir(os.path.join(self.package_folder, "lib")):
60 for filename in glob.glob("*.la"):
61 os.unlink(filename)
62
63 def package_info(self):
64 self.cpp_info.libs = tools.collect_libs(self)
65 if self.settings.os == "Linux":
66 self.cpp_info.system_libs.append("pthread")
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/recipes/libunwind/all/conanfile.py b/recipes/libunwind/all/conanfile.py
--- a/recipes/libunwind/all/conanfile.py
+++ b/recipes/libunwind/all/conanfile.py
@@ -24,6 +24,8 @@
def configure(self):
if self.settings.os not in ["Linux", "FreeBSD"]:
raise ConanInvalidConfiguration("libunwind is only supported on Linux and FreeBSD")
+ if self.options.shared:
+ del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
@@ -61,6 +63,23 @@
os.unlink(filename)
def package_info(self):
- self.cpp_info.libs = tools.collect_libs(self)
+ self.cpp_info.components["unwind"].names["pkg_config"] = "libunwind"
+ self.cpp_info.components["unwind"].libs = ["unwind"]
+ self.cpp_info.components["unwind"].requires = ["xz_utils::xz_utils"]
if self.settings.os == "Linux":
- self.cpp_info.system_libs.append("pthread")
+ self.cpp_info.components["unwind"].system_libs.append("pthread")
+ self.cpp_info.components["generic"].names["pkg_config"] = "libunwind-generic"
+ self.cpp_info.components["generic"].libs = ["unwind-generic"]
+ self.cpp_info.components["generic"].requires = ["unwind"]
+ if self.options.ptrace:
+ self.cpp_info.components["ptrace"].names["pkg_config"] = "libunwind-ptrace"
+ self.cpp_info.components["ptrace"].libs = ["unwind-ptrace"]
+ self.cpp_info.components["ptrace"].requires = ["generic", "unwind"]
+ if self.options.setjmp:
+ self.cpp_info.components["setjmp"].names["pkg_config"] = "libunwind-setjmp"
+ self.cpp_info.components["setjmp"].libs = ["unwind-setjmp"]
+ self.cpp_info.components["setjmp"].requires = ["unwind"]
+ if self.options.coredump:
+ self.cpp_info.components["coredump"].names["pkg_config"] = "libunwind-coredump"
+ self.cpp_info.components["coredump"].libs = ["unwind-coredump"]
+ self.cpp_info.components["coredump"].requires = ["generic", "unwind"]
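A hypothetical consumer-side view of this change (not part of the recipe, and the generator choice is an assumption): because each component now declares its `requires`, Conan's generators can order the static archives dependents-first, roughly `unwind-ptrace`, `unwind-generic`, `unwind`, then `lzma` and `pthread`, which matches the ordering the issue reports as correct:
```python
# Sketch of a downstream conanfile; package name/version match the issue,
# everything else here is assumed for illustration.
from conans import ConanFile

class ConsumerConan(ConanFile):
    settings = "os", "arch", "compiler", "build_type"
    requires = "libunwind/1.3.1"
    generators = "pkg_config"  # emitted .pc files carry the per-component Requires, preserving link order
```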
| {"golden_diff": "diff --git a/recipes/libunwind/all/conanfile.py b/recipes/libunwind/all/conanfile.py\n--- a/recipes/libunwind/all/conanfile.py\n+++ b/recipes/libunwind/all/conanfile.py\n@@ -24,6 +24,8 @@\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"libunwind is only supported on Linux and FreeBSD\")\n+ if self.options.shared:\n+ del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n \n@@ -61,6 +63,23 @@\n os.unlink(filename)\n \n def package_info(self):\n- self.cpp_info.libs = tools.collect_libs(self)\n+ self.cpp_info.components[\"unwind\"].names[\"pkg_config\"] = \"libunwind\"\n+ self.cpp_info.components[\"unwind\"].libs = [\"unwind\"]\n+ self.cpp_info.components[\"unwind\"].requires = [\"xz_utils::xz_utils\"]\n if self.settings.os == \"Linux\":\n- self.cpp_info.system_libs.append(\"pthread\")\n+ self.cpp_info.components[\"unwind\"].system_libs.append(\"pthread\")\n+ self.cpp_info.components[\"generic\"].names[\"pkg_config\"] = \"libunwind-generic\"\n+ self.cpp_info.components[\"generic\"].libs = [\"unwind-generic\"]\n+ self.cpp_info.components[\"generic\"].requires = [\"unwind\"]\n+ if self.options.ptrace:\n+ self.cpp_info.components[\"ptrace\"].names[\"pkg_config\"] = \"libunwind-ptrace\"\n+ self.cpp_info.components[\"ptrace\"].libs = [\"unwind-ptrace\"]\n+ self.cpp_info.components[\"ptrace\"].requires = [\"generic\", \"unwind\"]\n+ if self.options.setjmp:\n+ self.cpp_info.components[\"setjmp\"].names[\"pkg_config\"] = \"libunwind-setjmp\"\n+ self.cpp_info.components[\"setjmp\"].libs = [\"unwind-setjmp\"]\n+ self.cpp_info.components[\"setjmp\"].requires = [\"unwind\"]\n+ if self.options.coredump:\n+ self.cpp_info.components[\"coredump\"].names[\"pkg_config\"] = \"libunwind-coredump\"\n+ self.cpp_info.components[\"coredump\"].libs = [\"unwind-coredump\"]\n+ self.cpp_info.components[\"coredump\"].requires = [\"generic\", \"unwind\"]\n", "issue": "[package] libunwind/1.3.1 library dependency order is wrong\n### Package and Environment Details (include every applicable attribute)\r\n * Package Name/Version: **libunwind/1.3.1**\r\n * Operating System+version: **Linux Ubuntu 18.04**\r\n * Compiler+version: **GCC 7**\r\n * Conan version: **conan 1.21.1**\r\n * Python version: **Python 2.7.17, 3.6.9**\r\n\r\n\r\n### Conan profile (output of `conan profile show default` or `conan profile show <profile>` if custom profile is in use)\r\n```\r\n[settings]\r\nos=Linux\r\nos_build=Linux\r\narch=x86_64\r\narch_build=x86_64\r\ncompiler=gcc\r\ncompiler.version=7\r\ncompiler.libcxx=libstdc++1+\r\n[options]\r\n[build_requires]\r\n[env]\r\n```\r\n\r\nBut **error occurs when static linking**, that is shared=False.\r\n\r\n### Steps to reproduce (Include if Applicable)\r\n\r\nSoon I will write an example to reproduce.\r\n\r\nBut there is [an example about libunwind library dependency](https://github.com/daniel-thompson/libunwind-examples/blob/master/Makefile)\r\n\r\n* current order : `unwind` `unwind-generic` `unwind-ptrace`\r\n* right order : `unwind-ptrace` `unwind-generic` `unwind`\r\n\r\n### Logs (Include/Attach if Applicable)\r\n<details><summary>Click to expand log</summary>\r\n\r\n```\r\nPut your log output here\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport glob\n\n\nclass LiunwindConan(ConanFile):\n name = \"libunwind\"\n description = 
\"Manipulate the preserved state of each call-frame and resume the execution at any point.\"\n topics = (\"conan\", \"libunwind\", \"unwind\", \"debuggers\", \"exception-handling\", \"introspection\", \"setjmp\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/libunwind/libunwind\"\n license = \"MIT\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"coredump\": [True, False], \"ptrace\": [True, False], \"setjmp\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True, \"coredump\": True, \"ptrace\": True, \"setjmp\": True}\n requires = \"xz_utils/5.2.4\"\n _autotools = None\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"libunwind is only supported on Linux and FreeBSD\")\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_autotools(self):\n if not self._autotools:\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n args = [\n \"--enable-shared={}\".format(\"yes\" if self.options.shared else \"no\"),\n \"--enable-static={}\".format(\"no\" if self.options.shared else \"yes\"),\n \"--enable-coredump={}\".format(\"yes\" if self.options.coredump else \"no\"),\n \"--enable-ptrace={}\".format(\"yes\" if self.options.ptrace else \"no\"),\n \"--enable-setjmp={}\".format(\"yes\" if self.options.setjmp else \"no\"),\n \"--disable-tests\",\n \"--disable-documentation\"\n ]\n self._autotools.configure(configure_dir=self._source_subfolder, args=args)\n return self._autotools\n\n def build(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n autotools = self._configure_autotools()\n autotools.install()\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\n with tools.chdir(os.path.join(self.package_folder, \"lib\")):\n for filename in glob.glob(\"*.la\"):\n os.unlink(filename)\n\n def package_info(self):\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs.append(\"pthread\")\n", "path": "recipes/libunwind/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, tools\nfrom conans.errors import ConanInvalidConfiguration\nimport os\nimport glob\n\n\nclass LiunwindConan(ConanFile):\n name = \"libunwind\"\n description = \"Manipulate the preserved state of each call-frame and resume the execution at any point.\"\n topics = (\"conan\", \"libunwind\", \"unwind\", \"debuggers\", \"exception-handling\", \"introspection\", \"setjmp\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/libunwind/libunwind\"\n license = \"MIT\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n options = {\"shared\": [True, False], \"fPIC\": [True, False], \"coredump\": [True, False], \"ptrace\": [True, False], \"setjmp\": [True, False]}\n default_options = {\"shared\": False, \"fPIC\": True, \"coredump\": True, \"ptrace\": True, \"setjmp\": True}\n requires = \"xz_utils/5.2.4\"\n _autotools = None\n\n 
@property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"libunwind is only supported on Linux and FreeBSD\")\n if self.options.shared:\n del self.options.fPIC\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _configure_autotools(self):\n if not self._autotools:\n self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)\n args = [\n \"--enable-shared={}\".format(\"yes\" if self.options.shared else \"no\"),\n \"--enable-static={}\".format(\"no\" if self.options.shared else \"yes\"),\n \"--enable-coredump={}\".format(\"yes\" if self.options.coredump else \"no\"),\n \"--enable-ptrace={}\".format(\"yes\" if self.options.ptrace else \"no\"),\n \"--enable-setjmp={}\".format(\"yes\" if self.options.setjmp else \"no\"),\n \"--disable-tests\",\n \"--disable-documentation\"\n ]\n self._autotools.configure(configure_dir=self._source_subfolder, args=args)\n return self._autotools\n\n def build(self):\n autotools = self._configure_autotools()\n autotools.make()\n\n def package(self):\n self.copy(pattern=\"COPYING\", dst=\"licenses\", src=self._source_subfolder)\n autotools = self._configure_autotools()\n autotools.install()\n tools.rmdir(os.path.join(self.package_folder, 'lib', 'pkgconfig'))\n with tools.chdir(os.path.join(self.package_folder, \"lib\")):\n for filename in glob.glob(\"*.la\"):\n os.unlink(filename)\n\n def package_info(self):\n self.cpp_info.components[\"unwind\"].names[\"pkg_config\"] = \"libunwind\"\n self.cpp_info.components[\"unwind\"].libs = [\"unwind\"]\n self.cpp_info.components[\"unwind\"].requires = [\"xz_utils::xz_utils\"]\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"unwind\"].system_libs.append(\"pthread\")\n self.cpp_info.components[\"generic\"].names[\"pkg_config\"] = \"libunwind-generic\"\n self.cpp_info.components[\"generic\"].libs = [\"unwind-generic\"]\n self.cpp_info.components[\"generic\"].requires = [\"unwind\"]\n if self.options.ptrace:\n self.cpp_info.components[\"ptrace\"].names[\"pkg_config\"] = \"libunwind-ptrace\"\n self.cpp_info.components[\"ptrace\"].libs = [\"unwind-ptrace\"]\n self.cpp_info.components[\"ptrace\"].requires = [\"generic\", \"unwind\"]\n if self.options.setjmp:\n self.cpp_info.components[\"setjmp\"].names[\"pkg_config\"] = \"libunwind-setjmp\"\n self.cpp_info.components[\"setjmp\"].libs = [\"unwind-setjmp\"]\n self.cpp_info.components[\"setjmp\"].requires = [\"unwind\"]\n if self.options.coredump:\n self.cpp_info.components[\"coredump\"].names[\"pkg_config\"] = \"libunwind-coredump\"\n self.cpp_info.components[\"coredump\"].libs = [\"unwind-coredump\"]\n self.cpp_info.components[\"coredump\"].requires = [\"generic\", \"unwind\"]\n", "path": "recipes/libunwind/all/conanfile.py"}]} | 1,448 | 519 |
gh_patches_debug_5977 | rasdani/github-patches | git_diff | bokeh__bokeh-4754 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Typo in categorical example plot title
File examples/plotting/file/categorical.py contains a typo in the plot title.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/plotting/file/categorical.py`
Content:
```
1 from bokeh.layouts import row
2 from bokeh.plotting import figure, show, output_file
3
4 factors = ["a", "b", "c", "d", "e", "f", "g", "h"]
5 x = [50, 40, 65, 10, 25, 37, 80, 60]
6
7 dot = figure(title="Catgorical Dot Plot", tools="", toolbar_location=None,
8 y_range=factors, x_range=[0,100])
9
10 dot.segment(0, factors, x, factors, line_width=2, line_color="green", )
11 dot.circle(x, factors, size=15, fill_color="orange", line_color="green", line_width=3, )
12
13 factors = ["foo", "bar", "baz"]
14 x = ["foo", "foo", "foo", "bar", "bar", "bar", "baz", "baz", "baz"]
15 y = ["foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"]
16 colors = [
17 "#0B486B", "#79BD9A", "#CFF09E",
18 "#79BD9A", "#0B486B", "#79BD9A",
19 "#CFF09E", "#79BD9A", "#0B486B"
20 ]
21
22 hm = figure(title="Categorical Heatmap", tools="hover", toolbar_location=None,
23 x_range=factors, y_range=factors)
24
25 hm.rect(x, y, color=colors, width=1, height=1)
26
27 output_file("categorical.html", title="categorical.py example")
28
29 show(row(hm, dot, sizing_mode="scale_width")) # open a browser
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/plotting/file/categorical.py b/examples/plotting/file/categorical.py
--- a/examples/plotting/file/categorical.py
+++ b/examples/plotting/file/categorical.py
@@ -4,7 +4,7 @@
factors = ["a", "b", "c", "d", "e", "f", "g", "h"]
x = [50, 40, 65, 10, 25, 37, 80, 60]
-dot = figure(title="Catgorical Dot Plot", tools="", toolbar_location=None,
+dot = figure(title="Categorical Dot Plot", tools="", toolbar_location=None,
y_range=factors, x_range=[0,100])
dot.segment(0, factors, x, factors, line_width=2, line_color="green", )
| {"golden_diff": "diff --git a/examples/plotting/file/categorical.py b/examples/plotting/file/categorical.py\n--- a/examples/plotting/file/categorical.py\n+++ b/examples/plotting/file/categorical.py\n@@ -4,7 +4,7 @@\n factors = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\n x = [50, 40, 65, 10, 25, 37, 80, 60]\n \n-dot = figure(title=\"Catgorical Dot Plot\", tools=\"\", toolbar_location=None,\n+dot = figure(title=\"Categorical Dot Plot\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100])\n \n dot.segment(0, factors, x, factors, line_width=2, line_color=\"green\", )\n", "issue": "Typo in categorical example plot title\nFile examples/plotting/file/categorical.py contains a typo in plot name.\n\n", "before_files": [{"content": "from bokeh.layouts import row\nfrom bokeh.plotting import figure, show, output_file\n\nfactors = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\nx = [50, 40, 65, 10, 25, 37, 80, 60]\n\ndot = figure(title=\"Catgorical Dot Plot\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100])\n\ndot.segment(0, factors, x, factors, line_width=2, line_color=\"green\", )\ndot.circle(x, factors, size=15, fill_color=\"orange\", line_color=\"green\", line_width=3, )\n\nfactors = [\"foo\", \"bar\", \"baz\"]\nx = [\"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"baz\", \"baz\", \"baz\"]\ny = [\"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\"]\ncolors = [\n \"#0B486B\", \"#79BD9A\", \"#CFF09E\",\n \"#79BD9A\", \"#0B486B\", \"#79BD9A\",\n \"#CFF09E\", \"#79BD9A\", \"#0B486B\"\n]\n\nhm = figure(title=\"Categorical Heatmap\", tools=\"hover\", toolbar_location=None,\n x_range=factors, y_range=factors)\n\nhm.rect(x, y, color=colors, width=1, height=1)\n\noutput_file(\"categorical.html\", title=\"categorical.py example\")\n\nshow(row(hm, dot, sizing_mode=\"scale_width\")) # open a browser\n", "path": "examples/plotting/file/categorical.py"}], "after_files": [{"content": "from bokeh.layouts import row\nfrom bokeh.plotting import figure, show, output_file\n\nfactors = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\"]\nx = [50, 40, 65, 10, 25, 37, 80, 60]\n\ndot = figure(title=\"Categorical Dot Plot\", tools=\"\", toolbar_location=None,\n y_range=factors, x_range=[0,100])\n\ndot.segment(0, factors, x, factors, line_width=2, line_color=\"green\", )\ndot.circle(x, factors, size=15, fill_color=\"orange\", line_color=\"green\", line_width=3, )\n\nfactors = [\"foo\", \"bar\", \"baz\"]\nx = [\"foo\", \"foo\", \"foo\", \"bar\", \"bar\", \"bar\", \"baz\", \"baz\", \"baz\"]\ny = [\"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\", \"foo\", \"bar\", \"baz\"]\ncolors = [\n \"#0B486B\", \"#79BD9A\", \"#CFF09E\",\n \"#79BD9A\", \"#0B486B\", \"#79BD9A\",\n \"#CFF09E\", \"#79BD9A\", \"#0B486B\"\n]\n\nhm = figure(title=\"Categorical Heatmap\", tools=\"hover\", toolbar_location=None,\n x_range=factors, y_range=factors)\n\nhm.rect(x, y, color=colors, width=1, height=1)\n\noutput_file(\"categorical.html\", title=\"categorical.py example\")\n\nshow(row(hm, dot, sizing_mode=\"scale_width\")) # open a browser\n", "path": "examples/plotting/file/categorical.py"}]} | 729 | 188 |
gh_patches_debug_32242 | rasdani/github-patches | git_diff | doccano__doccano-863 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug report] Error on Django Admin search
How to reproduce the behaviour
---------

Press "Search".
```
backend_1 | Internal Server Error: /admin/api/document/
backend_1 | Traceback (most recent call last):
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/core/handlers/exception.py", line 34, in inner
backend_1 | response = get_response(request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/core/handlers/base.py", line 115, in _get_response
backend_1 | response = self.process_exception_by_middleware(e, request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/core/handlers/base.py", line 113, in _get_response
backend_1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 606, in wrapper
backend_1 | return self.admin_site.admin_view(view)(*args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/utils/decorators.py", line 142, in _wrapped_view
backend_1 | response = view_func(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/views/decorators/cache.py", line 44, in _wrapped_view_func
backend_1 | response = view_func(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/sites.py", line 223, in inner
backend_1 | return view(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/utils/decorators.py", line 45, in _wrapper
backend_1 | return bound_method(*args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/utils/decorators.py", line 142, in _wrapped_view
backend_1 | response = view_func(request, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 1685, in changelist_view
backend_1 | cl = self.get_changelist_instance(request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 744, in get_changelist_instance
backend_1 | sortable_by,
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/views/main.py", line 81, in __init__
backend_1 | self.queryset = self.get_queryset(request)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/views/main.py", line 439, in get_queryset
backend_1 | qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py", line 1023, in get_search_results
backend_1 | queryset = queryset.filter(reduce(operator.or_, or_queries))
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/query.py", line 892, in filter
backend_1 | return self._filter_or_exclude(False, *args, **kwargs)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/query.py", line 910, in _filter_or_exclude
backend_1 | clone.query.add_q(Q(*args, **kwargs))
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1290, in add_q
backend_1 | clause, _ = self._add_q(q_object, self.used_aliases)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1312, in _add_q
backend_1 | current_negated, allow_joins, split_subq, simple_col)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1318, in _add_q
backend_1 | split_subq=split_subq, simple_col=simple_col,
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1251, in build_filter
backend_1 | condition = self.build_lookup(lookups, col, value)
backend_1 | File "/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py", line 1107, in build_lookup
backend_1 | raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))
backend_1 | django.core.exceptions.FieldError: Related Field got invalid lookup: icontains
backend_1 | [29/Jun/2020 12:25:49] "GET /admin/api/document/?q=request HTTP/1.1" 500 160618
```
Is this like https://stackoverflow.com/questions/11754877/troubleshooting-related-field-has-invalid-lookup-icontains?
Your Environment
---------
<!-- Include details of your environment.-->
* Operating System: Calculate Linux 20.6
* Python Version Used: system Python version is 3.7.7
* When you install doccano: 4927a01f090b91d8e14e467f2fd40d8301612e72
* How did you install doccano (Heroku button etc): Docker Compose
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/api/admin.py`
Content:
```
1 from django.contrib import admin
2
3 from .models import Label, Document, Project
4 from .models import Role, RoleMapping
5 from .models import DocumentAnnotation, SequenceAnnotation, Seq2seqAnnotation
6 from .models import TextClassificationProject, SequenceLabelingProject, Seq2seqProject
7
8
9 class LabelAdmin(admin.ModelAdmin):
10 list_display = ('text', 'project', 'text_color', 'background_color')
11 ordering = ('project',)
12 search_fields = ('project',)
13
14
15 class DocumentAdmin(admin.ModelAdmin):
16 list_display = ('text', 'project', 'meta')
17 ordering = ('project',)
18 search_fields = ('project',)
19
20
21 class ProjectAdmin(admin.ModelAdmin):
22 list_display = ('name', 'description', 'project_type', 'randomize_document_order', 'collaborative_annotation')
23 ordering = ('project_type',)
24 search_fields = ('name',)
25
26
27 class SequenceAnnotationAdmin(admin.ModelAdmin):
28 list_display = ('document', 'label', 'start_offset', 'user')
29 ordering = ('document',)
30 search_fields = ('document',)
31
32
33 class DocumentAnnotationAdmin(admin.ModelAdmin):
34 list_display = ('document', 'label', 'user')
35 ordering = ('document',)
36 search_fields = ('document',)
37
38
39 class Seq2seqAnnotationAdmin(admin.ModelAdmin):
40 list_display = ('document', 'text', 'user')
41 ordering = ('document',)
42 search_fields = ('document',)
43
44
45 class RoleAdmin(admin.ModelAdmin):
46 list_display = ('name', 'description')
47 ordering = ('name',)
48 search_fields = ('name',)
49
50
51 class RoleMappingAdmin(admin.ModelAdmin):
52 list_display = ('user', 'role', 'project', )
53 ordering = ('user',)
54 search_fields = ('user',)
55
56
57 admin.site.register(DocumentAnnotation, DocumentAnnotationAdmin)
58 admin.site.register(SequenceAnnotation, SequenceAnnotationAdmin)
59 admin.site.register(Seq2seqAnnotation, Seq2seqAnnotationAdmin)
60 admin.site.register(Label, LabelAdmin)
61 admin.site.register(Document, DocumentAdmin)
62 admin.site.register(Project, ProjectAdmin)
63 admin.site.register(TextClassificationProject, ProjectAdmin)
64 admin.site.register(SequenceLabelingProject, ProjectAdmin)
65 admin.site.register(Seq2seqProject, ProjectAdmin)
66 admin.site.register(Role, RoleAdmin)
67 admin.site.register(RoleMapping, RoleMappingAdmin)
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/api/admin.py b/app/api/admin.py
--- a/app/api/admin.py
+++ b/app/api/admin.py
@@ -9,13 +9,13 @@
class LabelAdmin(admin.ModelAdmin):
list_display = ('text', 'project', 'text_color', 'background_color')
ordering = ('project',)
- search_fields = ('project',)
+ search_fields = ('text',)
class DocumentAdmin(admin.ModelAdmin):
list_display = ('text', 'project', 'meta')
ordering = ('project',)
- search_fields = ('project',)
+ search_fields = ('text',)
class ProjectAdmin(admin.ModelAdmin):
@@ -27,19 +27,19 @@
class SequenceAnnotationAdmin(admin.ModelAdmin):
list_display = ('document', 'label', 'start_offset', 'user')
ordering = ('document',)
- search_fields = ('document',)
+ search_fields = ('document__text',)
class DocumentAnnotationAdmin(admin.ModelAdmin):
list_display = ('document', 'label', 'user')
ordering = ('document',)
- search_fields = ('document',)
+ search_fields = ('document__text',)
class Seq2seqAnnotationAdmin(admin.ModelAdmin):
list_display = ('document', 'text', 'user')
ordering = ('document',)
- search_fields = ('document',)
+ search_fields = ('document__text',)
class RoleAdmin(admin.ModelAdmin):
@@ -51,7 +51,7 @@
class RoleMappingAdmin(admin.ModelAdmin):
list_display = ('user', 'role', 'project', )
ordering = ('user',)
- search_fields = ('user',)
+ search_fields = ('user__username',)
admin.site.register(DocumentAnnotation, DocumentAnnotationAdmin)
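For context, the underlying Django rule: a `search_fields` entry that names a ForeignKey directly ('document', 'project', 'user') makes the admin build an `icontains` lookup against the relation itself, which raises `FieldError: Related Field got invalid lookup: icontains`. Spanning the relation with `__`, as the diff above does, targets a concrete text column instead. A minimal illustration (model and field names assumed from the file above):
```python
from django.contrib import admin
from .models import DocumentAnnotation  # import path as in the file above

class DocumentAnnotationAdmin(admin.ModelAdmin):
    list_display = ('document', 'label', 'user')
    # search_fields = ('document',)       # raises: Related Field got invalid lookup: icontains
    search_fields = ('document__text',)   # spans the FK and searches Document.text instead

admin.site.register(DocumentAnnotation, DocumentAnnotationAdmin)
```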
| {"golden_diff": "diff --git a/app/api/admin.py b/app/api/admin.py\n--- a/app/api/admin.py\n+++ b/app/api/admin.py\n@@ -9,13 +9,13 @@\n class LabelAdmin(admin.ModelAdmin):\n list_display = ('text', 'project', 'text_color', 'background_color')\n ordering = ('project',)\n- search_fields = ('project',)\n+ search_fields = ('text',)\n \n \n class DocumentAdmin(admin.ModelAdmin):\n list_display = ('text', 'project', 'meta')\n ordering = ('project',)\n- search_fields = ('project',)\n+ search_fields = ('text',)\n \n \n class ProjectAdmin(admin.ModelAdmin):\n@@ -27,19 +27,19 @@\n class SequenceAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'label', 'start_offset', 'user')\n ordering = ('document',)\n- search_fields = ('document',)\n+ search_fields = ('document__text',)\n \n \n class DocumentAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'label', 'user')\n ordering = ('document',)\n- search_fields = ('document',)\n+ search_fields = ('document__text',)\n \n \n class Seq2seqAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'text', 'user')\n ordering = ('document',)\n- search_fields = ('document',)\n+ search_fields = ('document__text',)\n \n \n class RoleAdmin(admin.ModelAdmin):\n@@ -51,7 +51,7 @@\n class RoleMappingAdmin(admin.ModelAdmin):\n list_display = ('user', 'role', 'project', )\n ordering = ('user',)\n- search_fields = ('user',)\n+ search_fields = ('user__username',)\n \n \n admin.site.register(DocumentAnnotation, DocumentAnnotationAdmin)\n", "issue": "[Bug report] Error on Django Admin search\nHow to reproduce the behaviour\r\n---------\r\n\r\n\r\nPress \"Search\".\r\n\r\n```\r\nbackend_1 | Internal Server Error: /admin/api/document/\r\nbackend_1 | Traceback (most recent call last):\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/core/handlers/exception.py\", line 34, in inner\r\nbackend_1 | response = get_response(request)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 115, in _get_response\r\nbackend_1 | response = self.process_exception_by_middleware(e, request)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/core/handlers/base.py\", line 113, in _get_response\r\nbackend_1 | response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py\", line 606, in wrapper\r\nbackend_1 | return self.admin_site.admin_view(view)(*args, **kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/utils/decorators.py\", line 142, in _wrapped_view\r\nbackend_1 | response = view_func(request, *args, **kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/views/decorators/cache.py\", line 44, in _wrapped_view_func\r\nbackend_1 | response = view_func(request, *args, **kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/contrib/admin/sites.py\", line 223, in inner\r\nbackend_1 | return view(request, *args, **kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/utils/decorators.py\", line 45, in _wrapper\r\nbackend_1 | return bound_method(*args, **kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/utils/decorators.py\", line 142, in _wrapped_view\r\nbackend_1 | response = view_func(request, *args, **kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py\", line 1685, in changelist_view\r\nbackend_1 | cl = 
self.get_changelist_instance(request)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py\", line 744, in get_changelist_instance\r\nbackend_1 | sortable_by,\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/contrib/admin/views/main.py\", line 81, in __init__\r\nbackend_1 | self.queryset = self.get_queryset(request)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/contrib/admin/views/main.py\", line 439, in get_queryset\r\nbackend_1 | qs, search_use_distinct = self.model_admin.get_search_results(request, qs, self.query)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/contrib/admin/options.py\", line 1023, in get_search_results\r\nbackend_1 | queryset = queryset.filter(reduce(operator.or_, or_queries))\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/db/models/query.py\", line 892, in filter\r\nbackend_1 | return self._filter_or_exclude(False, *args, **kwargs)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/db/models/query.py\", line 910, in _filter_or_exclude\r\nbackend_1 | clone.query.add_q(Q(*args, **kwargs))\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1290, in add_q\r\nbackend_1 | clause, _ = self._add_q(q_object, self.used_aliases)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1312, in _add_q\r\nbackend_1 | current_negated, allow_joins, split_subq, simple_col)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1318, in _add_q\r\nbackend_1 | split_subq=split_subq, simple_col=simple_col,\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1251, in build_filter\r\nbackend_1 | condition = self.build_lookup(lookups, col, value)\r\nbackend_1 | File \"/src/venv/lib/python3.6/site-packages/django/db/models/sql/query.py\", line 1107, in build_lookup\r\nbackend_1 | raise FieldError('Related Field got invalid lookup: {}'.format(lookup_name))\r\nbackend_1 | django.core.exceptions.FieldError: Related Field got invalid lookup: icontains\r\nbackend_1 | [29/Jun/2020 12:25:49] \"GET /admin/api/document/?q=request HTTP/1.1\" 500 160618\r\n```\r\n\r\nIs this like https://stackoverflow.com/questions/11754877/troubleshooting-related-field-has-invalid-lookup-icontains?\r\n\r\nYour Environment\r\n---------\r\n<!-- Include details of your environment.-->\r\n* Operating System: Calculate Linux 20.6\r\n* Python Version Used: system Python version is 3.7.7\r\n* When you install doccano: 4927a01f090b91d8e14e467f2fd40d8301612e72\r\n* How did you install doccano (Heroku button etc): Docker Compose\r\n\n", "before_files": [{"content": "from django.contrib import admin\n\nfrom .models import Label, Document, Project\nfrom .models import Role, RoleMapping\nfrom .models import DocumentAnnotation, SequenceAnnotation, Seq2seqAnnotation\nfrom .models import TextClassificationProject, SequenceLabelingProject, Seq2seqProject\n\n\nclass LabelAdmin(admin.ModelAdmin):\n list_display = ('text', 'project', 'text_color', 'background_color')\n ordering = ('project',)\n search_fields = ('project',)\n\n\nclass DocumentAdmin(admin.ModelAdmin):\n list_display = ('text', 'project', 'meta')\n ordering = ('project',)\n search_fields = ('project',)\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = ('name', 'description', 'project_type', 'randomize_document_order', 'collaborative_annotation')\n ordering = 
('project_type',)\n search_fields = ('name',)\n\n\nclass SequenceAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'label', 'start_offset', 'user')\n ordering = ('document',)\n search_fields = ('document',)\n\n\nclass DocumentAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'label', 'user')\n ordering = ('document',)\n search_fields = ('document',)\n\n\nclass Seq2seqAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'text', 'user')\n ordering = ('document',)\n search_fields = ('document',)\n\n\nclass RoleAdmin(admin.ModelAdmin):\n list_display = ('name', 'description')\n ordering = ('name',)\n search_fields = ('name',)\n\n\nclass RoleMappingAdmin(admin.ModelAdmin):\n list_display = ('user', 'role', 'project', )\n ordering = ('user',)\n search_fields = ('user',)\n\n\nadmin.site.register(DocumentAnnotation, DocumentAnnotationAdmin)\nadmin.site.register(SequenceAnnotation, SequenceAnnotationAdmin)\nadmin.site.register(Seq2seqAnnotation, Seq2seqAnnotationAdmin)\nadmin.site.register(Label, LabelAdmin)\nadmin.site.register(Document, DocumentAdmin)\nadmin.site.register(Project, ProjectAdmin)\nadmin.site.register(TextClassificationProject, ProjectAdmin)\nadmin.site.register(SequenceLabelingProject, ProjectAdmin)\nadmin.site.register(Seq2seqProject, ProjectAdmin)\nadmin.site.register(Role, RoleAdmin)\nadmin.site.register(RoleMapping, RoleMappingAdmin)\n", "path": "app/api/admin.py"}], "after_files": [{"content": "from django.contrib import admin\n\nfrom .models import Label, Document, Project\nfrom .models import Role, RoleMapping\nfrom .models import DocumentAnnotation, SequenceAnnotation, Seq2seqAnnotation\nfrom .models import TextClassificationProject, SequenceLabelingProject, Seq2seqProject\n\n\nclass LabelAdmin(admin.ModelAdmin):\n list_display = ('text', 'project', 'text_color', 'background_color')\n ordering = ('project',)\n search_fields = ('text',)\n\n\nclass DocumentAdmin(admin.ModelAdmin):\n list_display = ('text', 'project', 'meta')\n ordering = ('project',)\n search_fields = ('text',)\n\n\nclass ProjectAdmin(admin.ModelAdmin):\n list_display = ('name', 'description', 'project_type', 'randomize_document_order', 'collaborative_annotation')\n ordering = ('project_type',)\n search_fields = ('name',)\n\n\nclass SequenceAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'label', 'start_offset', 'user')\n ordering = ('document',)\n search_fields = ('document__text',)\n\n\nclass DocumentAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'label', 'user')\n ordering = ('document',)\n search_fields = ('document__text',)\n\n\nclass Seq2seqAnnotationAdmin(admin.ModelAdmin):\n list_display = ('document', 'text', 'user')\n ordering = ('document',)\n search_fields = ('document__text',)\n\n\nclass RoleAdmin(admin.ModelAdmin):\n list_display = ('name', 'description')\n ordering = ('name',)\n search_fields = ('name',)\n\n\nclass RoleMappingAdmin(admin.ModelAdmin):\n list_display = ('user', 'role', 'project', )\n ordering = ('user',)\n search_fields = ('user__username',)\n\n\nadmin.site.register(DocumentAnnotation, DocumentAnnotationAdmin)\nadmin.site.register(SequenceAnnotation, SequenceAnnotationAdmin)\nadmin.site.register(Seq2seqAnnotation, Seq2seqAnnotationAdmin)\nadmin.site.register(Label, LabelAdmin)\nadmin.site.register(Document, DocumentAdmin)\nadmin.site.register(Project, ProjectAdmin)\nadmin.site.register(TextClassificationProject, ProjectAdmin)\nadmin.site.register(SequenceLabelingProject, 
ProjectAdmin)\nadmin.site.register(Seq2seqProject, ProjectAdmin)\nadmin.site.register(Role, RoleAdmin)\nadmin.site.register(RoleMapping, RoleMappingAdmin)\n", "path": "app/api/admin.py"}]} | 2,356 | 364 |
gh_patches_debug_26203 | rasdani/github-patches | git_diff | pyg-team__pytorch_geometric-6546 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bipartite graph support for utils.dense_to_sparse
### 🚀 The feature, motivation and pitch
I have a nearly-dense bipartite graph (that is, most features in node set A are connected to most features in node set B), and so it is easiest for me to define the edge adjacency matrix as a dense, non-square matrix. However, the message passing class expects a sparse edge adjacency layout. The dense_to_sparse utility would seem to be ideal for this purpose, but it can only take square matrices (thus, is unhelpful for bipartite graphs).
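For reference, a dense rectangular (bipartite) adjacency can already be turned into COO-style indices with the same `nonzero()` pattern the utility uses internally; in the 2-D case only the square-shape assertion stands in the way. A minimal sketch, with node-set sizes and values made up for illustration:

```python
import torch

# Hypothetical bipartite adjacency: 2 nodes in set A (rows), 3 nodes in set B (columns).
adj = torch.tensor([[1.0, 0.0, 2.0],
                    [0.0, 3.0, 4.0]])

edge_index = adj.nonzero().t()                 # shape [2, num_edges]; row indexes A, col indexes B
edge_attr = adj[edge_index[0], edge_index[1]]  # the corresponding edge weights
```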
### Alternatives
A way to implicitly request propagate to pass messages from every node in A to every node in B would be even better (storing fully connected graphs is very memory inefficient), but I know that pyg is meant for sparser graph constructions so this would likely be a feature that wasn't used very much by other people.
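A rough sketch of what that implicit all-pairs connectivity amounts to if built explicitly (set sizes are assumed; the edge index grows as `num_a * num_b`, which is the memory cost being described):

```python
import torch

num_a, num_b = 4, 3  # assumed sizes of node sets A and B
row = torch.arange(num_a).repeat_interleave(num_b)  # 0,0,0,1,1,1,...
col = torch.arange(num_b).repeat(num_a)             # 0,1,2,0,1,2,...
edge_index = torch.stack([row, col], dim=0)         # every A-node connected to every B-node
```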
### Additional context
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torch_geometric/utils/sparse.py`
Content:
```
1 from typing import Any, Optional, Tuple, Union
2
3 import torch
4 from torch import Tensor
5
6 from torch_geometric.typing import SparseTensor
7
8
9 def dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]:
10 r"""Converts a dense adjacency matrix to a sparse adjacency matrix defined
11 by edge indices and edge attributes.
12
13 Args:
14 adj (Tensor): The dense adjacency matrix.
15 :rtype: (:class:`LongTensor`, :class:`Tensor`)
16
17 Examples:
18
19 >>> # Forr a single adjacency matrix
20 >>> adj = torch.tensor([[3, 1],
21 ... [2, 0]])
22 >>> dense_to_sparse(adj)
23 (tensor([[0, 0, 1],
24 [0, 1, 0]]),
25 tensor([3, 1, 2]))
26
27 >>> # For two adjacency matrixes
28 >>> adj = torch.tensor([[[3, 1],
29 ... [2, 0]],
30 ... [[0, 1],
31 ... [0, 2]]])
32 >>> dense_to_sparse(adj)
33 (tensor([[0, 0, 1, 2, 3],
34 [0, 1, 0, 3, 3]]),
35 tensor([3, 1, 2, 1, 2]))
36 """
37 assert adj.dim() >= 2 and adj.dim() <= 3
38 assert adj.size(-1) == adj.size(-2)
39
40 edge_index = adj.nonzero().t()
41
42 if edge_index.size(0) == 2:
43 edge_attr = adj[edge_index[0], edge_index[1]]
44 return edge_index, edge_attr
45 else:
46 edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]
47 batch = edge_index[0] * adj.size(-1)
48 row = batch + edge_index[1]
49 col = batch + edge_index[2]
50 return torch.stack([row, col], dim=0), edge_attr
51
52
53 def is_torch_sparse_tensor(src: Any) -> bool:
54 """Returns :obj:`True` if the input :obj:`src` is a
55 :class:`torch.sparse.Tensor` (in any sparse layout).
56
57 Args:
58 src (Any): The input object to be checked.
59 """
60 return isinstance(src, Tensor) and src.is_sparse
61
62
63 def is_sparse(src: Any) -> bool:
64 """Returns :obj:`True` if the input :obj:`src` is of type
65 :class:`torch.sparse.Tensor` (in any sparse layout) or of type
66 :class:`torch_sparse.SparseTensor`.
67
68 Args:
69 src (Any): The input object to be checked.
70 """
71 return is_torch_sparse_tensor(src) or isinstance(src, SparseTensor)
72
73
74 def to_torch_coo_tensor(
75 edge_index: Tensor,
76 edge_attr: Optional[Tensor] = None,
77 size: Optional[Union[int, Tuple[int, int]]] = None,
78 ) -> Tensor:
79 """Converts a sparse adjacency matrix defined by edge indices and edge
80 attributes to a :class:`torch.sparse.Tensor`.
81
82 Args:
83 edge_index (LongTensor): The edge indices.
84 edge_attr (Tensor, optional): The edge attributes.
85 (default: :obj:`None`)
86 size (int or (int, int), optional): The size of the sparse matrix.
87 If given as an integer, will create a quadratic sparse matrix.
88 If set to :obj:`None`, will infer a quadratic sparse matrix based
89 on :obj:`edge_index.max() + 1`. (default: :obj:`None`)
90
91 :rtype: :class:`torch.sparse.FloatTensor`
92
93 Example:
94
95 >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
96 ... [1, 0, 2, 1, 3, 2]])
97 >>> to_torch_coo_tensor(edge_index)
98 tensor(indices=tensor([[0, 1, 1, 2, 2, 3],
99 [1, 0, 2, 1, 3, 2]]),
100 values=tensor([1., 1., 1., 1., 1., 1.]),
101 size=(4, 4), nnz=6, layout=torch.sparse_coo)
102
103 """
104 if size is None:
105 size = int(edge_index.max()) + 1
106 if not isinstance(size, (tuple, list)):
107 size = (size, size)
108
109 if edge_attr is None:
110 edge_attr = torch.ones(edge_index.size(1), device=edge_index.device)
111
112 size = tuple(size) + edge_attr.size()[1:]
113 out = torch.sparse_coo_tensor(edge_index, edge_attr, size,
114 device=edge_index.device)
115 out = out.coalesce()
116 return out
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py
--- a/torch_geometric/utils/sparse.py
+++ b/torch_geometric/utils/sparse.py
@@ -11,7 +11,10 @@
by edge indices and edge attributes.
Args:
- adj (Tensor): The dense adjacency matrix.
+ adj (Tensor): The dense adjacency matrix of shape
+ :obj:`[num_nodes, num_nodes]` or
+ :obj:`[batch_size, num_nodes, num_nodes]`.
+
:rtype: (:class:`LongTensor`, :class:`Tensor`)
Examples:
@@ -34,8 +37,9 @@
[0, 1, 0, 3, 3]]),
tensor([3, 1, 2, 1, 2]))
"""
- assert adj.dim() >= 2 and adj.dim() <= 3
- assert adj.size(-1) == adj.size(-2)
+ if adj.dim() < 2 or adj.dim() > 3:
+ raise ValueError(f"Dense adjacency matrix 'adj' must be 2- or "
+ f"3-dimensional (got {adj.dim()} dimensions)")
edge_index = adj.nonzero().t()
@@ -44,9 +48,8 @@
return edge_index, edge_attr
else:
edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]
- batch = edge_index[0] * adj.size(-1)
- row = batch + edge_index[1]
- col = batch + edge_index[2]
+ row = edge_index[1] + adj.size(-2) * edge_index[0]
+ col = edge_index[2] + adj.size(-1) * edge_index[0]
return torch.stack([row, col], dim=0), edge_attr
| {"golden_diff": "diff --git a/torch_geometric/utils/sparse.py b/torch_geometric/utils/sparse.py\n--- a/torch_geometric/utils/sparse.py\n+++ b/torch_geometric/utils/sparse.py\n@@ -11,7 +11,10 @@\n by edge indices and edge attributes.\n \n Args:\n- adj (Tensor): The dense adjacency matrix.\n+ adj (Tensor): The dense adjacency matrix of shape\n+ :obj:`[num_nodes, num_nodes]` or\n+ :obj:`[batch_size, num_nodes, num_nodes]`.\n+\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n \n Examples:\n@@ -34,8 +37,9 @@\n [0, 1, 0, 3, 3]]),\n tensor([3, 1, 2, 1, 2]))\n \"\"\"\n- assert adj.dim() >= 2 and adj.dim() <= 3\n- assert adj.size(-1) == adj.size(-2)\n+ if adj.dim() < 2 or adj.dim() > 3:\n+ raise ValueError(f\"Dense adjacency matrix 'adj' must be 2- or \"\n+ f\"3-dimensional (got {adj.dim()} dimensions)\")\n \n edge_index = adj.nonzero().t()\n \n@@ -44,9 +48,8 @@\n return edge_index, edge_attr\n else:\n edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]\n- batch = edge_index[0] * adj.size(-1)\n- row = batch + edge_index[1]\n- col = batch + edge_index[2]\n+ row = edge_index[1] + adj.size(-2) * edge_index[0]\n+ col = edge_index[2] + adj.size(-1) * edge_index[0]\n return torch.stack([row, col], dim=0), edge_attr\n", "issue": "Bipartite graph support for utils.dense_to_sparse\n### \ud83d\ude80 The feature, motivation and pitch\n\nI have a nearly-dense bipartite graph (that is, most features in node set A are connected to most features in node set B), and so it is easiest for me to define the edge adjacency matrix as a dense, non-square matrix. However, the message passing class expects a sparse edge adjacency layout. The dense_to_sparse utility would seem to be ideal for this purpose, but it can only take square matrices (thus, is unhelpful for bipartite graphs).\n\n### Alternatives\n\nA way to implicitly request propagate to pass messages from every node in A to every node in B would be even better (storing fully connected graphs is very memory inefficient), but I know that pyg is meant for sparser graph constructions so this would likely be a feature that wasn't used very much by other people.\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "from typing import Any, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.typing import SparseTensor\n\n\ndef dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]:\n r\"\"\"Converts a dense adjacency matrix to a sparse adjacency matrix defined\n by edge indices and edge attributes.\n\n Args:\n adj (Tensor): The dense adjacency matrix.\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n\n Examples:\n\n >>> # Forr a single adjacency matrix\n >>> adj = torch.tensor([[3, 1],\n ... [2, 0]])\n >>> dense_to_sparse(adj)\n (tensor([[0, 0, 1],\n [0, 1, 0]]),\n tensor([3, 1, 2]))\n\n >>> # For two adjacency matrixes\n >>> adj = torch.tensor([[[3, 1],\n ... [2, 0]],\n ... [[0, 1],\n ... 
[0, 2]]])\n >>> dense_to_sparse(adj)\n (tensor([[0, 0, 1, 2, 3],\n [0, 1, 0, 3, 3]]),\n tensor([3, 1, 2, 1, 2]))\n \"\"\"\n assert adj.dim() >= 2 and adj.dim() <= 3\n assert adj.size(-1) == adj.size(-2)\n\n edge_index = adj.nonzero().t()\n\n if edge_index.size(0) == 2:\n edge_attr = adj[edge_index[0], edge_index[1]]\n return edge_index, edge_attr\n else:\n edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]\n batch = edge_index[0] * adj.size(-1)\n row = batch + edge_index[1]\n col = batch + edge_index[2]\n return torch.stack([row, col], dim=0), edge_attr\n\n\ndef is_torch_sparse_tensor(src: Any) -> bool:\n \"\"\"Returns :obj:`True` if the input :obj:`src` is a\n :class:`torch.sparse.Tensor` (in any sparse layout).\n\n Args:\n src (Any): The input object to be checked.\n \"\"\"\n return isinstance(src, Tensor) and src.is_sparse\n\n\ndef is_sparse(src: Any) -> bool:\n \"\"\"Returns :obj:`True` if the input :obj:`src` is of type\n :class:`torch.sparse.Tensor` (in any sparse layout) or of type\n :class:`torch_sparse.SparseTensor`.\n\n Args:\n src (Any): The input object to be checked.\n \"\"\"\n return is_torch_sparse_tensor(src) or isinstance(src, SparseTensor)\n\n\ndef to_torch_coo_tensor(\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n size: Optional[Union[int, Tuple[int, int]]] = None,\n) -> Tensor:\n \"\"\"Converts a sparse adjacency matrix defined by edge indices and edge\n attributes to a :class:`torch.sparse.Tensor`.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor, optional): The edge attributes.\n (default: :obj:`None`)\n size (int or (int, int), optional): The size of the sparse matrix.\n If given as an integer, will create a quadratic sparse matrix.\n If set to :obj:`None`, will infer a quadratic sparse matrix based\n on :obj:`edge_index.max() + 1`. (default: :obj:`None`)\n\n :rtype: :class:`torch.sparse.FloatTensor`\n\n Example:\n\n >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],\n ... [1, 0, 2, 1, 3, 2]])\n >>> to_torch_coo_tensor(edge_index)\n tensor(indices=tensor([[0, 1, 1, 2, 2, 3],\n [1, 0, 2, 1, 3, 2]]),\n values=tensor([1., 1., 1., 1., 1., 1.]),\n size=(4, 4), nnz=6, layout=torch.sparse_coo)\n\n \"\"\"\n if size is None:\n size = int(edge_index.max()) + 1\n if not isinstance(size, (tuple, list)):\n size = (size, size)\n\n if edge_attr is None:\n edge_attr = torch.ones(edge_index.size(1), device=edge_index.device)\n\n size = tuple(size) + edge_attr.size()[1:]\n out = torch.sparse_coo_tensor(edge_index, edge_attr, size,\n device=edge_index.device)\n out = out.coalesce()\n return out\n", "path": "torch_geometric/utils/sparse.py"}], "after_files": [{"content": "from typing import Any, Optional, Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torch_geometric.typing import SparseTensor\n\n\ndef dense_to_sparse(adj: Tensor) -> Tuple[Tensor, Tensor]:\n r\"\"\"Converts a dense adjacency matrix to a sparse adjacency matrix defined\n by edge indices and edge attributes.\n\n Args:\n adj (Tensor): The dense adjacency matrix of shape\n :obj:`[num_nodes, num_nodes]` or\n :obj:`[batch_size, num_nodes, num_nodes]`.\n\n :rtype: (:class:`LongTensor`, :class:`Tensor`)\n\n Examples:\n\n >>> # Forr a single adjacency matrix\n >>> adj = torch.tensor([[3, 1],\n ... [2, 0]])\n >>> dense_to_sparse(adj)\n (tensor([[0, 0, 1],\n [0, 1, 0]]),\n tensor([3, 1, 2]))\n\n >>> # For two adjacency matrixes\n >>> adj = torch.tensor([[[3, 1],\n ... [2, 0]],\n ... [[0, 1],\n ... 
[0, 2]]])\n >>> dense_to_sparse(adj)\n (tensor([[0, 0, 1, 2, 3],\n [0, 1, 0, 3, 3]]),\n tensor([3, 1, 2, 1, 2]))\n \"\"\"\n if adj.dim() < 2 or adj.dim() > 3:\n raise ValueError(f\"Dense adjacency matrix 'adj' must be 2- or \"\n f\"3-dimensional (got {adj.dim()} dimensions)\")\n\n edge_index = adj.nonzero().t()\n\n if edge_index.size(0) == 2:\n edge_attr = adj[edge_index[0], edge_index[1]]\n return edge_index, edge_attr\n else:\n edge_attr = adj[edge_index[0], edge_index[1], edge_index[2]]\n row = edge_index[1] + adj.size(-2) * edge_index[0]\n col = edge_index[2] + adj.size(-1) * edge_index[0]\n return torch.stack([row, col], dim=0), edge_attr\n\n\ndef is_torch_sparse_tensor(src: Any) -> bool:\n \"\"\"Returns :obj:`True` if the input :obj:`src` is a\n :class:`torch.sparse.Tensor` (in any sparse layout).\n\n Args:\n src (Any): The input object to be checked.\n \"\"\"\n return isinstance(src, Tensor) and src.is_sparse\n\n\ndef is_sparse(src: Any) -> bool:\n \"\"\"Returns :obj:`True` if the input :obj:`src` is of type\n :class:`torch.sparse.Tensor` (in any sparse layout) or of type\n :class:`torch_sparse.SparseTensor`.\n\n Args:\n src (Any): The input object to be checked.\n \"\"\"\n return is_torch_sparse_tensor(src) or isinstance(src, SparseTensor)\n\n\ndef to_torch_coo_tensor(\n edge_index: Tensor,\n edge_attr: Optional[Tensor] = None,\n size: Optional[Union[int, Tuple[int, int]]] = None,\n) -> Tensor:\n \"\"\"Converts a sparse adjacency matrix defined by edge indices and edge\n attributes to a :class:`torch.sparse.Tensor`.\n\n Args:\n edge_index (LongTensor): The edge indices.\n edge_attr (Tensor, optional): The edge attributes.\n (default: :obj:`None`)\n size (int or (int, int), optional): The size of the sparse matrix.\n If given as an integer, will create a quadratic sparse matrix.\n If set to :obj:`None`, will infer a quadratic sparse matrix based\n on :obj:`edge_index.max() + 1`. (default: :obj:`None`)\n\n :rtype: :class:`torch.sparse.FloatTensor`\n\n Example:\n\n >>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],\n ... [1, 0, 2, 1, 3, 2]])\n >>> to_torch_coo_tensor(edge_index)\n tensor(indices=tensor([[0, 1, 1, 2, 2, 3],\n [1, 0, 2, 1, 3, 2]]),\n values=tensor([1., 1., 1., 1., 1., 1.]),\n size=(4, 4), nnz=6, layout=torch.sparse_coo)\n\n \"\"\"\n if size is None:\n size = int(edge_index.max()) + 1\n if not isinstance(size, (tuple, list)):\n size = (size, size)\n\n if edge_attr is None:\n edge_attr = torch.ones(edge_index.size(1), device=edge_index.device)\n\n size = tuple(size) + edge_attr.size()[1:]\n out = torch.sparse_coo_tensor(edge_index, edge_attr, size,\n device=edge_index.device)\n out = out.coalesce()\n return out\n", "path": "torch_geometric/utils/sparse.py"}]} | 1,763 | 423 |
gh_patches_debug_2242 | rasdani/github-patches | git_diff | python-poetry__poetry-1577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
poetry v1.0.0b4 breaks on zip packages
<!-- Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.
- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).
- **OS version and name**: Windows 10 with a virtual environment for Python v3.7.4
- **Poetry version**: 1.0.0b4
- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: (empty project)
## Issue Summary
The newly refactored distribution loading mechanism from [PR 1549](https://github.com/sdispater/poetry/pull/1549/) fails when a zip-based package is present. The presenting problem is that the `zipp.Path` class is not compatible with the `__fspath__` protocol.
## Issue Details
After updating to Poetry v1.0.0b4, I get this traceback
```
% poetry update -v
Using virtualenv: C:\Users\garyd\devel\video-storage\venv-new
[TypeError]
expected str, bytes or os.PathLike object, not Path
Traceback (most recent call last):
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\console_application.py", line 131, in run
status_code = command.handle(parsed_args, io)
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\api\command\command.py", line 120, in handle
status_code = self._do_handle(args, io)
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\clikit\api\command\command.py", line 171, in _do_handle
return getattr(handler, handler_method)(args, io, self)
File "C:\Users\garyd\.poetry\lib\poetry\_vendor\py3.7\cleo\commands\command.py", line 92, in wrap_handle
return self.handle()
File "C:\Users\garyd\.poetry\lib\poetry\console\commands\update.py", line 36, in handle
self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool
File "C:\Users\garyd\.poetry\lib\poetry\installation\installer.py", line 55, in __init__
installed = self._get_installed()
File "C:\Users\garyd\.poetry\lib\poetry\installation\installer.py", line 507, in _get_installed
return InstalledRepository.load(self._env)
File "C:\Users\garyd\.poetry\lib\poetry\repositories\installed_repository.py", line 30, in load
path = Path(distribution._path)
File "C:\Users\garyd\AppData\Local\Programs\Python\Python37\lib\pathlib.py", line 1010, in __new__
self = cls._from_parts(args, init=False)
```
When I run the broken part of the code in my console, I find that the broken distribution is:
* type == <class 'importlib_metadata.PathDistribution'>
* type(dist._path) == <class 'zipp.Path'>
* dist._path == C:\Users\garyd\devel\video-storage\venv\lib\site-packages\setuptools-40.8.0-py3.7.egg/EGG-INFO/
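Given that `dist._path` is a `zipp.Path`, which is not usable as an `os.PathLike` here, `pathlib.Path(dist._path)` raises the `TypeError` shown above. A minimal sketch of the coercion that avoids it (iterating distributions the same way the repository code does):

```python
from pathlib import Path

from importlib_metadata import distributions

for distribution in distributions():
    # zipp.Path (used for zip/egg installs) is not os.PathLike, so coerce via str()
    path = Path(str(distribution._path))
```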
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `poetry/repositories/installed_repository.py`
Content:
```
1 from importlib_metadata import distributions
2 from poetry.packages import Package
3 from poetry.utils._compat import Path
4 from poetry.utils.env import Env
5
6 from .repository import Repository
7
8
9 class InstalledRepository(Repository):
10 @classmethod
11 def load(cls, env): # type: (Env) -> InstalledRepository
12 """
13 Load installed packages.
14
15 For now, it uses the pip "freeze" command.
16 """
17 repo = cls()
18
19 for distribution in sorted(
20 distributions(path=env.sys_path), key=lambda d: str(d._path),
21 ):
22 metadata = distribution.metadata
23 name = metadata["name"]
24 version = metadata["version"]
25 package = Package(name, version, version)
26 package.description = metadata.get("summary", "")
27
28 repo.add_package(package)
29
30 path = Path(distribution._path)
31 is_standard_package = True
32 try:
33 path.relative_to(env.site_packages)
34 except ValueError:
35 is_standard_package = False
36
37 if is_standard_package:
38 continue
39
40 src_path = env.path / "src"
41
42 # A VCS dependency should have been installed
43 # in the src directory. If not, it's a path dependency
44 try:
45 path.relative_to(src_path)
46
47 from poetry.vcs.git import Git
48
49 git = Git()
50 revision = git.rev_parse("HEAD", src_path / package.name).strip()
51 url = git.remote_url(src_path / package.name)
52
53 package.source_type = "git"
54 package.source_url = url
55 package.source_reference = revision
56 except ValueError:
57 package.source_type = "directory"
58 package.source_url = str(path.parent)
59
60 return repo
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py
--- a/poetry/repositories/installed_repository.py
+++ b/poetry/repositories/installed_repository.py
@@ -27,7 +27,7 @@
repo.add_package(package)
- path = Path(distribution._path)
+ path = Path(str(distribution._path))
is_standard_package = True
try:
path.relative_to(env.site_packages)
| {"golden_diff": "diff --git a/poetry/repositories/installed_repository.py b/poetry/repositories/installed_repository.py\n--- a/poetry/repositories/installed_repository.py\n+++ b/poetry/repositories/installed_repository.py\n@@ -27,7 +27,7 @@\n \n repo.add_package(package)\n \n- path = Path(distribution._path)\n+ path = Path(str(distribution._path))\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n", "issue": "poetry v1.0.0b4 breaks on zip packages\n<!-- Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/sdispater/poetry/releases/latest) Poetry version.\r\n- [x] I have searched the [issues](https://github.com/sdispater/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option).\r\n\r\n- **OS version and name**: Windows 10 with a virtual environment for Python v3.7.4\r\n- **Poetry version**: 1.0.0b4\r\n- **Link of a [Gist](https://gist.github.com/) with the contents of your pyproject.toml file**: (empty project)\r\n\r\n## Issue Summary\r\nThe newly refactored distribution loading mechanism from [PR 1549](https://github.com/sdispater/poetry/pull/1549/) fails when a zip-based package is present. The presenting problem is that the `zipp.Path` class is not compatible with the `__fspath__` protocol.\r\n\r\n## Issue Details\r\nAfter updating to Poetry v1.0.0b4, I get this traceback\r\n\r\n```\r\n% poetry update -v\r\nUsing virtualenv: C:\\Users\\garyd\\devel\\video-storage\\venv-new\r\n[TypeError]\r\nexpected str, bytes or os.PathLike object, not Path\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\clikit\\console_application.py\", line 131, in run\r\n status_code = command.handle(parsed_args, io)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\clikit\\api\\command\\command.py\", line 120, in handle\r\n status_code = self._do_handle(args, io)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\clikit\\api\\command\\command.py\", line 171, in _do_handle\r\n return getattr(handler, handler_method)(args, io, self)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\_vendor\\py3.7\\cleo\\commands\\command.py\", line 92, in wrap_handle\r\n return self.handle()\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\console\\commands\\update.py\", line 36, in handle\r\n self.io, self.env, self.poetry.package, self.poetry.locker, self.poetry.pool\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\installation\\installer.py\", line 55, in __init__\r\n installed = self._get_installed()\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\installation\\installer.py\", line 507, in _get_installed\r\n return InstalledRepository.load(self._env)\r\n File \"C:\\Users\\garyd\\.poetry\\lib\\poetry\\repositories\\installed_repository.py\", line 30, in load\r\n path = Path(distribution._path)\r\n File \"C:\\Users\\garyd\\AppData\\Local\\Programs\\Python\\Python37\\lib\\pathlib.py\", line 1010, in __new__\r\n self = cls._from_parts(args, init=False)\r\n```\r\n\r\nWhen I run the broken part of the code in my console, I find that the broken distribution is:\r\n\r\n* type == <class 'importlib_metadata.PathDistribution'>\r\n* type(dist._path) == <class 'zipp.Path'>\r\n* dist._path == C:\\Users\\garyd\\devel\\video-storage\\venv\\lib\\site-packages\\setuptools-40.8.0-py3.7.egg/EGG-INFO/\r\n\r\n\n", "before_files": [{"content": "from importlib_metadata 
import distributions\nfrom poetry.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n\n For now, it uses the pip \"freeze\" command.\n \"\"\"\n repo = cls()\n\n for distribution in sorted(\n distributions(path=env.sys_path), key=lambda d: str(d._path),\n ):\n metadata = distribution.metadata\n name = metadata[\"name\"]\n version = metadata[\"version\"]\n package = Package(name, version, version)\n package.description = metadata.get(\"summary\", \"\")\n\n repo.add_package(package)\n\n path = Path(distribution._path)\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py"}], "after_files": [{"content": "from importlib_metadata import distributions\nfrom poetry.packages import Package\nfrom poetry.utils._compat import Path\nfrom poetry.utils.env import Env\n\nfrom .repository import Repository\n\n\nclass InstalledRepository(Repository):\n @classmethod\n def load(cls, env): # type: (Env) -> InstalledRepository\n \"\"\"\n Load installed packages.\n\n For now, it uses the pip \"freeze\" command.\n \"\"\"\n repo = cls()\n\n for distribution in sorted(\n distributions(path=env.sys_path), key=lambda d: str(d._path),\n ):\n metadata = distribution.metadata\n name = metadata[\"name\"]\n version = metadata[\"version\"]\n package = Package(name, version, version)\n package.description = metadata.get(\"summary\", \"\")\n\n repo.add_package(package)\n\n path = Path(str(distribution._path))\n is_standard_package = True\n try:\n path.relative_to(env.site_packages)\n except ValueError:\n is_standard_package = False\n\n if is_standard_package:\n continue\n\n src_path = env.path / \"src\"\n\n # A VCS dependency should have been installed\n # in the src directory. If not, it's a path dependency\n try:\n path.relative_to(src_path)\n\n from poetry.vcs.git import Git\n\n git = Git()\n revision = git.rev_parse(\"HEAD\", src_path / package.name).strip()\n url = git.remote_url(src_path / package.name)\n\n package.source_type = \"git\"\n package.source_url = url\n package.source_reference = revision\n except ValueError:\n package.source_type = \"directory\"\n package.source_url = str(path.parent)\n\n return repo\n", "path": "poetry/repositories/installed_repository.py"}]} | 1,613 | 107 |
gh_patches_debug_20658 | rasdani/github-patches | git_diff | svthalia__concrexit-3531 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Entry.DoesNotExist in RegistrationAdmin changeform
Sentry Issue: [CONCREXIT-Q5](https://thalia.sentry.io/issues/4058249887/?referrer=github_integration)
```
Entry.DoesNotExist: Entry matching query does not exist.
(5 additional frame(s) were not displayed)
...
File "registrations/admin.py", line 169, in changeform_view
obj = Entry.objects.get(id=object_id)
```
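The `get()` call fails like this whenever the change form is opened with an `object_id` that no longer matches an `Entry` (for example a stale link), turning a missing object into a 500. A sketch of a more tolerant lookup, using the names from the traceback (`object_id` comes from the admin view):

```python
from registrations.models import Entry

try:
    obj = Entry.objects.get(id=object_id)
except Entry.DoesNotExist:
    obj = None  # let the admin fall back to its normal "object does not exist" handling
```

Django's `ModelAdmin.get_object()` already wraps this pattern and returns `None` for missing objects.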
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/registrations/admin.py`
Content:
```
1 """Registers admin interfaces for the registrations module."""
2 from functools import partial
3
4 from django.contrib import admin
5 from django.forms import Field
6 from django.utils.translation import gettext_lazy as _
7
8 from payments.widgets import PaymentWidget
9 from registrations.services import (
10 accept_registration,
11 accept_renewal,
12 reject_registration,
13 reject_renewal,
14 )
15
16 from .forms import RegistrationAdminForm
17 from .models import Entry, Reference, Registration, Renewal
18
19
20 class ReferenceInline(admin.StackedInline):
21 model = Reference
22 extra = 0
23
24
25 @admin.register(Registration)
26 class RegistrationAdmin(admin.ModelAdmin):
27 """Manage the registrations."""
28
29 list_display = (
30 "name",
31 "email",
32 "status",
33 "membership_type",
34 "contribution",
35 "created_at",
36 "payment",
37 "no_references",
38 "reference_count",
39 )
40 list_filter = (
41 "status",
42 "programme",
43 "membership_type",
44 "no_references",
45 "payment__type",
46 "contribution",
47 )
48 inlines = (ReferenceInline,)
49 search_fields = (
50 "first_name",
51 "last_name",
52 "email",
53 "phone_number",
54 "student_number",
55 )
56 date_hierarchy = "created_at"
57 fieldsets = (
58 (
59 _("Application information"),
60 {
61 "fields": (
62 "created_at",
63 "updated_at",
64 "username",
65 "length",
66 "contribution",
67 "membership_type",
68 "status",
69 "payment",
70 "remarks",
71 )
72 },
73 ),
74 (
75 _("Personal information"),
76 {
77 "fields": (
78 "first_name",
79 "last_name",
80 "birthday",
81 "optin_birthday",
82 "email",
83 "optin_mailinglist",
84 "phone_number",
85 )
86 },
87 ),
88 (
89 _("Address"),
90 {
91 "fields": (
92 "address_street",
93 "address_street2",
94 "address_postal_code",
95 "address_city",
96 "address_country",
97 )
98 },
99 ),
100 (
101 _("Financial"),
102 {
103 "fields": (
104 "direct_debit",
105 "initials",
106 "iban",
107 "bic",
108 "signature",
109 )
110 },
111 ),
112 (
113 _("University information"),
114 {
115 "fields": (
116 "student_number",
117 "programme",
118 "starting_year",
119 )
120 },
121 ),
122 )
123
124 form = RegistrationAdminForm
125
126 actions = ["accept_registrations", "reject_registrations"]
127
128 def get_actions(self, request):
129 actions = super().get_actions(request)
130
131 if not request.user.has_perm("registrations.review_entries"):
132 if "accept_registrations" in actions:
133 del actions["accept_registrations"]
134 if "reject_registrations" in actions:
135 del actions["reject_registrations"]
136
137 return actions
138
139 @admin.action(description="Accept selected registrations")
140 def accept_registrations(self, request, queryset): # pragma: no cover
141 if queryset.exclude(status=Registration.STATUS_REVIEW).exists():
142 self.message_user(
143 request, "Only registrations in review can be accepted", "error"
144 )
145 return
146
147 count = 0
148 for registration in queryset:
149 try:
150 accept_registration(registration, actor=request.user)
151 count += 1
152 except ValueError as e:
153 self.message_user(
154 request, f"Error accepting {registration}: {e.message}", "error"
155 )
156
157 self.message_user(request, f"Accepted {count} registrations", "success")
158
159 @admin.action(description="Reject selected registrations")
160 def reject_registrations(self, request, queryset): # pragma: no cover
161 if queryset.exclude(status=Registration.STATUS_REVIEW).exists():
162 self.message_user(
163 request, "Only registrations in review can be rejected", "error"
164 )
165 return
166
167 count = queryset.count()
168 for registration in queryset:
169 reject_registration(registration, actor=request.user)
170
171 self.message_user(request, f"Rejected {count} registrations", "success")
172
173 def reference_count(self, obj):
174 return obj.reference_set.count()
175
176 reference_count.short_description = _("references")
177
178 def get_form(self, request, obj=None, **kwargs):
179 return super().get_form(
180 request,
181 obj,
182 formfield_callback=partial(
183 self.formfield_for_dbfield, request=request, obj=obj
184 ),
185 **kwargs,
186 )
187
188 def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):
189 field = super().formfield_for_dbfield(db_field, request, **kwargs)
190 if db_field.name == "payment":
191 return Field(
192 widget=PaymentWidget(obj=obj), initial=field.initial, required=False
193 )
194 return field
195
196 def changeform_view(self, request, object_id=None, form_url="", extra_context=None):
197 """Render the change formview.
198
199 Only allow when the entry has not been processed yet
200 """
201 obj = None
202 can_review = False
203 can_resend = False
204 can_revert = False
205 if object_id is not None and request.user.has_perm(
206 "registrations.review_entries"
207 ):
208 obj = Entry.objects.get(id=object_id)
209 can_review = obj.status == Entry.STATUS_REVIEW
210 can_revert = obj.status in [Entry.STATUS_ACCEPTED, Entry.STATUS_REJECTED]
211 try:
212 can_resend = obj.registration.status == Entry.STATUS_CONFIRM
213 except Registration.DoesNotExist:
214 pass
215 return super().changeform_view(
216 request,
217 object_id,
218 form_url,
219 {
220 "entry": obj,
221 "can_review": can_review,
222 "can_resend": can_resend,
223 "can_revert": can_revert,
224 },
225 )
226
227 def get_readonly_fields(self, request, obj=None):
228 if obj is None or obj.status not in (
229 Entry.STATUS_REJECTED,
230 Entry.STATUS_ACCEPTED,
231 Entry.STATUS_COMPLETED,
232 ):
233 return ["status", "created_at", "updated_at", "payment"]
234 return [
235 field.name
236 for field in self.model._meta.get_fields()
237 if field.name not in ["payment", "no_references"] and field.editable
238 ]
239
240 @staticmethod
241 def name(obj):
242 return obj.get_full_name()
243
244 def has_change_permission(self, request, obj=None):
245 """Completed registrations are read-only."""
246 return (
247 False
248 if obj and obj.status == Entry.STATUS_COMPLETED
249 else super().has_change_permission(request, obj)
250 )
251
252 def has_add_permission(self, request):
253 return False
254
255 def save_model(self, request, obj, form, change):
256 if obj.status not in (
257 Entry.STATUS_REJECTED,
258 Entry.STATUS_ACCEPTED,
259 Entry.STATUS_COMPLETED,
260 ):
261 super().save_model(request, obj, form, change)
262
263
264 @admin.register(Renewal)
265 class RenewalAdmin(RegistrationAdmin):
266 """Manage the renewals."""
267
268 list_display = (
269 "name",
270 "email",
271 "status",
272 "membership_type",
273 "contribution",
274 "created_at",
275 "payment",
276 "no_references",
277 "reference_count",
278 )
279 list_filter = (
280 "status",
281 "membership_type",
282 "no_references",
283 "payment__type",
284 "contribution",
285 )
286 search_fields = (
287 "member__first_name",
288 "member__last_name",
289 "member__email",
290 "member__profile__phone_number",
291 "member__profile__student_number",
292 )
293 date_hierarchy = "created_at"
294 fieldsets = (
295 (
296 _("Application information"),
297 {
298 "fields": (
299 "created_at",
300 "updated_at",
301 "length",
302 "contribution",
303 "membership_type",
304 "status",
305 "payment",
306 "remarks",
307 "member",
308 )
309 },
310 ),
311 )
312
313 actions = ["accept_renewals", "reject_renewals"]
314
315 def get_actions(self, request):
316 actions = super().get_actions(request)
317
318 if not request.user.has_perm("registrations.review_entries"):
319 if "accept_renewals" in actions: # pragma: no cover
320 del actions["accept_renewals"]
321 if "reject_renewals" in actions: # pragma: no cover
322 del actions["reject_renewals"]
323
324 return actions
325
326 @admin.action(description="Accept selected renewals")
327 def accept_renewals(self, request, queryset): # pragma: no cover
328 if queryset.exclude(status=Renewal.STATUS_REVIEW).exists():
329 self.message_user(
330 request, "Only renewals in review can be accepted", "error"
331 )
332 return
333
334 count = queryset.count()
335 for renewal in queryset:
336 accept_renewal(renewal, actor=request.user)
337 count += 1
338
339 self.message_user(request, f"Accepted {count} renewals", "success")
340
341 @admin.action(description="Reject selected renewals")
342 def reject_renewals(self, request, queryset): # pragma: no cover
343 if queryset.exclude(status=Renewal.STATUS_REVIEW).exists():
344 self.message_user(
345 request, "Only renewals in review can be rejected", "error"
346 )
347 return
348
349 count = queryset.count()
350 for renewal in queryset:
351 reject_renewal(renewal, actor=request.user)
352
353 self.message_user(request, f"Rejected {count} renewals", "success")
354
355 def get_readonly_fields(self, request, obj=None):
356 """Make all fields read-only and add member if needed."""
357 fields = super().get_readonly_fields(request, obj)
358 if "member" not in fields and obj is not None:
359 return fields + ["member"]
360 return fields
361
362 def has_add_permission(self, request):
363 return False
364
365 @staticmethod
366 def name(obj):
367 return obj.member.get_full_name()
368
369 name.short_description = _("name")
370
371 @staticmethod
372 def email(obj):
373 return obj.member.email
374
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/registrations/admin.py b/website/registrations/admin.py
--- a/website/registrations/admin.py
+++ b/website/registrations/admin.py
@@ -204,14 +204,18 @@
can_revert = False
if object_id is not None and request.user.has_perm(
"registrations.review_entries"
- ):
- obj = Entry.objects.get(id=object_id)
+ ): # pragma: no cover
+ obj = self.get_object(request, object_id)
+ if obj is None:
+ return self._get_obj_does_not_exist_redirect(
+ request, self.opts, object_id
+ )
can_review = obj.status == Entry.STATUS_REVIEW
can_revert = obj.status in [Entry.STATUS_ACCEPTED, Entry.STATUS_REJECTED]
- try:
- can_resend = obj.registration.status == Entry.STATUS_CONFIRM
- except Registration.DoesNotExist:
- pass
+ can_resend = obj.status == Entry.STATUS_CONFIRM and isinstance(
+ obj, Registration
+ )
+
return super().changeform_view(
request,
object_id,
| {"golden_diff": "diff --git a/website/registrations/admin.py b/website/registrations/admin.py\n--- a/website/registrations/admin.py\n+++ b/website/registrations/admin.py\n@@ -204,14 +204,18 @@\n can_revert = False\n if object_id is not None and request.user.has_perm(\n \"registrations.review_entries\"\n- ):\n- obj = Entry.objects.get(id=object_id)\n+ ): # pragma: no cover\n+ obj = self.get_object(request, object_id)\n+ if obj is None:\n+ return self._get_obj_does_not_exist_redirect(\n+ request, self.opts, object_id\n+ )\n can_review = obj.status == Entry.STATUS_REVIEW\n can_revert = obj.status in [Entry.STATUS_ACCEPTED, Entry.STATUS_REJECTED]\n- try:\n- can_resend = obj.registration.status == Entry.STATUS_CONFIRM\n- except Registration.DoesNotExist:\n- pass\n+ can_resend = obj.status == Entry.STATUS_CONFIRM and isinstance(\n+ obj, Registration\n+ )\n+\n return super().changeform_view(\n request,\n object_id,\n", "issue": "Entry.DoesNotExist in RegistrationAdmin changeform\nSentry Issue: [CONCREXIT-Q5](https://thalia.sentry.io/issues/4058249887/?referrer=github_integration)\n\n```\nEntry.DoesNotExist: Entry matching query does not exist.\n(5 additional frame(s) were not displayed)\n...\n File \"registrations/admin.py\", line 169, in changeform_view\n obj = Entry.objects.get(id=object_id)\n```\n", "before_files": [{"content": "\"\"\"Registers admin interfaces for the registrations module.\"\"\"\nfrom functools import partial\n\nfrom django.contrib import admin\nfrom django.forms import Field\nfrom django.utils.translation import gettext_lazy as _\n\nfrom payments.widgets import PaymentWidget\nfrom registrations.services import (\n accept_registration,\n accept_renewal,\n reject_registration,\n reject_renewal,\n)\n\nfrom .forms import RegistrationAdminForm\nfrom .models import Entry, Reference, Registration, Renewal\n\n\nclass ReferenceInline(admin.StackedInline):\n model = Reference\n extra = 0\n\n\[email protected](Registration)\nclass RegistrationAdmin(admin.ModelAdmin):\n \"\"\"Manage the registrations.\"\"\"\n\n list_display = (\n \"name\",\n \"email\",\n \"status\",\n \"membership_type\",\n \"contribution\",\n \"created_at\",\n \"payment\",\n \"no_references\",\n \"reference_count\",\n )\n list_filter = (\n \"status\",\n \"programme\",\n \"membership_type\",\n \"no_references\",\n \"payment__type\",\n \"contribution\",\n )\n inlines = (ReferenceInline,)\n search_fields = (\n \"first_name\",\n \"last_name\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n )\n date_hierarchy = \"created_at\"\n fieldsets = (\n (\n _(\"Application information\"),\n {\n \"fields\": (\n \"created_at\",\n \"updated_at\",\n \"username\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"status\",\n \"payment\",\n \"remarks\",\n )\n },\n ),\n (\n _(\"Personal information\"),\n {\n \"fields\": (\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"optin_birthday\",\n \"email\",\n \"optin_mailinglist\",\n \"phone_number\",\n )\n },\n ),\n (\n _(\"Address\"),\n {\n \"fields\": (\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n )\n },\n ),\n (\n _(\"Financial\"),\n {\n \"fields\": (\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n },\n ),\n (\n _(\"University information\"),\n {\n \"fields\": (\n \"student_number\",\n \"programme\",\n \"starting_year\",\n )\n },\n ),\n )\n\n form = RegistrationAdminForm\n\n actions = [\"accept_registrations\", \"reject_registrations\"]\n\n def 
get_actions(self, request):\n actions = super().get_actions(request)\n\n if not request.user.has_perm(\"registrations.review_entries\"):\n if \"accept_registrations\" in actions:\n del actions[\"accept_registrations\"]\n if \"reject_registrations\" in actions:\n del actions[\"reject_registrations\"]\n\n return actions\n\n @admin.action(description=\"Accept selected registrations\")\n def accept_registrations(self, request, queryset): # pragma: no cover\n if queryset.exclude(status=Registration.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only registrations in review can be accepted\", \"error\"\n )\n return\n\n count = 0\n for registration in queryset:\n try:\n accept_registration(registration, actor=request.user)\n count += 1\n except ValueError as e:\n self.message_user(\n request, f\"Error accepting {registration}: {e.message}\", \"error\"\n )\n\n self.message_user(request, f\"Accepted {count} registrations\", \"success\")\n\n @admin.action(description=\"Reject selected registrations\")\n def reject_registrations(self, request, queryset): # pragma: no cover\n if queryset.exclude(status=Registration.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only registrations in review can be rejected\", \"error\"\n )\n return\n\n count = queryset.count()\n for registration in queryset:\n reject_registration(registration, actor=request.user)\n\n self.message_user(request, f\"Rejected {count} registrations\", \"success\")\n\n def reference_count(self, obj):\n return obj.reference_set.count()\n\n reference_count.short_description = _(\"references\")\n\n def get_form(self, request, obj=None, **kwargs):\n return super().get_form(\n request,\n obj,\n formfield_callback=partial(\n self.formfield_for_dbfield, request=request, obj=obj\n ),\n **kwargs,\n )\n\n def formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):\n field = super().formfield_for_dbfield(db_field, request, **kwargs)\n if db_field.name == \"payment\":\n return Field(\n widget=PaymentWidget(obj=obj), initial=field.initial, required=False\n )\n return field\n\n def changeform_view(self, request, object_id=None, form_url=\"\", extra_context=None):\n \"\"\"Render the change formview.\n\n Only allow when the entry has not been processed yet\n \"\"\"\n obj = None\n can_review = False\n can_resend = False\n can_revert = False\n if object_id is not None and request.user.has_perm(\n \"registrations.review_entries\"\n ):\n obj = Entry.objects.get(id=object_id)\n can_review = obj.status == Entry.STATUS_REVIEW\n can_revert = obj.status in [Entry.STATUS_ACCEPTED, Entry.STATUS_REJECTED]\n try:\n can_resend = obj.registration.status == Entry.STATUS_CONFIRM\n except Registration.DoesNotExist:\n pass\n return super().changeform_view(\n request,\n object_id,\n form_url,\n {\n \"entry\": obj,\n \"can_review\": can_review,\n \"can_resend\": can_resend,\n \"can_revert\": can_revert,\n },\n )\n\n def get_readonly_fields(self, request, obj=None):\n if obj is None or obj.status not in (\n Entry.STATUS_REJECTED,\n Entry.STATUS_ACCEPTED,\n Entry.STATUS_COMPLETED,\n ):\n return [\"status\", \"created_at\", \"updated_at\", \"payment\"]\n return [\n field.name\n for field in self.model._meta.get_fields()\n if field.name not in [\"payment\", \"no_references\"] and field.editable\n ]\n\n @staticmethod\n def name(obj):\n return obj.get_full_name()\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Completed registrations are read-only.\"\"\"\n return (\n False\n if obj and obj.status == Entry.STATUS_COMPLETED\n else 
super().has_change_permission(request, obj)\n )\n\n def has_add_permission(self, request):\n return False\n\n def save_model(self, request, obj, form, change):\n if obj.status not in (\n Entry.STATUS_REJECTED,\n Entry.STATUS_ACCEPTED,\n Entry.STATUS_COMPLETED,\n ):\n super().save_model(request, obj, form, change)\n\n\[email protected](Renewal)\nclass RenewalAdmin(RegistrationAdmin):\n \"\"\"Manage the renewals.\"\"\"\n\n list_display = (\n \"name\",\n \"email\",\n \"status\",\n \"membership_type\",\n \"contribution\",\n \"created_at\",\n \"payment\",\n \"no_references\",\n \"reference_count\",\n )\n list_filter = (\n \"status\",\n \"membership_type\",\n \"no_references\",\n \"payment__type\",\n \"contribution\",\n )\n search_fields = (\n \"member__first_name\",\n \"member__last_name\",\n \"member__email\",\n \"member__profile__phone_number\",\n \"member__profile__student_number\",\n )\n date_hierarchy = \"created_at\"\n fieldsets = (\n (\n _(\"Application information\"),\n {\n \"fields\": (\n \"created_at\",\n \"updated_at\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"status\",\n \"payment\",\n \"remarks\",\n \"member\",\n )\n },\n ),\n )\n\n actions = [\"accept_renewals\", \"reject_renewals\"]\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n\n if not request.user.has_perm(\"registrations.review_entries\"):\n if \"accept_renewals\" in actions: # pragma: no cover\n del actions[\"accept_renewals\"]\n if \"reject_renewals\" in actions: # pragma: no cover\n del actions[\"reject_renewals\"]\n\n return actions\n\n @admin.action(description=\"Accept selected renewals\")\n def accept_renewals(self, request, queryset): # pragma: no cover\n if queryset.exclude(status=Renewal.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only renewals in review can be accepted\", \"error\"\n )\n return\n\n count = queryset.count()\n for renewal in queryset:\n accept_renewal(renewal, actor=request.user)\n count += 1\n\n self.message_user(request, f\"Accepted {count} renewals\", \"success\")\n\n @admin.action(description=\"Reject selected renewals\")\n def reject_renewals(self, request, queryset): # pragma: no cover\n if queryset.exclude(status=Renewal.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only renewals in review can be rejected\", \"error\"\n )\n return\n\n count = queryset.count()\n for renewal in queryset:\n reject_renewal(renewal, actor=request.user)\n\n self.message_user(request, f\"Rejected {count} renewals\", \"success\")\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Make all fields read-only and add member if needed.\"\"\"\n fields = super().get_readonly_fields(request, obj)\n if \"member\" not in fields and obj is not None:\n return fields + [\"member\"]\n return fields\n\n def has_add_permission(self, request):\n return False\n\n @staticmethod\n def name(obj):\n return obj.member.get_full_name()\n\n name.short_description = _(\"name\")\n\n @staticmethod\n def email(obj):\n return obj.member.email\n", "path": "website/registrations/admin.py"}], "after_files": [{"content": "\"\"\"Registers admin interfaces for the registrations module.\"\"\"\nfrom functools import partial\n\nfrom django.contrib import admin\nfrom django.forms import Field\nfrom django.utils.translation import gettext_lazy as _\n\nfrom payments.widgets import PaymentWidget\nfrom registrations.services import (\n accept_registration,\n accept_renewal,\n reject_registration,\n reject_renewal,\n)\n\nfrom .forms import RegistrationAdminForm\nfrom 
.models import Entry, Reference, Registration, Renewal\n\n\nclass ReferenceInline(admin.StackedInline):\n model = Reference\n extra = 0\n\n\[email protected](Registration)\nclass RegistrationAdmin(admin.ModelAdmin):\n \"\"\"Manage the registrations.\"\"\"\n\n list_display = (\n \"name\",\n \"email\",\n \"status\",\n \"membership_type\",\n \"contribution\",\n \"created_at\",\n \"payment\",\n \"no_references\",\n \"reference_count\",\n )\n list_filter = (\n \"status\",\n \"programme\",\n \"membership_type\",\n \"no_references\",\n \"payment__type\",\n \"contribution\",\n )\n inlines = (ReferenceInline,)\n search_fields = (\n \"first_name\",\n \"last_name\",\n \"email\",\n \"phone_number\",\n \"student_number\",\n )\n date_hierarchy = \"created_at\"\n fieldsets = (\n (\n _(\"Application information\"),\n {\n \"fields\": (\n \"created_at\",\n \"updated_at\",\n \"username\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"status\",\n \"payment\",\n \"remarks\",\n )\n },\n ),\n (\n _(\"Personal information\"),\n {\n \"fields\": (\n \"first_name\",\n \"last_name\",\n \"birthday\",\n \"optin_birthday\",\n \"email\",\n \"optin_mailinglist\",\n \"phone_number\",\n )\n },\n ),\n (\n _(\"Address\"),\n {\n \"fields\": (\n \"address_street\",\n \"address_street2\",\n \"address_postal_code\",\n \"address_city\",\n \"address_country\",\n )\n },\n ),\n (\n _(\"Financial\"),\n {\n \"fields\": (\n \"direct_debit\",\n \"initials\",\n \"iban\",\n \"bic\",\n \"signature\",\n )\n },\n ),\n (\n _(\"University information\"),\n {\n \"fields\": (\n \"student_number\",\n \"programme\",\n \"starting_year\",\n )\n },\n ),\n )\n\n form = RegistrationAdminForm\n\n actions = [\"accept_registrations\", \"reject_registrations\"]\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n\n if not request.user.has_perm(\"registrations.review_entries\"):\n if \"accept_registrations\" in actions:\n del actions[\"accept_registrations\"]\n if \"reject_registrations\" in actions:\n del actions[\"reject_registrations\"]\n\n return actions\n\n @admin.action(description=\"Accept selected registrations\")\n def accept_registrations(self, request, queryset): # pragma: no cover\n if queryset.exclude(status=Registration.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only registrations in review can be accepted\", \"error\"\n )\n return\n\n count = 0\n for registration in queryset:\n try:\n accept_registration(registration, actor=request.user)\n count += 1\n except ValueError as e:\n self.message_user(\n request, f\"Error accepting {registration}: {e.message}\", \"error\"\n )\n\n self.message_user(request, f\"Accepted {count} registrations\", \"success\")\n\n @admin.action(description=\"Reject selected registrations\")\n def reject_registrations(self, request, queryset): # pragma: no cover\n if queryset.exclude(status=Registration.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only registrations in review can be rejected\", \"error\"\n )\n return\n\n count = queryset.count()\n for registration in queryset:\n reject_registration(registration, actor=request.user)\n\n self.message_user(request, f\"Rejected {count} registrations\", \"success\")\n\n def reference_count(self, obj):\n return obj.reference_set.count()\n\n reference_count.short_description = _(\"references\")\n\n def get_form(self, request, obj=None, **kwargs):\n return super().get_form(\n request,\n obj,\n formfield_callback=partial(\n self.formfield_for_dbfield, request=request, obj=obj\n ),\n **kwargs,\n )\n\n def 
formfield_for_dbfield(self, db_field, request, obj=None, **kwargs):\n field = super().formfield_for_dbfield(db_field, request, **kwargs)\n if db_field.name == \"payment\":\n return Field(\n widget=PaymentWidget(obj=obj), initial=field.initial, required=False\n )\n return field\n\n def changeform_view(self, request, object_id=None, form_url=\"\", extra_context=None):\n \"\"\"Render the change formview.\n\n Only allow when the entry has not been processed yet\n \"\"\"\n obj = None\n can_review = False\n can_resend = False\n can_revert = False\n if object_id is not None and request.user.has_perm(\n \"registrations.review_entries\"\n ): # pragma: no cover\n obj = self.get_object(request, object_id)\n if obj is None:\n return self._get_obj_does_not_exist_redirect(\n request, self.opts, object_id\n )\n can_review = obj.status == Entry.STATUS_REVIEW\n can_revert = obj.status in [Entry.STATUS_ACCEPTED, Entry.STATUS_REJECTED]\n can_resend = obj.status == Entry.STATUS_CONFIRM and isinstance(\n obj, Registration\n )\n\n return super().changeform_view(\n request,\n object_id,\n form_url,\n {\n \"entry\": obj,\n \"can_review\": can_review,\n \"can_resend\": can_resend,\n \"can_revert\": can_revert,\n },\n )\n\n def get_readonly_fields(self, request, obj=None):\n if obj is None or obj.status not in (\n Entry.STATUS_REJECTED,\n Entry.STATUS_ACCEPTED,\n Entry.STATUS_COMPLETED,\n ):\n return [\"status\", \"created_at\", \"updated_at\", \"payment\"]\n return [\n field.name\n for field in self.model._meta.get_fields()\n if field.name not in [\"payment\", \"no_references\"] and field.editable\n ]\n\n @staticmethod\n def name(obj):\n return obj.get_full_name()\n\n def has_change_permission(self, request, obj=None):\n \"\"\"Completed registrations are read-only.\"\"\"\n return (\n False\n if obj and obj.status == Entry.STATUS_COMPLETED\n else super().has_change_permission(request, obj)\n )\n\n def has_add_permission(self, request):\n return False\n\n def save_model(self, request, obj, form, change):\n if obj.status not in (\n Entry.STATUS_REJECTED,\n Entry.STATUS_ACCEPTED,\n Entry.STATUS_COMPLETED,\n ):\n super().save_model(request, obj, form, change)\n\n\[email protected](Renewal)\nclass RenewalAdmin(RegistrationAdmin):\n \"\"\"Manage the renewals.\"\"\"\n\n list_display = (\n \"name\",\n \"email\",\n \"status\",\n \"membership_type\",\n \"contribution\",\n \"created_at\",\n \"payment\",\n \"no_references\",\n \"reference_count\",\n )\n list_filter = (\n \"status\",\n \"membership_type\",\n \"no_references\",\n \"payment__type\",\n \"contribution\",\n )\n search_fields = (\n \"member__first_name\",\n \"member__last_name\",\n \"member__email\",\n \"member__profile__phone_number\",\n \"member__profile__student_number\",\n )\n date_hierarchy = \"created_at\"\n fieldsets = (\n (\n _(\"Application information\"),\n {\n \"fields\": (\n \"created_at\",\n \"updated_at\",\n \"length\",\n \"contribution\",\n \"membership_type\",\n \"status\",\n \"payment\",\n \"remarks\",\n \"member\",\n )\n },\n ),\n )\n\n actions = [\"accept_renewals\", \"reject_renewals\"]\n\n def get_actions(self, request):\n actions = super().get_actions(request)\n\n if not request.user.has_perm(\"registrations.review_entries\"):\n if \"accept_renewals\" in actions: # pragma: no cover\n del actions[\"accept_renewals\"]\n if \"reject_renewals\" in actions: # pragma: no cover\n del actions[\"reject_renewals\"]\n\n return actions\n\n @admin.action(description=\"Accept selected renewals\")\n def accept_renewals(self, request, queryset): # pragma: no 
cover\n if queryset.exclude(status=Renewal.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only renewals in review can be accepted\", \"error\"\n )\n return\n\n count = queryset.count()\n for renewal in queryset:\n accept_renewal(renewal, actor=request.user)\n count += 1\n\n self.message_user(request, f\"Accepted {count} renewals\", \"success\")\n\n @admin.action(description=\"Reject selected renewals\")\n def reject_renewals(self, request, queryset): # pragma: no cover\n if queryset.exclude(status=Renewal.STATUS_REVIEW).exists():\n self.message_user(\n request, \"Only renewals in review can be rejected\", \"error\"\n )\n return\n\n count = queryset.count()\n for renewal in queryset:\n reject_renewal(renewal, actor=request.user)\n\n self.message_user(request, f\"Rejected {count} renewals\", \"success\")\n\n def get_readonly_fields(self, request, obj=None):\n \"\"\"Make all fields read-only and add member if needed.\"\"\"\n fields = super().get_readonly_fields(request, obj)\n if \"member\" not in fields and obj is not None:\n return fields + [\"member\"]\n return fields\n\n def has_add_permission(self, request):\n return False\n\n @staticmethod\n def name(obj):\n return obj.member.get_full_name()\n\n name.short_description = _(\"name\")\n\n @staticmethod\n def email(obj):\n return obj.member.email\n", "path": "website/registrations/admin.py"}]} | 3,549 | 251 |
gh_patches_debug_31355 | rasdani/github-patches | git_diff | Flexget__Flexget-2789 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pyload, error when checking version.
<!---
Before opening an issue, verify:
- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget
No.
- Is this an issue with webui? Make an issue over on https://github.com/Flexget/webui
No.
- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any
changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions
No.
- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon
Yes.
- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues
Yes.
- Did you fill out the issue template as completely as possible?
Yes!?
The issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!
--->
When there are accepted entries and the version check runs, an error occurs. I put the log in a pastebin; the error apparently is related to PR #2642
### Expected behaviour:
Add entries and download.
<!---
Please don't just say "it doesn't crash" or "it works". Explain what the expected result is.
--->
### Actual behaviour:
PASTEBIN: https://pastebin.com/R3Hk3zSh
### Steps to reproduce:
- Step 1: ...
When I run a task with the pyload plugin and there are accepted entries.
#### Config:
```yaml
--- config from task: pyload
accept_all: true
metainfo_series: true
pyload:
api: http://localhost:8000/api
enabled: true
folder: /media/luiz/HD/Downloads
hoster:
- MegauploadComFolder
- MegauploadCom
- ZippyshareCom
package: '{{series_name}} - {{tvdb_series_name}}'
parse_url: true
password: admin
queue: true
username: luiz
rss:
all_entries: true
ascii: false
escape: false
group_links: false
silent: false
url: /home/luiz/.flexget/rss.xml
thetvdb_lookup: true
trakt_lookup: true
---
```
#### Log:
<details>
<summary>https://pastebin.com/R3Hk3zSh</summary>
```
paste log output here
```
</details>
### Additional information:
- FlexGet version: 3.1.67.dev
- Python version: Python 3.8.2
- Installation method: git venv
- Using daemon (yes/no): yes
- OS and version: Linux HP 5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux
- Link to crash log: https://pastebin.com/R3Hk3zSh
<!---
In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.
Please verify that the following data is present before submitting your issue:
- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.
- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).
- FlexGet version (use `flexget -V` to get it).
- Full Python version, for example `2.7.11` (use `python -V` to get it).
- Installation method (pip, git install, etc).
- Whether or not you're running FlexGet as a daemon.
- OS and version.
- Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file.
--->
--- END ISSUE ---
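For orientation before the code: the crash sits in the server-version check that the reporter links to PR #2642. The snippet below is an illustrative sketch only — not FlexGet code, and the value of `remote_version` is an assumption (in the plugin it comes from the pyLoad API) — showing the shape of data that check needs in order to succeed:

```python
# Illustrative sketch, not FlexGet code: the version probe only works when
# remote_version is a plain version string such as "0.4.20" or "0.5.0".
from packaging import version

remote_version = "0.5.0"  # assumed example; the real value comes from the pyLoad API
is_pyload_ng = version.parse(remote_version) >= version.parse("0.5")
print(is_pyload_ng)  # True -> treat the server as pyload-ng, False -> old pyload
```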
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/clients/pyload.py`
Content:
```
1 from urllib.parse import quote
2
3 from loguru import logger
4 from requests.exceptions import RequestException
5
6 from flexget import plugin
7 from flexget.config_schema import one_or_more
8 from flexget.event import event
9 from flexget.utils import json
10 from flexget.utils.template import RenderError
11
12 logger = logger.bind(name='pyload')
13
14
15 class PyloadApi:
16 def __init__(self, requests, url):
17 self.requests = requests
18 self.url = url
19
20 def get_session(self, config):
21 # Login
22 data = {'username': config['username'], 'password': config['password']}
23 result = self.post('login', data=data)
24 response = result.json()
25 if not response:
26 raise plugin.PluginError('Login failed', logger)
27
28 if isinstance(response, str):
29 return response.replace('"', '')
30 else:
31 return response
32
33 def get(self, method):
34 try:
35 return self.requests.get(self.url.rstrip("/") + "/" + method.strip("/"))
36 except RequestException as e:
37 if e.response and e.response.status_code == 500:
38 raise plugin.PluginError(
39 'Internal API Error: <%s> <%s>' % (method, self.url), logger
40 )
41 raise
42
43 def post(self, method, data):
44 try:
45 return self.requests.post(self.url.rstrip("/") + "/" + method.strip("/"), data=data)
46 except RequestException as e:
47 if e.response and e.response.status_code == 500:
48 raise plugin.PluginError(
49 'Internal API Error: <%s> <%s> <%s>' % (method, self.url, data), logger
50 )
51 raise
52
53
54 class PluginPyLoad:
55 """
56 Parse task content or url for hoster links and adds them to pyLoad.
57
58 Example::
59
60 pyload:
61 api: http://localhost:8000/api
62 queue: yes
63 username: my_username
64 password: my_password
65 folder: desired_folder
66 package: desired_package_name (jinja2 supported)
67 package_password: desired_package_password
68 hoster:
69 - YoutubeCom
70 parse_url: no
71 multiple_hoster: yes
72 enabled: yes
73
74 Default values for the config elements::
75
76 pyload:
77 api: http://localhost:8000/api
78 queue: no
79 hoster: ALL
80 parse_url: no
81 multiple_hoster: yes
82 enabled: yes
83 """
84
85 __author__ = 'http://pyload.org'
86 __version__ = '0.5'
87
88 DEFAULT_API = 'http://localhost:8000/api'
89 DEFAULT_QUEUE = False
90 DEFAULT_FOLDER = ''
91 DEFAULT_HOSTER = []
92 DEFAULT_PARSE_URL = False
93 DEFAULT_MULTIPLE_HOSTER = True
94 DEFAULT_PREFERRED_HOSTER_ONLY = False
95 DEFAULT_HANDLE_NO_URL_AS_FAILURE = False
96
97 schema = {
98 'type': 'object',
99 'properties': {
100 'api': {'type': 'string'},
101 'username': {'type': 'string'},
102 'password': {'type': 'string'},
103 'folder': {'type': 'string'},
104 'package': {'type': 'string'},
105 'package_password': {'type': 'string'},
106 'queue': {'type': 'boolean'},
107 'parse_url': {'type': 'boolean'},
108 'multiple_hoster': {'type': 'boolean'},
109 'hoster': one_or_more({'type': 'string'}),
110 'preferred_hoster_only': {'type': 'boolean'},
111 'handle_no_url_as_failure': {'type': 'boolean'},
112 'enabled': {'type': 'boolean'},
113 },
114 'required': ['username', 'password'],
115 'additionalProperties': False,
116 }
117
118 def on_task_output(self, task, config):
119 if not config.get('enabled', True):
120 return
121 if not task.accepted:
122 return
123
124 self.add_entries(task, config)
125
126 @staticmethod
127 def get_version_from_packaging():
128 version = None
129 try:
130 from packaging import version
131 except ModuleNotFoundError:
132 logger.warning('packaging is not installed')
133 return version
134
135 def add_entries(self, task, config):
136 """Adds accepted entries"""
137
138 apiurl = config.get('api', self.DEFAULT_API)
139 api = PyloadApi(task.requests, apiurl)
140
141 try:
142 session = api.get_session(config)
143 except OSError:
144 raise plugin.PluginError('pyLoad not reachable', logger)
145 except plugin.PluginError:
146 raise
147 except Exception as e:
148 raise plugin.PluginError('Unknown error: %s' % str(e), logger)
149
150 remote_version = None
151 try:
152 remote_version = api.get('getServerVersion')
153 except RequestException as e:
154 if e.response is not None and e.response.status_code == 404:
155 remote_version = json.loads(api.get('get_server_version').content)
156 else:
157 raise e
158
159 parse_urls_command = 'parseURLs'
160 add_package_command = 'addPackage'
161 set_package_data_command = 'setPackageData'
162
163 is_pyload_ng = False
164 version = self.get_version_from_packaging()
165 if version and version.parse(remote_version) >= version.parse('0.5'):
166 parse_urls_command = 'parse_urls'
167 add_package_command = 'add_package'
168 set_package_data_command = 'set_package_date'
169 is_pyload_ng = True
170
171 hoster = config.get('hoster', self.DEFAULT_HOSTER)
172
173 for entry in task.accepted:
174 # bunch of urls now going to check
175 content = entry.get('description', '') + ' ' + quote(entry['url'])
176 content = json.dumps(content)
177
178 if is_pyload_ng:
179 url = entry['url'] if config.get('parse_url', self.DEFAULT_PARSE_URL) else ''
180 else:
181 url = (
182 json.dumps(entry['url'])
183 if config.get('parse_url', self.DEFAULT_PARSE_URL)
184 else "''"
185 )
186
187 logger.debug('Parsing url {}', url)
188
189 data = {'html': content, 'url': url}
190 if not is_pyload_ng:
191 data['session'] = session
192 result = api.post(parse_urls_command, data=data)
193
194 parsed = result.json()
195
196 urls = []
197
198 # check for preferred hoster
199 for name in hoster:
200 if name in parsed:
201 urls.extend(parsed[name])
202 if not config.get('multiple_hoster', self.DEFAULT_MULTIPLE_HOSTER):
203 break
204
205 # no preferred hoster and not preferred hoster only - add all recognized plugins
206 if not urls and not config.get(
207 'preferred_hoster_only', self.DEFAULT_PREFERRED_HOSTER_ONLY
208 ):
209 for name, purls in parsed.items():
210 if name != 'BasePlugin':
211 urls.extend(purls)
212
213 if task.options.test:
214 logger.info('Would add `{}` to pyload', urls)
215 continue
216
217 # no urls found
218 if not urls:
219 if config.get('handle_no_url_as_failure', self.DEFAULT_HANDLE_NO_URL_AS_FAILURE):
220 entry.fail('No suited urls in entry %s' % entry['title'])
221 else:
222 logger.info('No suited urls in entry {}', entry['title'])
223 continue
224
225 logger.debug('Add {} urls to pyLoad', len(urls))
226
227 try:
228 dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0 # Destination.Queue = 1
229
230 # Use the title of the entry, if no naming schema for the package is defined.
231 name = config.get('package', entry['title'])
232
233 # If name has jinja template, render it
234 try:
235 name = entry.render(name)
236 except RenderError as e:
237 name = entry['title']
238 logger.error('Error rendering jinja event: {}', e)
239
240 if is_pyload_ng:
241 data = {
242 'name': name.encode('ascii', 'ignore').decode(),
243 'links': urls,
244 'dest': dest,
245 }
246 else:
247 data = {
248 'name': json.dumps(name.encode('ascii', 'ignore').decode()),
249 'links': json.dumps(urls),
250 'dest': json.dumps(dest),
251 'session': session,
252 }
253
254 pid = api.post(add_package_command, data=data).text
255 logger.debug('added package pid: {}', pid)
256
257 # Set Folder
258 folder = config.get('folder', self.DEFAULT_FOLDER)
259 folder = entry.get('path', folder)
260 if folder:
261 # If folder has jinja template, render it
262 try:
263 folder = entry.render(folder)
264 except RenderError as e:
265 folder = self.DEFAULT_FOLDER
266 logger.error('Error rendering jinja event: {}', e)
267 # set folder with api
268 data = json.dumps({'folder': folder})
269 post_data = {'pid': pid, 'data': data}
270 if not is_pyload_ng:
271 post_data['session'] = session
272 api.post(set_package_data_command, data=post_data)
273
274 # Set Package Password
275 package_password = config.get('package_password')
276 if package_password:
277 data = json.dumps({'password': package_password})
278 post_data = {'pid': pid, 'data': data}
279 if not is_pyload_ng:
280 post_data['session'] = session
281 api.post(set_package_data_command, data=post_data)
282
283 except Exception as e:
284 entry.fail(str(e))
285
286
287 @event('plugin.register')
288 def register_plugin():
289 plugin.register(PluginPyLoad, 'pyload', api_ver=2)
290
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/clients/pyload.py b/flexget/plugins/clients/pyload.py
--- a/flexget/plugins/clients/pyload.py
+++ b/flexget/plugins/clients/pyload.py
@@ -123,15 +123,6 @@
self.add_entries(task, config)
- @staticmethod
- def get_version_from_packaging():
- version = None
- try:
- from packaging import version
- except ModuleNotFoundError:
- logger.warning('packaging is not installed')
- return version
-
def add_entries(self, task, config):
"""Adds accepted entries"""
@@ -147,26 +138,18 @@
except Exception as e:
raise plugin.PluginError('Unknown error: %s' % str(e), logger)
- remote_version = None
- try:
- remote_version = api.get('getServerVersion')
- except RequestException as e:
- if e.response is not None and e.response.status_code == 404:
- remote_version = json.loads(api.get('get_server_version').content)
- else:
- raise e
-
+ # old pyload (stable)
+ is_pyload_ng = False
parse_urls_command = 'parseURLs'
add_package_command = 'addPackage'
set_package_data_command = 'setPackageData'
- is_pyload_ng = False
- version = self.get_version_from_packaging()
- if version and version.parse(remote_version) >= version.parse('0.5'):
+ # pyload-ng is returning dict instead of session string on login
+ if isinstance(session, dict):
+ is_pyload_ng = True
parse_urls_command = 'parse_urls'
add_package_command = 'add_package'
set_package_data_command = 'set_package_date'
- is_pyload_ng = True
hoster = config.get('hoster', self.DEFAULT_HOSTER)
| {"golden_diff": "diff --git a/flexget/plugins/clients/pyload.py b/flexget/plugins/clients/pyload.py\n--- a/flexget/plugins/clients/pyload.py\n+++ b/flexget/plugins/clients/pyload.py\n@@ -123,15 +123,6 @@\n \n self.add_entries(task, config)\n \n- @staticmethod\n- def get_version_from_packaging():\n- version = None\n- try:\n- from packaging import version\n- except ModuleNotFoundError:\n- logger.warning('packaging is not installed')\n- return version\n-\n def add_entries(self, task, config):\n \"\"\"Adds accepted entries\"\"\"\n \n@@ -147,26 +138,18 @@\n except Exception as e:\n raise plugin.PluginError('Unknown error: %s' % str(e), logger)\n \n- remote_version = None\n- try:\n- remote_version = api.get('getServerVersion')\n- except RequestException as e:\n- if e.response is not None and e.response.status_code == 404:\n- remote_version = json.loads(api.get('get_server_version').content)\n- else:\n- raise e\n-\n+ # old pyload (stable)\n+ is_pyload_ng = False\n parse_urls_command = 'parseURLs'\n add_package_command = 'addPackage'\n set_package_data_command = 'setPackageData'\n \n- is_pyload_ng = False\n- version = self.get_version_from_packaging()\n- if version and version.parse(remote_version) >= version.parse('0.5'):\n+ # pyload-ng is returning dict instead of session string on login\n+ if isinstance(session, dict):\n+ is_pyload_ng = True\n parse_urls_command = 'parse_urls'\n add_package_command = 'add_package'\n set_package_data_command = 'set_package_date'\n- is_pyload_ng = True\n \n hoster = config.get('hoster', self.DEFAULT_HOSTER)\n", "issue": "Pyload, error when checking version.\n\r\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\nNo.\r\n- Is this an issue with webui? Make an issue over on https://github.com/Flexget/webui\r\nNo.\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any \r\nchanges to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\nNo.\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\nYes.\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\nYes.\r\n- Did you fill out the issue template as completely as possible?\r\nYes!?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\nWhen there are accepted entries and the version check, an error occurs, I put it in the pastbin, the error aparently is related with this PR #2642\r\n\r\n### Expected behaviour:\r\nAdd entries and download.\r\n\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". 
Explain what the expected result is.\r\n--->\r\n\r\n### Actual behaviour:\r\nPASTBIN: https://pastebin.com/R3Hk3zSh\r\n\r\n### Steps to reproduce:\r\n- Step 1: ...\r\nWhen I run a task with the pyload plugin and there are accepted entries.\r\n\r\n#### Config:\r\n```yaml--- config from task: pyload\r\naccept_all: true\r\nmetainfo_series: true\r\npyload:\r\n api: http://localhost:8000/api\r\n enabled: true\r\n folder: /media/luiz/HD/Downloads\r\n hoster:\r\n - MegauploadComFolder\r\n - MegauploadCom\r\n - ZippyshareCom\r\n package: '{{series_name}} - {{tvdb_series_name}}'\r\n parse_url: true\r\n password: admin\r\n queue: true\r\n username: luiz\r\nrss:\r\n all_entries: true\r\n ascii: false\r\n escape: false\r\n group_links: false\r\n silent: false\r\n url: /home/luiz/.flexget/rss.xml\r\nthetvdb_lookup: true\r\ntrakt_lookup: true\r\n\r\n---\r\n\r\n```\r\n \r\n#### Log:\r\n<details>\r\n <summary>https://pastebin.com/R3Hk3zSh</summary>\r\n\r\n```\r\npaste log output here\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.1.67.dev\r\n- Python version: Python 3.8.2\r\n- Installation method: git venv\r\n- Using daemon (yes/no): yes\r\n- OS and version: Linux HP 5.4.0-42-generic #46-Ubuntu SMP Fri Jul 10 00:24:02 UTC 2020 x86_64 x86_64 x86_64 GNU/Linux\r\n- Link to crash log: https://pastebin.com/R3Hk3zSh\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). \r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n\r\n- Attach crash log if one was generated, in addition to the debug-level log. 
It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "from urllib.parse import quote\n\nfrom loguru import logger\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\nfrom flexget.utils import json\nfrom flexget.utils.template import RenderError\n\nlogger = logger.bind(name='pyload')\n\n\nclass PyloadApi:\n def __init__(self, requests, url):\n self.requests = requests\n self.url = url\n\n def get_session(self, config):\n # Login\n data = {'username': config['username'], 'password': config['password']}\n result = self.post('login', data=data)\n response = result.json()\n if not response:\n raise plugin.PluginError('Login failed', logger)\n\n if isinstance(response, str):\n return response.replace('\"', '')\n else:\n return response\n\n def get(self, method):\n try:\n return self.requests.get(self.url.rstrip(\"/\") + \"/\" + method.strip(\"/\"))\n except RequestException as e:\n if e.response and e.response.status_code == 500:\n raise plugin.PluginError(\n 'Internal API Error: <%s> <%s>' % (method, self.url), logger\n )\n raise\n\n def post(self, method, data):\n try:\n return self.requests.post(self.url.rstrip(\"/\") + \"/\" + method.strip(\"/\"), data=data)\n except RequestException as e:\n if e.response and e.response.status_code == 500:\n raise plugin.PluginError(\n 'Internal API Error: <%s> <%s> <%s>' % (method, self.url, data), logger\n )\n raise\n\n\nclass PluginPyLoad:\n \"\"\"\n Parse task content or url for hoster links and adds them to pyLoad.\n\n Example::\n\n pyload:\n api: http://localhost:8000/api\n queue: yes\n username: my_username\n password: my_password\n folder: desired_folder\n package: desired_package_name (jinja2 supported)\n package_password: desired_package_password\n hoster:\n - YoutubeCom\n parse_url: no\n multiple_hoster: yes\n enabled: yes\n\n Default values for the config elements::\n\n pyload:\n api: http://localhost:8000/api\n queue: no\n hoster: ALL\n parse_url: no\n multiple_hoster: yes\n enabled: yes\n \"\"\"\n\n __author__ = 'http://pyload.org'\n __version__ = '0.5'\n\n DEFAULT_API = 'http://localhost:8000/api'\n DEFAULT_QUEUE = False\n DEFAULT_FOLDER = ''\n DEFAULT_HOSTER = []\n DEFAULT_PARSE_URL = False\n DEFAULT_MULTIPLE_HOSTER = True\n DEFAULT_PREFERRED_HOSTER_ONLY = False\n DEFAULT_HANDLE_NO_URL_AS_FAILURE = False\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'api': {'type': 'string'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'folder': {'type': 'string'},\n 'package': {'type': 'string'},\n 'package_password': {'type': 'string'},\n 'queue': {'type': 'boolean'},\n 'parse_url': {'type': 'boolean'},\n 'multiple_hoster': {'type': 'boolean'},\n 'hoster': one_or_more({'type': 'string'}),\n 'preferred_hoster_only': {'type': 'boolean'},\n 'handle_no_url_as_failure': {'type': 'boolean'},\n 'enabled': {'type': 'boolean'},\n },\n 'required': ['username', 'password'],\n 'additionalProperties': False,\n }\n\n def on_task_output(self, task, config):\n if not config.get('enabled', True):\n return\n if not task.accepted:\n return\n\n self.add_entries(task, config)\n\n @staticmethod\n def get_version_from_packaging():\n version = None\n try:\n from packaging import version\n except ModuleNotFoundError:\n logger.warning('packaging is not installed')\n return version\n\n def add_entries(self, task, config):\n \"\"\"Adds accepted entries\"\"\"\n\n apiurl = config.get('api', 
self.DEFAULT_API)\n api = PyloadApi(task.requests, apiurl)\n\n try:\n session = api.get_session(config)\n except OSError:\n raise plugin.PluginError('pyLoad not reachable', logger)\n except plugin.PluginError:\n raise\n except Exception as e:\n raise plugin.PluginError('Unknown error: %s' % str(e), logger)\n\n remote_version = None\n try:\n remote_version = api.get('getServerVersion')\n except RequestException as e:\n if e.response is not None and e.response.status_code == 404:\n remote_version = json.loads(api.get('get_server_version').content)\n else:\n raise e\n\n parse_urls_command = 'parseURLs'\n add_package_command = 'addPackage'\n set_package_data_command = 'setPackageData'\n\n is_pyload_ng = False\n version = self.get_version_from_packaging()\n if version and version.parse(remote_version) >= version.parse('0.5'):\n parse_urls_command = 'parse_urls'\n add_package_command = 'add_package'\n set_package_data_command = 'set_package_date'\n is_pyload_ng = True\n\n hoster = config.get('hoster', self.DEFAULT_HOSTER)\n\n for entry in task.accepted:\n # bunch of urls now going to check\n content = entry.get('description', '') + ' ' + quote(entry['url'])\n content = json.dumps(content)\n\n if is_pyload_ng:\n url = entry['url'] if config.get('parse_url', self.DEFAULT_PARSE_URL) else ''\n else:\n url = (\n json.dumps(entry['url'])\n if config.get('parse_url', self.DEFAULT_PARSE_URL)\n else \"''\"\n )\n\n logger.debug('Parsing url {}', url)\n\n data = {'html': content, 'url': url}\n if not is_pyload_ng:\n data['session'] = session\n result = api.post(parse_urls_command, data=data)\n\n parsed = result.json()\n\n urls = []\n\n # check for preferred hoster\n for name in hoster:\n if name in parsed:\n urls.extend(parsed[name])\n if not config.get('multiple_hoster', self.DEFAULT_MULTIPLE_HOSTER):\n break\n\n # no preferred hoster and not preferred hoster only - add all recognized plugins\n if not urls and not config.get(\n 'preferred_hoster_only', self.DEFAULT_PREFERRED_HOSTER_ONLY\n ):\n for name, purls in parsed.items():\n if name != 'BasePlugin':\n urls.extend(purls)\n\n if task.options.test:\n logger.info('Would add `{}` to pyload', urls)\n continue\n\n # no urls found\n if not urls:\n if config.get('handle_no_url_as_failure', self.DEFAULT_HANDLE_NO_URL_AS_FAILURE):\n entry.fail('No suited urls in entry %s' % entry['title'])\n else:\n logger.info('No suited urls in entry {}', entry['title'])\n continue\n\n logger.debug('Add {} urls to pyLoad', len(urls))\n\n try:\n dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0 # Destination.Queue = 1\n\n # Use the title of the entry, if no naming schema for the package is defined.\n name = config.get('package', entry['title'])\n\n # If name has jinja template, render it\n try:\n name = entry.render(name)\n except RenderError as e:\n name = entry['title']\n logger.error('Error rendering jinja event: {}', e)\n\n if is_pyload_ng:\n data = {\n 'name': name.encode('ascii', 'ignore').decode(),\n 'links': urls,\n 'dest': dest,\n }\n else:\n data = {\n 'name': json.dumps(name.encode('ascii', 'ignore').decode()),\n 'links': json.dumps(urls),\n 'dest': json.dumps(dest),\n 'session': session,\n }\n\n pid = api.post(add_package_command, data=data).text\n logger.debug('added package pid: {}', pid)\n\n # Set Folder\n folder = config.get('folder', self.DEFAULT_FOLDER)\n folder = entry.get('path', folder)\n if folder:\n # If folder has jinja template, render it\n try:\n folder = entry.render(folder)\n except RenderError as e:\n folder = self.DEFAULT_FOLDER\n 
logger.error('Error rendering jinja event: {}', e)\n # set folder with api\n data = json.dumps({'folder': folder})\n post_data = {'pid': pid, 'data': data}\n if not is_pyload_ng:\n post_data['session'] = session\n api.post(set_package_data_command, data=post_data)\n\n # Set Package Password\n package_password = config.get('package_password')\n if package_password:\n data = json.dumps({'password': package_password})\n post_data = {'pid': pid, 'data': data}\n if not is_pyload_ng:\n post_data['session'] = session\n api.post(set_package_data_command, data=post_data)\n\n except Exception as e:\n entry.fail(str(e))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginPyLoad, 'pyload', api_ver=2)\n", "path": "flexget/plugins/clients/pyload.py"}], "after_files": [{"content": "from urllib.parse import quote\n\nfrom loguru import logger\nfrom requests.exceptions import RequestException\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\nfrom flexget.utils import json\nfrom flexget.utils.template import RenderError\n\nlogger = logger.bind(name='pyload')\n\n\nclass PyloadApi:\n def __init__(self, requests, url):\n self.requests = requests\n self.url = url\n\n def get_session(self, config):\n # Login\n data = {'username': config['username'], 'password': config['password']}\n result = self.post('login', data=data)\n response = result.json()\n if not response:\n raise plugin.PluginError('Login failed', logger)\n\n if isinstance(response, str):\n return response.replace('\"', '')\n else:\n return response\n\n def get(self, method):\n try:\n return self.requests.get(self.url.rstrip(\"/\") + \"/\" + method.strip(\"/\"))\n except RequestException as e:\n if e.response and e.response.status_code == 500:\n raise plugin.PluginError(\n 'Internal API Error: <%s> <%s>' % (method, self.url), logger\n )\n raise\n\n def post(self, method, data):\n try:\n return self.requests.post(self.url.rstrip(\"/\") + \"/\" + method.strip(\"/\"), data=data)\n except RequestException as e:\n if e.response and e.response.status_code == 500:\n raise plugin.PluginError(\n 'Internal API Error: <%s> <%s> <%s>' % (method, self.url, data), logger\n )\n raise\n\n\nclass PluginPyLoad:\n \"\"\"\n Parse task content or url for hoster links and adds them to pyLoad.\n\n Example::\n\n pyload:\n api: http://localhost:8000/api\n queue: yes\n username: my_username\n password: my_password\n folder: desired_folder\n package: desired_package_name (jinja2 supported)\n package_password: desired_package_password\n hoster:\n - YoutubeCom\n parse_url: no\n multiple_hoster: yes\n enabled: yes\n\n Default values for the config elements::\n\n pyload:\n api: http://localhost:8000/api\n queue: no\n hoster: ALL\n parse_url: no\n multiple_hoster: yes\n enabled: yes\n \"\"\"\n\n __author__ = 'http://pyload.org'\n __version__ = '0.5'\n\n DEFAULT_API = 'http://localhost:8000/api'\n DEFAULT_QUEUE = False\n DEFAULT_FOLDER = ''\n DEFAULT_HOSTER = []\n DEFAULT_PARSE_URL = False\n DEFAULT_MULTIPLE_HOSTER = True\n DEFAULT_PREFERRED_HOSTER_ONLY = False\n DEFAULT_HANDLE_NO_URL_AS_FAILURE = False\n\n schema = {\n 'type': 'object',\n 'properties': {\n 'api': {'type': 'string'},\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'folder': {'type': 'string'},\n 'package': {'type': 'string'},\n 'package_password': {'type': 'string'},\n 'queue': {'type': 'boolean'},\n 'parse_url': {'type': 'boolean'},\n 'multiple_hoster': {'type': 'boolean'},\n 'hoster': one_or_more({'type': 
'string'}),\n 'preferred_hoster_only': {'type': 'boolean'},\n 'handle_no_url_as_failure': {'type': 'boolean'},\n 'enabled': {'type': 'boolean'},\n },\n 'required': ['username', 'password'],\n 'additionalProperties': False,\n }\n\n def on_task_output(self, task, config):\n if not config.get('enabled', True):\n return\n if not task.accepted:\n return\n\n self.add_entries(task, config)\n\n def add_entries(self, task, config):\n \"\"\"Adds accepted entries\"\"\"\n\n apiurl = config.get('api', self.DEFAULT_API)\n api = PyloadApi(task.requests, apiurl)\n\n try:\n session = api.get_session(config)\n except OSError:\n raise plugin.PluginError('pyLoad not reachable', logger)\n except plugin.PluginError:\n raise\n except Exception as e:\n raise plugin.PluginError('Unknown error: %s' % str(e), logger)\n\n # old pyload (stable)\n is_pyload_ng = False\n parse_urls_command = 'parseURLs'\n add_package_command = 'addPackage'\n set_package_data_command = 'setPackageData'\n\n # pyload-ng is returning dict instead of session string on login\n if isinstance(session, dict):\n is_pyload_ng = True\n parse_urls_command = 'parse_urls'\n add_package_command = 'add_package'\n set_package_data_command = 'set_package_date'\n\n hoster = config.get('hoster', self.DEFAULT_HOSTER)\n\n for entry in task.accepted:\n # bunch of urls now going to check\n content = entry.get('description', '') + ' ' + quote(entry['url'])\n content = json.dumps(content)\n\n if is_pyload_ng:\n url = entry['url'] if config.get('parse_url', self.DEFAULT_PARSE_URL) else ''\n else:\n url = (\n json.dumps(entry['url'])\n if config.get('parse_url', self.DEFAULT_PARSE_URL)\n else \"''\"\n )\n\n logger.debug('Parsing url {}', url)\n\n data = {'html': content, 'url': url}\n if not is_pyload_ng:\n data['session'] = session\n result = api.post(parse_urls_command, data=data)\n\n parsed = result.json()\n\n urls = []\n\n # check for preferred hoster\n for name in hoster:\n if name in parsed:\n urls.extend(parsed[name])\n if not config.get('multiple_hoster', self.DEFAULT_MULTIPLE_HOSTER):\n break\n\n # no preferred hoster and not preferred hoster only - add all recognized plugins\n if not urls and not config.get(\n 'preferred_hoster_only', self.DEFAULT_PREFERRED_HOSTER_ONLY\n ):\n for name, purls in parsed.items():\n if name != 'BasePlugin':\n urls.extend(purls)\n\n if task.options.test:\n logger.info('Would add `{}` to pyload', urls)\n continue\n\n # no urls found\n if not urls:\n if config.get('handle_no_url_as_failure', self.DEFAULT_HANDLE_NO_URL_AS_FAILURE):\n entry.fail('No suited urls in entry %s' % entry['title'])\n else:\n logger.info('No suited urls in entry {}', entry['title'])\n continue\n\n logger.debug('Add {} urls to pyLoad', len(urls))\n\n try:\n dest = 1 if config.get('queue', self.DEFAULT_QUEUE) else 0 # Destination.Queue = 1\n\n # Use the title of the entry, if no naming schema for the package is defined.\n name = config.get('package', entry['title'])\n\n # If name has jinja template, render it\n try:\n name = entry.render(name)\n except RenderError as e:\n name = entry['title']\n logger.error('Error rendering jinja event: {}', e)\n\n if is_pyload_ng:\n data = {\n 'name': name.encode('ascii', 'ignore').decode(),\n 'links': urls,\n 'dest': dest,\n }\n else:\n data = {\n 'name': json.dumps(name.encode('ascii', 'ignore').decode()),\n 'links': json.dumps(urls),\n 'dest': json.dumps(dest),\n 'session': session,\n }\n\n pid = api.post(add_package_command, data=data).text\n logger.debug('added package pid: {}', pid)\n\n # Set Folder\n folder = 
config.get('folder', self.DEFAULT_FOLDER)\n folder = entry.get('path', folder)\n if folder:\n # If folder has jinja template, render it\n try:\n folder = entry.render(folder)\n except RenderError as e:\n folder = self.DEFAULT_FOLDER\n logger.error('Error rendering jinja event: {}', e)\n # set folder with api\n data = json.dumps({'folder': folder})\n post_data = {'pid': pid, 'data': data}\n if not is_pyload_ng:\n post_data['session'] = session\n api.post(set_package_data_command, data=post_data)\n\n # Set Package Password\n package_password = config.get('package_password')\n if package_password:\n data = json.dumps({'password': package_password})\n post_data = {'pid': pid, 'data': data}\n if not is_pyload_ng:\n post_data['session'] = session\n api.post(set_package_data_command, data=post_data)\n\n except Exception as e:\n entry.fail(str(e))\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(PluginPyLoad, 'pyload', api_ver=2)\n", "path": "flexget/plugins/clients/pyload.py"}]} | 4,042 | 434 |
gh_patches_debug_21740 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-503 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
New CSV imports use autogenerated column names.
## Description
<!-- A clear and concise description of what the bug is. -->
According to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.
## Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
The first row of the CSV should be used as header names by default.
## To Reproduce
<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->
Upload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.
--- END ISSUE ---
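Context for the serializer code below (an illustrative Django REST Framework sketch, not Mathesar code): whether a boolean field like `header` behaves as opt-in or on-by-default hinges on declaring it as required versus giving it a default value.

```python
# Illustrative Django REST Framework sketch, not Mathesar code.
from rest_framework import serializers

class ExplicitHeader(serializers.Serializer):
    # the client must send "header" itself; omitting it fails validation
    header = serializers.BooleanField(required=True)

class DefaultHeader(serializers.Serializer):
    # "header" is True unless the client explicitly sends header=false
    header = serializers.BooleanField(default=True)
```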
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/serializers.py`
Content:
```
1 from django.urls import reverse
2 from rest_framework import serializers
3
4 from mathesar.models import Table, Schema, DataFile, Database
5
6
7 class NestedTableSerializer(serializers.HyperlinkedModelSerializer):
8 url = serializers.SerializerMethodField()
9
10 class Meta:
11 model = Table
12 fields = ['id', 'name', 'url']
13
14 def get_url(self, obj):
15 request = self.context['request']
16 return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))
17
18
19 class ModelNameField(serializers.CharField):
20 """
21 De-serializes the request field as a string, but serializes the response field as
22 `model.name`. Required to support passing and returing a model name from the
23 endpoint, while also storing the model as a related field.
24 """
25 def to_representation(self, value):
26 return value.name
27
28
29 class SchemaSerializer(serializers.HyperlinkedModelSerializer):
30 tables = NestedTableSerializer(many=True, read_only=True)
31 name = serializers.CharField()
32 database = ModelNameField(max_length=128)
33
34 class Meta:
35 model = Schema
36 fields = ['id', 'name', 'tables', 'database', 'has_dependencies']
37
38
39 class SimpleColumnSerializer(serializers.Serializer):
40 name = serializers.CharField()
41 type = serializers.CharField()
42
43
44 class ColumnSerializer(SimpleColumnSerializer):
45 index = serializers.IntegerField(source='column_index', read_only=True)
46 nullable = serializers.BooleanField(default=True)
47 primary_key = serializers.BooleanField(default=False)
48 valid_target_types = serializers.ListField(read_only=True)
49
50
51 class TableSerializer(serializers.ModelSerializer):
52 columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')
53 records = serializers.SerializerMethodField()
54 name = serializers.CharField()
55 data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())
56
57 class Meta:
58 model = Table
59 fields = ['id', 'name', 'schema', 'created_at', 'updated_at',
60 'columns', 'records', 'data_files', 'has_dependencies']
61
62 def get_records(self, obj):
63 if isinstance(obj, Table):
64 # Only get records if we are serializing an existing table
65 request = self.context['request']
66 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
67 else:
68 return None
69
70
71 class RecordSerializer(serializers.BaseSerializer):
72 def to_representation(self, instance):
73 return instance._asdict()
74
75
76 class RecordListParameterSerializer(serializers.Serializer):
77 filters = serializers.JSONField(required=False, default=[])
78 order_by = serializers.JSONField(required=False, default=[])
79 group_count_by = serializers.JSONField(required=False, default=[])
80
81
82 class DatabaseSerializer(serializers.ModelSerializer):
83 supported_types = serializers.ListField(child=serializers.CharField())
84
85 class Meta:
86 model = Database
87 fields = ['id', 'name', 'deleted', 'supported_types']
88 read_only_fields = ['id', 'name', 'deleted', 'supported_types']
89
90
91 class DataFileSerializer(serializers.ModelSerializer):
92 user = serializers.PrimaryKeyRelatedField(
93 default=serializers.CurrentUserDefault(), read_only=True
94 )
95
96 class Meta:
97 model = DataFile
98 fields = [
99 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'
100 ]
101 extra_kwargs = {'delimiter': {'trim_whitespace': False},
102 'escapechar': {'trim_whitespace': False},
103 'quotechar': {'trim_whitespace': False},
104 'header': {'required': True}}
105 # We only currently support importing to a new table, so setting a table via API is invalid.
106 # User should be set automatically, not submitted via the API.
107 read_only_fields = ['table_imported_to']
108
109 def save(self, **kwargs):
110 """
111 Set user to current user while saving the data file.
112 """
113 current_user = self.fields['user'].get_default()
114 if current_user.is_authenticated:
115 kwargs['user'] = current_user
116 return super().save(**kwargs)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/serializers.py b/mathesar/serializers.py
--- a/mathesar/serializers.py
+++ b/mathesar/serializers.py
@@ -92,6 +92,7 @@
user = serializers.PrimaryKeyRelatedField(
default=serializers.CurrentUserDefault(), read_only=True
)
+ header = serializers.BooleanField(default=True)
class Meta:
model = DataFile
@@ -100,8 +101,8 @@
]
extra_kwargs = {'delimiter': {'trim_whitespace': False},
'escapechar': {'trim_whitespace': False},
- 'quotechar': {'trim_whitespace': False},
- 'header': {'required': True}}
+ 'quotechar': {'trim_whitespace': False}
+ }
# We only currently support importing to a new table, so setting a table via API is invalid.
# User should be set automatically, not submitted via the API.
read_only_fields = ['table_imported_to']
| {"golden_diff": "diff --git a/mathesar/serializers.py b/mathesar/serializers.py\n--- a/mathesar/serializers.py\n+++ b/mathesar/serializers.py\n@@ -92,6 +92,7 @@\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n+ header = serializers.BooleanField(default=True)\n \n class Meta:\n model = DataFile\n@@ -100,8 +101,8 @@\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n- 'quotechar': {'trim_whitespace': False},\n- 'header': {'required': True}}\n+ 'quotechar': {'trim_whitespace': False}\n+ }\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n", "issue": "New CSV imports use autogenerated column names.\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\nAccording to #459, newly imported CSVs are supposed to use the first row as headers by default. However, newly uploaded CSVs are showing autogenerated column names.\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nThe first row of the CSV should be used as header names by default.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\nUpload a CSV from the Mathesar UI. Column names will be of the form `column_0`, etc.\n", "before_files": [{"content": "from django.urls import reverse\nfrom rest_framework import serializers\n\nfrom mathesar.models import Table, Schema, DataFile, Database\n\n\nclass NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. 
Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n tables = NestedTableSerializer(many=True, read_only=True)\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'tables', 'database', 'has_dependencies']\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField()\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n index = serializers.IntegerField(source='column_index', read_only=True)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n valid_target_types = serializers.ListField(read_only=True)\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')\n records = serializers.SerializerMethodField()\n name = serializers.CharField()\n data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at',\n 'columns', 'records', 'data_files', 'has_dependencies']\n\n def get_records(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types = serializers.ListField(child=serializers.CharField())\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types']\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False},\n 'header': {'required': True}}\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n\n def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n", "path": "mathesar/serializers.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom rest_framework import serializers\n\nfrom mathesar.models import Table, Schema, DataFile, Database\n\n\nclass 
NestedTableSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.SerializerMethodField()\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'url']\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-detail', kwargs={'pk': obj.pk}))\n\n\nclass ModelNameField(serializers.CharField):\n \"\"\"\n De-serializes the request field as a string, but serializes the response field as\n `model.name`. Required to support passing and returing a model name from the\n endpoint, while also storing the model as a related field.\n \"\"\"\n def to_representation(self, value):\n return value.name\n\n\nclass SchemaSerializer(serializers.HyperlinkedModelSerializer):\n tables = NestedTableSerializer(many=True, read_only=True)\n name = serializers.CharField()\n database = ModelNameField(max_length=128)\n\n class Meta:\n model = Schema\n fields = ['id', 'name', 'tables', 'database', 'has_dependencies']\n\n\nclass SimpleColumnSerializer(serializers.Serializer):\n name = serializers.CharField()\n type = serializers.CharField()\n\n\nclass ColumnSerializer(SimpleColumnSerializer):\n index = serializers.IntegerField(source='column_index', read_only=True)\n nullable = serializers.BooleanField(default=True)\n primary_key = serializers.BooleanField(default=False)\n valid_target_types = serializers.ListField(read_only=True)\n\n\nclass TableSerializer(serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, read_only=True, source='sa_columns')\n records = serializers.SerializerMethodField()\n name = serializers.CharField()\n data_files = serializers.PrimaryKeyRelatedField(required=False, many=True, queryset=DataFile.objects.all())\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at',\n 'columns', 'records', 'data_files', 'has_dependencies']\n\n def get_records(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n\nclass RecordSerializer(serializers.BaseSerializer):\n def to_representation(self, instance):\n return instance._asdict()\n\n\nclass RecordListParameterSerializer(serializers.Serializer):\n filters = serializers.JSONField(required=False, default=[])\n order_by = serializers.JSONField(required=False, default=[])\n group_count_by = serializers.JSONField(required=False, default=[])\n\n\nclass DatabaseSerializer(serializers.ModelSerializer):\n supported_types = serializers.ListField(child=serializers.CharField())\n\n class Meta:\n model = Database\n fields = ['id', 'name', 'deleted', 'supported_types']\n read_only_fields = ['id', 'name', 'deleted', 'supported_types']\n\n\nclass DataFileSerializer(serializers.ModelSerializer):\n user = serializers.PrimaryKeyRelatedField(\n default=serializers.CurrentUserDefault(), read_only=True\n )\n header = serializers.BooleanField(default=True)\n\n class Meta:\n model = DataFile\n fields = [\n 'id', 'file', 'table_imported_to', 'user', 'header', 'delimiter', 'escapechar', 'quotechar'\n ]\n extra_kwargs = {'delimiter': {'trim_whitespace': False},\n 'escapechar': {'trim_whitespace': False},\n 'quotechar': {'trim_whitespace': False}\n }\n # We only currently support importing to a new table, so setting a table via API is invalid.\n # User should be set automatically, not submitted via the API.\n read_only_fields = ['table_imported_to']\n\n 
def save(self, **kwargs):\n \"\"\"\n Set user to current user while saving the data file.\n \"\"\"\n current_user = self.fields['user'].get_default()\n if current_user.is_authenticated:\n kwargs['user'] = current_user\n return super().save(**kwargs)\n", "path": "mathesar/serializers.py"}]} | 1,529 | 214 |
gh_patches_debug_47851 | rasdani/github-patches | git_diff | searx__searx-2256 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make secret_key default one that will fail if not set to a custom value
Currently, the `secret_key` default value is `ultrasecretkey`, which is a valid value. Would it not be better to let the default value of this setting be one that will make searx fail to start? This will force the user to consciously change this setting to a secure value instead of accidentally forgetting to set it to something random and secure.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/__init__.py`
Content:
```
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2013- by Adam Tauber, <[email protected]>
16 '''
17
18 import logging
19 from os import environ
20 from os.path import realpath, dirname, join, abspath, isfile
21 from io import open
22 from yaml import safe_load
23
24
25 searx_dir = abspath(dirname(__file__))
26 engine_dir = dirname(realpath(__file__))
27 static_path = abspath(join(dirname(__file__), 'static'))
28
29
30 def check_settings_yml(file_name):
31 if isfile(file_name):
32 return file_name
33 else:
34 return None
35
36
37 # find location of settings.yml
38 if 'SEARX_SETTINGS_PATH' in environ:
39 # if possible set path to settings using the
40 # enviroment variable SEARX_SETTINGS_PATH
41 settings_path = check_settings_yml(environ['SEARX_SETTINGS_PATH'])
42 else:
43 # if not, get it from searx code base or last solution from /etc/searx
44 settings_path = check_settings_yml(join(searx_dir, 'settings.yml')) or check_settings_yml('/etc/searx/settings.yml')
45
46 if not settings_path:
47 raise Exception('settings.yml not found')
48
49 # load settings
50 with open(settings_path, 'r', encoding='utf-8') as settings_yaml:
51 settings = safe_load(settings_yaml)
52
53 if settings['ui']['static_path']:
54 static_path = settings['ui']['static_path']
55
56 '''
57 enable debug if
58 the environnement variable SEARX_DEBUG is 1 or true
59 (whatever the value in settings.yml)
60 or general.debug=True in settings.yml
61
62 disable debug if
63 the environnement variable SEARX_DEBUG is 0 or false
64 (whatever the value in settings.yml)
65 or general.debug=False in settings.yml
66 '''
67 searx_debug_env = environ.get('SEARX_DEBUG', '').lower()
68 if searx_debug_env == 'true' or searx_debug_env == '1':
69 searx_debug = True
70 elif searx_debug_env == 'false' or searx_debug_env == '0':
71 searx_debug = False
72 else:
73 searx_debug = settings.get('general', {}).get('debug')
74
75 if searx_debug:
76 logging.basicConfig(level=logging.DEBUG)
77 else:
78 logging.basicConfig(level=logging.WARNING)
79
80 logger = logging.getLogger('searx')
81 logger.debug('read configuration from %s', settings_path)
82 logger.info('Initialisation done')
83
84 if 'SEARX_SECRET' in environ:
85 settings['server']['secret_key'] = environ['SEARX_SECRET']
86 if 'SEARX_BIND_ADDRESS' in environ:
87 settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']
88
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/__init__.py b/searx/__init__.py
--- a/searx/__init__.py
+++ b/searx/__init__.py
@@ -85,3 +85,7 @@
settings['server']['secret_key'] = environ['SEARX_SECRET']
if 'SEARX_BIND_ADDRESS' in environ:
settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']
+
+if not searx_debug and settings['server']['secret_key'] == 'ultrasecretkey':
+ logger.error('server.secret_key is not changed. Please use something else instead of ultrasecretkey.')
+ exit(1)
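
The guard runs only outside debug mode, so development setups keep working while a production instance that still ships `ultrasecretkey` refuses to start. Below is a stand-alone sketch of the same check; the inline settings dict and the fixed `searx_debug` flag are illustrative assumptions rather than searx code.

```python
# Stand-alone sketch of the startup guard from the patch above; the inline
# settings dict and the hard-coded searx_debug flag are illustrative only.
import os

settings = {"server": {"secret_key": "ultrasecretkey"}}
searx_debug = False

# Same override hook searx/__init__.py already honours.
if "SEARX_SECRET" in os.environ:
    settings["server"]["secret_key"] = os.environ["SEARX_SECRET"]

if not searx_debug and settings["server"]["secret_key"] == "ultrasecretkey":
    # A suitable value can be generated with, e.g.:
    #   python -c "import secrets; print(secrets.token_hex(32))"
    raise SystemExit("server.secret_key is not changed. Please use something "
                     "else instead of ultrasecretkey.")
```

Operators can satisfy the check either by exporting `SEARX_SECRET` or by editing `server.secret_key` in `settings.yml`.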
| {"golden_diff": "diff --git a/searx/__init__.py b/searx/__init__.py\n--- a/searx/__init__.py\n+++ b/searx/__init__.py\n@@ -85,3 +85,7 @@\n settings['server']['secret_key'] = environ['SEARX_SECRET']\n if 'SEARX_BIND_ADDRESS' in environ:\n settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']\n+\n+if not searx_debug and settings['server']['secret_key'] == 'ultrasecretkey':\n+ logger.error('server.secret_key is not changed. Please use something else instead of ultrasecretkey.')\n+ exit(1)\n", "issue": "Make secret_key default one that will fail if not set to a custom value\nCurrently, the `secret_key` default value is `ultrasecretkey` which is a valid value. Would it not be better to let the default value of this setting be one that will make searx fail to start? This will force the user to conciously change this setting to a secure value instead of accidentally forgetting to set this to something random and secure.\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\nimport logging\nfrom os import environ\nfrom os.path import realpath, dirname, join, abspath, isfile\nfrom io import open\nfrom yaml import safe_load\n\n\nsearx_dir = abspath(dirname(__file__))\nengine_dir = dirname(realpath(__file__))\nstatic_path = abspath(join(dirname(__file__), 'static'))\n\n\ndef check_settings_yml(file_name):\n if isfile(file_name):\n return file_name\n else:\n return None\n\n\n# find location of settings.yml\nif 'SEARX_SETTINGS_PATH' in environ:\n # if possible set path to settings using the\n # enviroment variable SEARX_SETTINGS_PATH\n settings_path = check_settings_yml(environ['SEARX_SETTINGS_PATH'])\nelse:\n # if not, get it from searx code base or last solution from /etc/searx\n settings_path = check_settings_yml(join(searx_dir, 'settings.yml')) or check_settings_yml('/etc/searx/settings.yml')\n\nif not settings_path:\n raise Exception('settings.yml not found')\n\n# load settings\nwith open(settings_path, 'r', encoding='utf-8') as settings_yaml:\n settings = safe_load(settings_yaml)\n\nif settings['ui']['static_path']:\n static_path = settings['ui']['static_path']\n\n'''\nenable debug if\nthe environnement variable SEARX_DEBUG is 1 or true\n(whatever the value in settings.yml)\nor general.debug=True in settings.yml\n\ndisable debug if\nthe environnement variable SEARX_DEBUG is 0 or false\n(whatever the value in settings.yml)\nor general.debug=False in settings.yml\n'''\nsearx_debug_env = environ.get('SEARX_DEBUG', '').lower()\nif searx_debug_env == 'true' or searx_debug_env == '1':\n searx_debug = True\nelif searx_debug_env == 'false' or searx_debug_env == '0':\n searx_debug = False\nelse:\n searx_debug = settings.get('general', {}).get('debug')\n\nif searx_debug:\n logging.basicConfig(level=logging.DEBUG)\nelse:\n logging.basicConfig(level=logging.WARNING)\n\nlogger = logging.getLogger('searx')\nlogger.debug('read configuration from 
%s', settings_path)\nlogger.info('Initialisation done')\n\nif 'SEARX_SECRET' in environ:\n settings['server']['secret_key'] = environ['SEARX_SECRET']\nif 'SEARX_BIND_ADDRESS' in environ:\n settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']\n", "path": "searx/__init__.py"}], "after_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2013- by Adam Tauber, <[email protected]>\n'''\n\nimport logging\nfrom os import environ\nfrom os.path import realpath, dirname, join, abspath, isfile\nfrom io import open\nfrom yaml import safe_load\n\n\nsearx_dir = abspath(dirname(__file__))\nengine_dir = dirname(realpath(__file__))\nstatic_path = abspath(join(dirname(__file__), 'static'))\n\n\ndef check_settings_yml(file_name):\n if isfile(file_name):\n return file_name\n else:\n return None\n\n\n# find location of settings.yml\nif 'SEARX_SETTINGS_PATH' in environ:\n # if possible set path to settings using the\n # enviroment variable SEARX_SETTINGS_PATH\n settings_path = check_settings_yml(environ['SEARX_SETTINGS_PATH'])\nelse:\n # if not, get it from searx code base or last solution from /etc/searx\n settings_path = check_settings_yml(join(searx_dir, 'settings.yml')) or check_settings_yml('/etc/searx/settings.yml')\n\nif not settings_path:\n raise Exception('settings.yml not found')\n\n# load settings\nwith open(settings_path, 'r', encoding='utf-8') as settings_yaml:\n settings = safe_load(settings_yaml)\n\nif settings['ui']['static_path']:\n static_path = settings['ui']['static_path']\n\n'''\nenable debug if\nthe environnement variable SEARX_DEBUG is 1 or true\n(whatever the value in settings.yml)\nor general.debug=True in settings.yml\n\ndisable debug if\nthe environnement variable SEARX_DEBUG is 0 or false\n(whatever the value in settings.yml)\nor general.debug=False in settings.yml\n'''\nsearx_debug_env = environ.get('SEARX_DEBUG', '').lower()\nif searx_debug_env == 'true' or searx_debug_env == '1':\n searx_debug = True\nelif searx_debug_env == 'false' or searx_debug_env == '0':\n searx_debug = False\nelse:\n searx_debug = settings.get('general', {}).get('debug')\n\nif searx_debug:\n logging.basicConfig(level=logging.DEBUG)\nelse:\n logging.basicConfig(level=logging.WARNING)\n\nlogger = logging.getLogger('searx')\nlogger.debug('read configuration from %s', settings_path)\nlogger.info('Initialisation done')\n\nif 'SEARX_SECRET' in environ:\n settings['server']['secret_key'] = environ['SEARX_SECRET']\nif 'SEARX_BIND_ADDRESS' in environ:\n settings['server']['bind_address'] = environ['SEARX_BIND_ADDRESS']\n\nif not searx_debug and settings['server']['secret_key'] == 'ultrasecretkey':\n logger.error('server.secret_key is not changed. Please use something else instead of ultrasecretkey.')\n exit(1)\n", "path": "searx/__init__.py"}]} | 1,234 | 149 |
gh_patches_debug_33620 | rasdani/github-patches | git_diff | weecology__retriever-160 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Final page of connection wizard reports nothing on Mac & Windows
The final page of the connection wizard is blank on both Mac & Windows. This happens regardless of success or failure and predates the recent changes to the GUI in #157.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/connect_wizard.py`
Content:
```
1 """Connection setup wizard.
2
3 """
4
5 import os
6 import sys
7 import wx
8 import wx.html
9 import wx.wizard
10 from retriever.lib.models import Engine
11 from retriever.lib.tools import get_saved_connection, save_connection
12 from retriever.app.controls import *
13 from retriever.app.images import icon
14
15 from retriever import VERSION
16
17
18 class ConnectWizard(wx.wizard.Wizard):
19 def __init__(self, lists, engine_list, selected=None):
20 wx.wizard.Wizard.__init__(self, None, -1, "EcoData Retriever")
21
22 #self.SetIcon(icon.GetIcon())
23
24 welcome = """<h2>Connection Wizard</h2>
25
26 <p>The EcoData Retriever downloads raw data files, stores them on your
27 computer, and imports the data into your own local database.</p>
28 <p>To begin, you'll need to set up your own database. Once you've done
29 that, this wizard will walk you through the steps to connect to your
30 database.</p>
31 <p>Supported database systems currently include:</p>
32 <ul>"""
33
34 for db in engine_list:
35 welcome += "<li>" + db.name + "</li>"
36
37 welcome += "</ul>"
38
39 self.pages = []
40 self.lists = lists
41 self.engine_list = engine_list
42 self.selected = selected
43
44 self.pages.append(TitledPage(self, "", ""))
45
46 self.pages.append(ChooseDbPage(self, "Database",
47 "What kind of database are you using?"))
48
49 self.pages.append(ConnectPage(self,
50 "Connection Info",
51 ""))
52
53 self.pages.append(ConfirmPage(self, "Connection", ""))
54
55 (self.TITLE, self.CHOOSEDB,
56 self.CONNECTION, self.CONFIRM) = [self.pages[i]
57 for i in range(len(self.pages))]
58
59 self.TITLE.welcome = HtmlWindow(self.TITLE)
60 self.TITLE.welcome.SetSize((450,400))
61 self.TITLE.welcome.SetHtml(welcome)
62 self.TITLE.sizer.Add(self.TITLE.welcome, 1, wx.EXPAND)
63
64 self.CHOOSEDB.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONNECTION.Draw)
65 self.CONNECTION.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONFIRM.Draw)
66 self.CONFIRM.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONNECTION.Draw)
67
68 for i in range(len(self.pages) - 1):
69 wx.wizard.WizardPageSimple_Chain(self.pages[i], self.pages[i + 1])
70
71 for page in self.pages:
72 self.FitToPage(page)
73
74
75 def check_for_updates(self):
76 reload(retriever)
77
78
79 class ChooseDbPage(TitledPage):
80 def __init__(self, parent, title, label):
81 TitledPage.__init__(self, parent, title, label)
82 engine_list = parent.engine_list
83
84 dblist = ListBox(self, -1,
85 choices=[db.name for db in engine_list],
86 style=wx.LB_SINGLE,
87 size=(-1,150))
88 self.dblist = dblist
89 if parent.selected:
90 index = 0
91 for i in range(len(engine_list)):
92 if engine_list[i].name == parent.selected:
93 index = i
94 self.dblist.SetSelection(index)
95 else:
96 self.dblist.SetSelection(0)
97 self.sizer.Add(self.dblist, -1, wx.EXPAND)
98
99 #TODO: This is a hack to force the wizard to be large enough to not hide
100 #the Choose button on the Connect Page when the engine has a file
101 #attribute. This should be fixed properly by figuring out how to make wx
102 #work with the fact that the connect page has varying fields depending
103 #on the engine.
104 longspace = StaticText(self, -1, "", wx.Size(375, -1))
105 self.sizer.Add(longspace, -1)
106
107 def dirbtn_click(self, evt):
108 dialog = wx.DirDialog(None, message="Choose a directory to " +
109 "download your data files.")
110 if dialog.ShowModal() == wx.ID_OK:
111 self.raw_data_dir.SetValue(dialog.GetPath())
112 else:
113 pass
114 dialog.Destroy()
115
116
117 class ConnectPage(TitledPage):
118 """The connection info page."""
119 def __init__(self, parent, title, label):
120 TitledPage.__init__(self, parent, title, label)
121 self.option = dict()
122 self.sel = None
123 self.fields = wx.BoxSizer(wx.VERTICAL)
124 self.parent = parent
125
126 def Draw(self, evt):
127 """When the page is drawn, it may need to update its fields if
128 the selected database has changed."""
129
130 if not evt.GetDirection():
131 btn = self.parent.FindWindowById(wx.ID_FORWARD)
132 if btn: btn.Enable()
133
134 if len(self.Parent.CHOOSEDB.dblist.GetStringSelection()) == 0 and evt.Direction:
135 evt.Veto()
136 else:
137 if self.sel != self.Parent.CHOOSEDB.dblist.GetStringSelection():
138 self.sel = self.Parent.CHOOSEDB.dblist.GetStringSelection()
139 self.engine = Engine()
140 for db in self.Parent.engine_list:
141 if db.name == self.sel:
142 self.engine = db
143 self.fields.Clear(True)
144 self.fields = wx.BoxSizer(wx.VERTICAL)
145 if self.engine.instructions:
146 self.fields.Add(StaticText(self, -1, '\n' + self.engine.instructions + '\n\n'))
147 self.fieldset = dict()
148 self.option = dict()
149 saved_opts = get_saved_connection(self.engine.name)
150 for opt in self.engine.required_opts:
151 if opt[0] in saved_opts.keys():
152 default = saved_opts[opt[0]]
153 else:
154 default = opt[2]
155 self.fieldset[opt[0]] = wx.BoxSizer(wx.HORIZONTAL)
156 label = StaticText(self, -1, opt[0] + ": ",
157 size=wx.Size(90,35))
158 style = wx.TE_PASSWORD if opt[0] == "password" else 0
159 txt = TextCtrl(self, -1, str(default), size=wx.Size(200,-1), style=style)
160 self.option[opt[0]] = txt
161 self.fieldset[opt[0]].AddMany([label,
162 self.option[opt[0]]])
163 if opt[0] == "file":
164 file_opt = opt
165 def open_file_dialog(evt):
166 filter = ""
167 if file_opt[3]:
168 filter = file_opt[3] + "|"
169 filter += "All files (*.*)|*.*"
170 dialog = wx.FileDialog(None, style = wx.OPEN,
171 wildcard = filter)
172 if dialog.ShowModal() == wx.ID_OK:
173 self.option[file_opt[0]].SetValue(dialog.GetPath())
174 self.browse = wx.Button(self, -1, "Choose...")
175 self.fieldset[file_opt[0]].Add(self.browse)
176 self.browse.Bind(wx.EVT_BUTTON, open_file_dialog)
177 self.fieldset[opt[0]].Layout()
178 self.fields.Add(self.fieldset[opt[0]])
179 self.sizer.Add(self.fields)
180 self.sizer.Layout()
181
182
183 class ConfirmPage(TitledPage):
184 """The final confirmation page."""
185 def __init__(self, parent, title, label):
186 TitledPage.__init__(self, parent, title, label)
187 self.fields = wx.BoxSizer(wx.VERTICAL)
188 self.parent = parent
189
190 def Draw(self, evt):
191 if not evt.GetDirection(): return
192
193 self.fields.Clear(True)
194 self.fields = wx.BoxSizer(wx.VERTICAL)
195
196 self.values_dict = dict()
197 connect = self.parent.CONNECTION
198 for key in connect.option.keys():
199 self.values_dict[key] = str(connect.option[key].Value)
200
201 try:
202 connect.engine.opts = self.values_dict
203 connect.engine.connect(force_reconnect=True)
204 message = '''<p><b>Success!</b>Your connection has been saved.</p>
205 <p>Click Finish to continue.</p>'''
206 save_connection(connect.engine.name, self.values_dict)
207
208 except Exception as e:
209 message = message = '''<p><b>Error.</b>There was a problem with your
210 connection:</p><p>%s</p>
211 <p>Click Back to try again, or Cancel.</p>''' % e
212
213 btn = self.parent.FindWindowById(wx.ID_FORWARD)
214 if btn: btn.Disable()
215
216 self.message = HtmlWindow(self)
217 self.message.SetSize((450,400))
218 self.message.SetHtml(message)
219 self.fields.Add(self.message, 1, wx.EXPAND)
220 self.sizer.Add(self.fields)
221
222 self.sizer.Layout()
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/connect_wizard.py b/app/connect_wizard.py
--- a/app/connect_wizard.py
+++ b/app/connect_wizard.py
@@ -184,15 +184,11 @@
"""The final confirmation page."""
def __init__(self, parent, title, label):
TitledPage.__init__(self, parent, title, label)
- self.fields = wx.BoxSizer(wx.VERTICAL)
self.parent = parent
def Draw(self, evt):
if not evt.GetDirection(): return
- self.fields.Clear(True)
- self.fields = wx.BoxSizer(wx.VERTICAL)
-
self.values_dict = dict()
connect = self.parent.CONNECTION
for key in connect.option.keys():
@@ -201,12 +197,12 @@
try:
connect.engine.opts = self.values_dict
connect.engine.connect(force_reconnect=True)
- message = '''<p><b>Success!</b>Your connection has been saved.</p>
+ message = '''<p><b>Success!</b> Your connection has been saved.</p>
<p>Click Finish to continue.</p>'''
save_connection(connect.engine.name, self.values_dict)
except Exception as e:
- message = message = '''<p><b>Error.</b>There was a problem with your
+ message = message = '''<p><b>Error.</b> There was a problem with your
connection:</p><p>%s</p>
<p>Click Back to try again, or Cancel.</p>''' % e
@@ -216,7 +212,5 @@
self.message = HtmlWindow(self)
self.message.SetSize((450,400))
self.message.SetHtml(message)
- self.fields.Add(self.message, 1, wx.EXPAND)
- self.sizer.Add(self.fields)
-
+ self.sizer.Add(self.message, 1, wx.EXPAND)
self.sizer.Layout()
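
In the original `ConfirmPage.Draw`, the `HtmlWindow` was placed inside a freshly created `BoxSizer` that was then added to the page sizer without a stretch proportion or `wx.EXPAND`, which is consistent with the message area collapsing to nothing; the diff adds the window to `self.sizer` directly (and restores the missing spaces after the bold tags). A generic wxPython sketch of the corrected pattern follows, with made-up class and method names rather than retriever code.

```python
# Generic wxPython sketch (hypothetical names) of adding an HtmlWindow straight
# to the page sizer instead of nesting it in a throwaway BoxSizer on each redraw.
import wx
import wx.html


class ConfirmPanel(wx.Panel):
    def __init__(self, parent):
        super().__init__(parent)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.SetSizer(self.sizer)

    def show_message(self, html):
        message = wx.html.HtmlWindow(self, size=(450, 400))
        message.SetPage(html)
        self.sizer.Add(message, 1, wx.EXPAND)  # no intermediate sizer
        self.sizer.Layout()


if __name__ == "__main__":
    app = wx.App(False)
    frame = wx.Frame(None, title="demo")
    panel = ConfirmPanel(frame)
    panel.show_message("<p><b>Success!</b> Your connection has been saved.</p>")
    frame.Show()
    app.MainLoop()
```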
| {"golden_diff": "diff --git a/app/connect_wizard.py b/app/connect_wizard.py\n--- a/app/connect_wizard.py\n+++ b/app/connect_wizard.py\n@@ -184,15 +184,11 @@\n \"\"\"The final confirmation page.\"\"\"\n def __init__(self, parent, title, label):\n TitledPage.__init__(self, parent, title, label)\n- self.fields = wx.BoxSizer(wx.VERTICAL)\n self.parent = parent\n \n def Draw(self, evt):\n if not evt.GetDirection(): return\n \n- self.fields.Clear(True)\n- self.fields = wx.BoxSizer(wx.VERTICAL)\n- \n self.values_dict = dict()\n connect = self.parent.CONNECTION\n for key in connect.option.keys():\n@@ -201,12 +197,12 @@\n try:\n connect.engine.opts = self.values_dict\n connect.engine.connect(force_reconnect=True)\n- message = '''<p><b>Success!</b>Your connection has been saved.</p>\n+ message = '''<p><b>Success!</b> Your connection has been saved.</p>\n <p>Click Finish to continue.</p>'''\n save_connection(connect.engine.name, self.values_dict)\n \n except Exception as e:\n- message = message = '''<p><b>Error.</b>There was a problem with your\n+ message = message = '''<p><b>Error.</b> There was a problem with your\n connection:</p><p>%s</p>\n <p>Click Back to try again, or Cancel.</p>''' % e\n \n@@ -216,7 +212,5 @@\n self.message = HtmlWindow(self)\n self.message.SetSize((450,400))\n self.message.SetHtml(message)\n- self.fields.Add(self.message, 1, wx.EXPAND)\n- self.sizer.Add(self.fields)\n- \n+ self.sizer.Add(self.message, 1, wx.EXPAND)\n self.sizer.Layout()\n", "issue": "Final page of connection wizard reports nothing on Mac & Windows\nThe final page of the connection wizard is blank on both Mac & Windows. This happens regardless of success or failure and predates the recent changes to the GUI in #157.\n\n", "before_files": [{"content": "\"\"\"Connection setup wizard.\n\n\"\"\"\n\nimport os\nimport sys\nimport wx\nimport wx.html\nimport wx.wizard\nfrom retriever.lib.models import Engine\nfrom retriever.lib.tools import get_saved_connection, save_connection\nfrom retriever.app.controls import *\nfrom retriever.app.images import icon\n\nfrom retriever import VERSION\n\n\nclass ConnectWizard(wx.wizard.Wizard):\n def __init__(self, lists, engine_list, selected=None):\n wx.wizard.Wizard.__init__(self, None, -1, \"EcoData Retriever\")\n \n #self.SetIcon(icon.GetIcon())\n \n welcome = \"\"\"<h2>Connection Wizard</h2>\n \n <p>The EcoData Retriever downloads raw data files, stores them on your\n computer, and imports the data into your own local database.</p>\n <p>To begin, you'll need to set up your own database. 
Once you've done\n that, this wizard will walk you through the steps to connect to your\n database.</p>\n <p>Supported database systems currently include:</p>\n <ul>\"\"\"\n \n for db in engine_list:\n welcome += \"<li>\" + db.name + \"</li>\" \n \n welcome += \"</ul>\" \n \n self.pages = []\n self.lists = lists\n self.engine_list = engine_list\n self.selected = selected\n \n self.pages.append(TitledPage(self, \"\", \"\"))\n \n self.pages.append(ChooseDbPage(self, \"Database\", \n \"What kind of database are you using?\"))\n \n self.pages.append(ConnectPage(self, \n \"Connection Info\", \n \"\"))\n \n self.pages.append(ConfirmPage(self, \"Connection\", \"\"))\n\n (self.TITLE, self.CHOOSEDB, \n self.CONNECTION, self.CONFIRM) = [self.pages[i] \n for i in range(len(self.pages))]\n \n self.TITLE.welcome = HtmlWindow(self.TITLE)\n self.TITLE.welcome.SetSize((450,400))\n self.TITLE.welcome.SetHtml(welcome)\n self.TITLE.sizer.Add(self.TITLE.welcome, 1, wx.EXPAND)\n \n self.CHOOSEDB.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONNECTION.Draw)\n self.CONNECTION.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONFIRM.Draw)\n self.CONFIRM.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONNECTION.Draw)\n \n for i in range(len(self.pages) - 1):\n wx.wizard.WizardPageSimple_Chain(self.pages[i], self.pages[i + 1])\n \n for page in self.pages:\n self.FitToPage(page)\n \n \n def check_for_updates(self):\n reload(retriever)\n\n\nclass ChooseDbPage(TitledPage):\n def __init__(self, parent, title, label):\n TitledPage.__init__(self, parent, title, label)\n engine_list = parent.engine_list\n \n dblist = ListBox(self, -1, \n choices=[db.name for db in engine_list], \n style=wx.LB_SINGLE,\n size=(-1,150))\n self.dblist = dblist\n if parent.selected:\n index = 0\n for i in range(len(engine_list)):\n if engine_list[i].name == parent.selected:\n index = i \n self.dblist.SetSelection(index)\n else:\n self.dblist.SetSelection(0)\n self.sizer.Add(self.dblist, -1, wx.EXPAND)\n\n #TODO: This is a hack to force the wizard to be large enough to not hide\n #the Choose button on the Connect Page when the engine has a file\n #attribute. 
This should be fixed properly by figuring out how to make wx\n #work with the fact that the connect page has varying fields depending\n #on the engine.\n longspace = StaticText(self, -1, \"\", wx.Size(375, -1))\n self.sizer.Add(longspace, -1)\n \n def dirbtn_click(self, evt):\n dialog = wx.DirDialog(None, message=\"Choose a directory to \" +\n \"download your data files.\") \n if dialog.ShowModal() == wx.ID_OK: \n self.raw_data_dir.SetValue(dialog.GetPath()) \n else:\n pass\n dialog.Destroy()\n \n\nclass ConnectPage(TitledPage):\n \"\"\"The connection info page.\"\"\"\n def __init__(self, parent, title, label):\n TitledPage.__init__(self, parent, title, label)\n self.option = dict()\n self.sel = None\n self.fields = wx.BoxSizer(wx.VERTICAL)\n self.parent = parent\n \n def Draw(self, evt):\n \"\"\"When the page is drawn, it may need to update its fields if \n the selected database has changed.\"\"\"\n \n if not evt.GetDirection():\n btn = self.parent.FindWindowById(wx.ID_FORWARD) \n if btn: btn.Enable()\n \n if len(self.Parent.CHOOSEDB.dblist.GetStringSelection()) == 0 and evt.Direction:\n evt.Veto() \n else:\n if self.sel != self.Parent.CHOOSEDB.dblist.GetStringSelection():\n self.sel = self.Parent.CHOOSEDB.dblist.GetStringSelection()\n self.engine = Engine()\n for db in self.Parent.engine_list:\n if db.name == self.sel:\n self.engine = db\n self.fields.Clear(True) \n self.fields = wx.BoxSizer(wx.VERTICAL)\n if self.engine.instructions:\n self.fields.Add(StaticText(self, -1, '\\n' + self.engine.instructions + '\\n\\n'))\n self.fieldset = dict()\n self.option = dict()\n saved_opts = get_saved_connection(self.engine.name)\n for opt in self.engine.required_opts:\n if opt[0] in saved_opts.keys():\n default = saved_opts[opt[0]]\n else:\n default = opt[2]\n self.fieldset[opt[0]] = wx.BoxSizer(wx.HORIZONTAL)\n label = StaticText(self, -1, opt[0] + \": \", \n size=wx.Size(90,35))\n style = wx.TE_PASSWORD if opt[0] == \"password\" else 0\n txt = TextCtrl(self, -1, str(default), size=wx.Size(200,-1), style=style)\n self.option[opt[0]] = txt\n self.fieldset[opt[0]].AddMany([label, \n self.option[opt[0]]])\n if opt[0] == \"file\":\n file_opt = opt\n def open_file_dialog(evt):\n filter = \"\"\n if file_opt[3]:\n filter = file_opt[3] + \"|\"\n filter += \"All files (*.*)|*.*\" \n dialog = wx.FileDialog(None, style = wx.OPEN,\n wildcard = filter)\n if dialog.ShowModal() == wx.ID_OK:\n self.option[file_opt[0]].SetValue(dialog.GetPath())\n self.browse = wx.Button(self, -1, \"Choose...\")\n self.fieldset[file_opt[0]].Add(self.browse)\n self.browse.Bind(wx.EVT_BUTTON, open_file_dialog) \n self.fieldset[opt[0]].Layout()\n self.fields.Add(self.fieldset[opt[0]])\n self.sizer.Add(self.fields)\n self.sizer.Layout()\n \n \nclass ConfirmPage(TitledPage):\n \"\"\"The final confirmation page.\"\"\"\n def __init__(self, parent, title, label):\n TitledPage.__init__(self, parent, title, label)\n self.fields = wx.BoxSizer(wx.VERTICAL)\n self.parent = parent\n \n def Draw(self, evt):\n if not evt.GetDirection(): return\n \n self.fields.Clear(True)\n self.fields = wx.BoxSizer(wx.VERTICAL)\n \n self.values_dict = dict()\n connect = self.parent.CONNECTION\n for key in connect.option.keys():\n self.values_dict[key] = str(connect.option[key].Value)\n \n try:\n connect.engine.opts = self.values_dict\n connect.engine.connect(force_reconnect=True)\n message = '''<p><b>Success!</b>Your connection has been saved.</p>\n <p>Click Finish to continue.</p>'''\n save_connection(connect.engine.name, self.values_dict)\n \n except Exception 
as e:\n message = message = '''<p><b>Error.</b>There was a problem with your\n connection:</p><p>%s</p>\n <p>Click Back to try again, or Cancel.</p>''' % e\n \n btn = self.parent.FindWindowById(wx.ID_FORWARD) \n if btn: btn.Disable()\n \n self.message = HtmlWindow(self)\n self.message.SetSize((450,400))\n self.message.SetHtml(message)\n self.fields.Add(self.message, 1, wx.EXPAND)\n self.sizer.Add(self.fields)\n \n self.sizer.Layout()\n", "path": "app/connect_wizard.py"}], "after_files": [{"content": "\"\"\"Connection setup wizard.\n\n\"\"\"\n\nimport os\nimport sys\nimport wx\nimport wx.html\nimport wx.wizard\nfrom retriever.lib.models import Engine\nfrom retriever.lib.tools import get_saved_connection, save_connection\nfrom retriever.app.controls import *\nfrom retriever.app.images import icon\n\nfrom retriever import VERSION\n\n\nclass ConnectWizard(wx.wizard.Wizard):\n def __init__(self, lists, engine_list, selected=None):\n wx.wizard.Wizard.__init__(self, None, -1, \"EcoData Retriever\")\n \n #self.SetIcon(icon.GetIcon())\n \n welcome = \"\"\"<h2>Connection Wizard</h2>\n \n <p>The EcoData Retriever downloads raw data files, stores them on your\n computer, and imports the data into your own local database.</p>\n <p>To begin, you'll need to set up your own database. Once you've done\n that, this wizard will walk you through the steps to connect to your\n database.</p>\n <p>Supported database systems currently include:</p>\n <ul>\"\"\"\n \n for db in engine_list:\n welcome += \"<li>\" + db.name + \"</li>\" \n \n welcome += \"</ul>\" \n \n self.pages = []\n self.lists = lists\n self.engine_list = engine_list\n self.selected = selected\n \n self.pages.append(TitledPage(self, \"\", \"\"))\n \n self.pages.append(ChooseDbPage(self, \"Database\", \n \"What kind of database are you using?\"))\n \n self.pages.append(ConnectPage(self, \n \"Connection Info\", \n \"\"))\n \n self.pages.append(ConfirmPage(self, \"Connection\", \"\"))\n\n (self.TITLE, self.CHOOSEDB, \n self.CONNECTION, self.CONFIRM) = [self.pages[i] \n for i in range(len(self.pages))]\n \n self.TITLE.welcome = HtmlWindow(self.TITLE)\n self.TITLE.welcome.SetSize((450,400))\n self.TITLE.welcome.SetHtml(welcome)\n self.TITLE.sizer.Add(self.TITLE.welcome, 1, wx.EXPAND)\n \n self.CHOOSEDB.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONNECTION.Draw)\n self.CONNECTION.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONFIRM.Draw)\n self.CONFIRM.Bind(wx.wizard.EVT_WIZARD_PAGE_CHANGING, self.CONNECTION.Draw)\n \n for i in range(len(self.pages) - 1):\n wx.wizard.WizardPageSimple_Chain(self.pages[i], self.pages[i + 1])\n \n for page in self.pages:\n self.FitToPage(page)\n \n \n def check_for_updates(self):\n reload(retriever)\n\n\nclass ChooseDbPage(TitledPage):\n def __init__(self, parent, title, label):\n TitledPage.__init__(self, parent, title, label)\n engine_list = parent.engine_list\n \n dblist = ListBox(self, -1, \n choices=[db.name for db in engine_list], \n style=wx.LB_SINGLE,\n size=(-1,150))\n self.dblist = dblist\n if parent.selected:\n index = 0\n for i in range(len(engine_list)):\n if engine_list[i].name == parent.selected:\n index = i \n self.dblist.SetSelection(index)\n else:\n self.dblist.SetSelection(0)\n self.sizer.Add(self.dblist, -1, wx.EXPAND)\n\n #TODO: This is a hack to force the wizard to be large enough to not hide\n #the Choose button on the Connect Page when the engine has a file\n #attribute. 
This should be fixed properly by figuring out how to make wx\n #work with the fact that the connect page has varying fields depending\n #on the engine.\n longspace = StaticText(self, -1, \"\", wx.Size(375, -1))\n self.sizer.Add(longspace, -1)\n \n def dirbtn_click(self, evt):\n dialog = wx.DirDialog(None, message=\"Choose a directory to \" +\n \"download your data files.\") \n if dialog.ShowModal() == wx.ID_OK: \n self.raw_data_dir.SetValue(dialog.GetPath()) \n else:\n pass\n dialog.Destroy()\n \n\nclass ConnectPage(TitledPage):\n \"\"\"The connection info page.\"\"\"\n def __init__(self, parent, title, label):\n TitledPage.__init__(self, parent, title, label)\n self.option = dict()\n self.sel = None\n self.fields = wx.BoxSizer(wx.VERTICAL)\n self.parent = parent\n \n def Draw(self, evt):\n \"\"\"When the page is drawn, it may need to update its fields if \n the selected database has changed.\"\"\"\n \n if not evt.GetDirection():\n btn = self.parent.FindWindowById(wx.ID_FORWARD) \n if btn: btn.Enable()\n \n if len(self.Parent.CHOOSEDB.dblist.GetStringSelection()) == 0 and evt.Direction:\n evt.Veto() \n else:\n if self.sel != self.Parent.CHOOSEDB.dblist.GetStringSelection():\n self.sel = self.Parent.CHOOSEDB.dblist.GetStringSelection()\n self.engine = Engine()\n for db in self.Parent.engine_list:\n if db.name == self.sel:\n self.engine = db\n self.fields.Clear(True) \n self.fields = wx.BoxSizer(wx.VERTICAL)\n if self.engine.instructions:\n self.fields.Add(StaticText(self, -1, '\\n' + self.engine.instructions + '\\n\\n'))\n self.fieldset = dict()\n self.option = dict()\n saved_opts = get_saved_connection(self.engine.name)\n for opt in self.engine.required_opts:\n if opt[0] in saved_opts.keys():\n default = saved_opts[opt[0]]\n else:\n default = opt[2]\n self.fieldset[opt[0]] = wx.BoxSizer(wx.HORIZONTAL)\n label = StaticText(self, -1, opt[0] + \": \", \n size=wx.Size(90,35))\n style = wx.TE_PASSWORD if opt[0] == \"password\" else 0\n txt = TextCtrl(self, -1, str(default), size=wx.Size(200,-1), style=style)\n self.option[opt[0]] = txt\n self.fieldset[opt[0]].AddMany([label, \n self.option[opt[0]]])\n if opt[0] == \"file\":\n file_opt = opt\n def open_file_dialog(evt):\n filter = \"\"\n if file_opt[3]:\n filter = file_opt[3] + \"|\"\n filter += \"All files (*.*)|*.*\" \n dialog = wx.FileDialog(None, style = wx.OPEN,\n wildcard = filter)\n if dialog.ShowModal() == wx.ID_OK:\n self.option[file_opt[0]].SetValue(dialog.GetPath())\n self.browse = wx.Button(self, -1, \"Choose...\")\n self.fieldset[file_opt[0]].Add(self.browse)\n self.browse.Bind(wx.EVT_BUTTON, open_file_dialog) \n self.fieldset[opt[0]].Layout()\n self.fields.Add(self.fieldset[opt[0]])\n self.sizer.Add(self.fields)\n self.sizer.Layout()\n \n \nclass ConfirmPage(TitledPage):\n \"\"\"The final confirmation page.\"\"\"\n def __init__(self, parent, title, label):\n TitledPage.__init__(self, parent, title, label)\n self.parent = parent\n \n def Draw(self, evt):\n if not evt.GetDirection(): return\n \n self.values_dict = dict()\n connect = self.parent.CONNECTION\n for key in connect.option.keys():\n self.values_dict[key] = str(connect.option[key].Value)\n \n try:\n connect.engine.opts = self.values_dict\n connect.engine.connect(force_reconnect=True)\n message = '''<p><b>Success!</b> Your connection has been saved.</p>\n <p>Click Finish to continue.</p>'''\n save_connection(connect.engine.name, self.values_dict)\n \n except Exception as e:\n message = message = '''<p><b>Error.</b> There was a problem with your\n connection:</p><p>%s</p>\n 
<p>Click Back to try again, or Cancel.</p>''' % e\n \n btn = self.parent.FindWindowById(wx.ID_FORWARD) \n if btn: btn.Disable()\n \n self.message = HtmlWindow(self)\n self.message.SetSize((450,400))\n self.message.SetHtml(message)\n self.sizer.Add(self.message, 1, wx.EXPAND)\n self.sizer.Layout()\n", "path": "app/connect_wizard.py"}]} | 2,780 | 437 |
gh_patches_debug_20566 | rasdani/github-patches | git_diff | vispy__vispy-930 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Importing gloo should not automatically load the OpenGL library
I'm trying to run vispy on a headless server with docker, to use the ipynb webgl backend exclusively. I cannot `import vispy.gloo`:
```
File "/opt/conda/lib/python3.4/site-packages/vispy/gloo/__init__.py", line 47, in <module>
from . import gl # noqa
File "/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/__init__.py", line 213, in <module>
from . import gl2 as default_backend # noqa
File "/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/gl2.py", line 46, in <module>
raise RuntimeError('Could not load OpenGL library.')
RuntimeError: Could not load OpenGL library.
```
I should not need to have the OpenGL library on a headless server when using a remote backend.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/gloo/gl/gl2.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2015, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 """ GL ES 2.0 API implemented via desktop GL (i.e subset of normal OpenGL).
6 """
7
8 import os
9 import sys
10 import ctypes.util
11
12 from . import _copy_gl_functions
13 from ._constants import * # noqa
14
15 # Ctypes stuff
16
17
18 # Load the OpenGL library. We more or less follow the same approach
19 # as PyOpenGL does internally
20
21 _have_get_proc_address = False
22 _lib = os.getenv('VISPY_GL_LIB', '')
23 if _lib != '':
24 if sys.platform.startswith('win'):
25 _lib = ctypes.windll.LoadLibrary(_lib)
26 else:
27 _lib = ctypes.cdll.LoadLibrary(_lib)
28 elif sys.platform.startswith('win'):
29 # Windows
30 _lib = ctypes.windll.opengl32
31 try:
32 wglGetProcAddress = _lib.wglGetProcAddress
33 wglGetProcAddress.restype = ctypes.CFUNCTYPE(
34 ctypes.POINTER(ctypes.c_int))
35 wglGetProcAddress.argtypes = [ctypes.c_char_p]
36 _have_get_proc_address = True
37 except AttributeError:
38 pass
39 else:
40 # Unix-ish
41 if sys.platform.startswith('darwin'):
42 _fname = ctypes.util.find_library('OpenGL')
43 else:
44 _fname = ctypes.util.find_library('GL')
45 if not _fname:
46 raise RuntimeError('Could not load OpenGL library.')
47 # Load lib
48 _lib = ctypes.cdll.LoadLibrary(_fname)
49
50
51 def _have_context():
52 return _lib.glGetError() != 1282 # GL_INVALID_OPERATION
53
54
55 def _get_gl_version(_lib):
56 """Helper to get the GL version string"""
57 try:
58 return _lib.glGetString(7938).decode('utf-8')
59 except Exception:
60 return 'unknown'
61
62
63 def _get_gl_func(name, restype, argtypes):
64 # Based on a function in Pyglet
65 try:
66 # Try using normal ctypes stuff
67 func = getattr(_lib, name)
68 func.restype = restype
69 func.argtypes = argtypes
70 return func
71 except AttributeError:
72 if sys.platform.startswith('win'):
73 # Ask for a pointer to the function, this is the approach
74 # for OpenGL extensions on Windows
75 fargs = (restype,) + argtypes
76 ftype = ctypes.WINFUNCTYPE(*fargs)
77 if not _have_get_proc_address:
78 raise RuntimeError('Function %s not available '
79 '(OpenGL version is %s).'
80 % (name, _get_gl_version(_lib)))
81 if not _have_context():
82 raise RuntimeError('Using %s with no OpenGL context.' % name)
83 address = wglGetProcAddress(name.encode('utf-8'))
84 if address:
85 return ctypes.cast(address, ftype)
86 # If not Windows or if we did not return function object on Windows:
87 raise RuntimeError('Function %s not present in context '
88 '(OpenGL version is %s).'
89 % (name, _get_gl_version(_lib)))
90
91
92 # Inject
93
94 from . import _gl2 # noqa
95 _copy_gl_functions(_gl2, globals())
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/gloo/gl/gl2.py b/vispy/gloo/gl/gl2.py
--- a/vispy/gloo/gl/gl2.py
+++ b/vispy/gloo/gl/gl2.py
@@ -11,6 +11,7 @@
from . import _copy_gl_functions
from ._constants import * # noqa
+from ...util import logger
# Ctypes stuff
@@ -43,9 +44,11 @@
else:
_fname = ctypes.util.find_library('GL')
if not _fname:
- raise RuntimeError('Could not load OpenGL library.')
- # Load lib
- _lib = ctypes.cdll.LoadLibrary(_fname)
+ logger.warning('Could not load OpenGL library.')
+ _lib = None
+ else:
+ # Load lib
+ _lib = ctypes.cdll.LoadLibrary(_fname)
def _have_context():
@@ -62,6 +65,8 @@
def _get_gl_func(name, restype, argtypes):
# Based on a function in Pyglet
+ if _lib is None:
+ raise RuntimeError('Could not load OpenGL library, gl cannot be used')
try:
# Try using normal ctypes stuff
func = getattr(_lib, name)
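
The effect is that `import vispy.gloo` no longer requires libGL to be present: the missing library becomes a logged warning at import time, and a `RuntimeError` is raised only when a desktop-GL function is actually requested, which is exactly what a headless server using the ipynb/webgl backend needs. A condensed sketch of that deferred-failure pattern (Unix branch only, illustrative names):

```python
# Condensed sketch (illustrative names) of deferring a missing-libGL failure
# from import time to the first attempt to resolve a GL symbol.
import ctypes
import ctypes.util
import logging

logger = logging.getLogger("gl")

_fname = ctypes.util.find_library("GL")
if not _fname:
    logger.warning("Could not load OpenGL library.")
    _lib = None
else:
    _lib = ctypes.cdll.LoadLibrary(_fname)


def get_gl_func(name, restype, argtypes):
    if _lib is None:
        raise RuntimeError("Could not load OpenGL library, gl cannot be used")
    func = getattr(_lib, name)
    func.restype = restype
    func.argtypes = argtypes
    return func
```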
| {"golden_diff": "diff --git a/vispy/gloo/gl/gl2.py b/vispy/gloo/gl/gl2.py\n--- a/vispy/gloo/gl/gl2.py\n+++ b/vispy/gloo/gl/gl2.py\n@@ -11,6 +11,7 @@\n \n from . import _copy_gl_functions\n from ._constants import * # noqa\n+from ...util import logger\n \n # Ctypes stuff\n \n@@ -43,9 +44,11 @@\n else:\n _fname = ctypes.util.find_library('GL')\n if not _fname:\n- raise RuntimeError('Could not load OpenGL library.')\n- # Load lib\n- _lib = ctypes.cdll.LoadLibrary(_fname)\n+ logger.warning('Could not load OpenGL library.')\n+ _lib = None\n+ else:\n+ # Load lib\n+ _lib = ctypes.cdll.LoadLibrary(_fname)\n \n \n def _have_context():\n@@ -62,6 +65,8 @@\n \n def _get_gl_func(name, restype, argtypes):\n # Based on a function in Pyglet\n+ if _lib is None:\n+ raise RuntimeError('Could not load OpenGL library, gl cannot be used')\n try:\n # Try using normal ctypes stuff\n func = getattr(_lib, name)\n", "issue": "Importing gloo should not automatically load the OpenGL library\nI'm trying to run vispy on a headless server with docker, to use the ipynb webgl backend exclusively. I cannot `import vispy.gloo`:\n\n```\n File \"/opt/conda/lib/python3.4/site-packages/vispy/gloo/__init__.py\", line 47, in <module>\n from . import gl # noqa\n File \"/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/__init__.py\", line 213, in <module>\n from . import gl2 as default_backend # noqa\n File \"/opt/conda/lib/python3.4/site-packages/vispy/gloo/gl/gl2.py\", line 46, in <module>\n raise RuntimeError('Could not load OpenGL library.')\nRuntimeError: Could not load OpenGL library.\n```\n\nI should not need to have the OpenGL library on a headless server when using a remote backend.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\" GL ES 2.0 API implemented via desktop GL (i.e subset of normal OpenGL).\n\"\"\"\n\nimport os\nimport sys\nimport ctypes.util\n\nfrom . import _copy_gl_functions\nfrom ._constants import * # noqa\n\n# Ctypes stuff\n\n\n# Load the OpenGL library. 
We more or less follow the same approach\n# as PyOpenGL does internally\n\n_have_get_proc_address = False\n_lib = os.getenv('VISPY_GL_LIB', '')\nif _lib != '':\n if sys.platform.startswith('win'):\n _lib = ctypes.windll.LoadLibrary(_lib)\n else:\n _lib = ctypes.cdll.LoadLibrary(_lib)\nelif sys.platform.startswith('win'):\n # Windows\n _lib = ctypes.windll.opengl32\n try:\n wglGetProcAddress = _lib.wglGetProcAddress\n wglGetProcAddress.restype = ctypes.CFUNCTYPE(\n ctypes.POINTER(ctypes.c_int))\n wglGetProcAddress.argtypes = [ctypes.c_char_p]\n _have_get_proc_address = True\n except AttributeError:\n pass\nelse:\n # Unix-ish\n if sys.platform.startswith('darwin'):\n _fname = ctypes.util.find_library('OpenGL')\n else:\n _fname = ctypes.util.find_library('GL')\n if not _fname:\n raise RuntimeError('Could not load OpenGL library.')\n # Load lib\n _lib = ctypes.cdll.LoadLibrary(_fname)\n\n\ndef _have_context():\n return _lib.glGetError() != 1282 # GL_INVALID_OPERATION\n\n\ndef _get_gl_version(_lib):\n \"\"\"Helper to get the GL version string\"\"\"\n try:\n return _lib.glGetString(7938).decode('utf-8')\n except Exception:\n return 'unknown'\n\n\ndef _get_gl_func(name, restype, argtypes):\n # Based on a function in Pyglet\n try:\n # Try using normal ctypes stuff\n func = getattr(_lib, name)\n func.restype = restype\n func.argtypes = argtypes\n return func\n except AttributeError:\n if sys.platform.startswith('win'):\n # Ask for a pointer to the function, this is the approach\n # for OpenGL extensions on Windows\n fargs = (restype,) + argtypes\n ftype = ctypes.WINFUNCTYPE(*fargs)\n if not _have_get_proc_address:\n raise RuntimeError('Function %s not available '\n '(OpenGL version is %s).'\n % (name, _get_gl_version(_lib)))\n if not _have_context():\n raise RuntimeError('Using %s with no OpenGL context.' % name)\n address = wglGetProcAddress(name.encode('utf-8'))\n if address:\n return ctypes.cast(address, ftype)\n # If not Windows or if we did not return function object on Windows:\n raise RuntimeError('Function %s not present in context '\n '(OpenGL version is %s).'\n % (name, _get_gl_version(_lib)))\n\n\n# Inject\n\nfrom . import _gl2 # noqa\n_copy_gl_functions(_gl2, globals())\n", "path": "vispy/gloo/gl/gl2.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\n\"\"\" GL ES 2.0 API implemented via desktop GL (i.e subset of normal OpenGL).\n\"\"\"\n\nimport os\nimport sys\nimport ctypes.util\n\nfrom . import _copy_gl_functions\nfrom ._constants import * # noqa\nfrom ...util import logger\n\n# Ctypes stuff\n\n\n# Load the OpenGL library. 
We more or less follow the same approach\n# as PyOpenGL does internally\n\n_have_get_proc_address = False\n_lib = os.getenv('VISPY_GL_LIB', '')\nif _lib != '':\n if sys.platform.startswith('win'):\n _lib = ctypes.windll.LoadLibrary(_lib)\n else:\n _lib = ctypes.cdll.LoadLibrary(_lib)\nelif sys.platform.startswith('win'):\n # Windows\n _lib = ctypes.windll.opengl32\n try:\n wglGetProcAddress = _lib.wglGetProcAddress\n wglGetProcAddress.restype = ctypes.CFUNCTYPE(\n ctypes.POINTER(ctypes.c_int))\n wglGetProcAddress.argtypes = [ctypes.c_char_p]\n _have_get_proc_address = True\n except AttributeError:\n pass\nelse:\n # Unix-ish\n if sys.platform.startswith('darwin'):\n _fname = ctypes.util.find_library('OpenGL')\n else:\n _fname = ctypes.util.find_library('GL')\n if not _fname:\n logger.warning('Could not load OpenGL library.')\n _lib = None\n else:\n # Load lib\n _lib = ctypes.cdll.LoadLibrary(_fname)\n\n\ndef _have_context():\n return _lib.glGetError() != 1282 # GL_INVALID_OPERATION\n\n\ndef _get_gl_version(_lib):\n \"\"\"Helper to get the GL version string\"\"\"\n try:\n return _lib.glGetString(7938).decode('utf-8')\n except Exception:\n return 'unknown'\n\n\ndef _get_gl_func(name, restype, argtypes):\n # Based on a function in Pyglet\n if _lib is None:\n raise RuntimeError('Could not load OpenGL library, gl cannot be used')\n try:\n # Try using normal ctypes stuff\n func = getattr(_lib, name)\n func.restype = restype\n func.argtypes = argtypes\n return func\n except AttributeError:\n if sys.platform.startswith('win'):\n # Ask for a pointer to the function, this is the approach\n # for OpenGL extensions on Windows\n fargs = (restype,) + argtypes\n ftype = ctypes.WINFUNCTYPE(*fargs)\n if not _have_get_proc_address:\n raise RuntimeError('Function %s not available '\n '(OpenGL version is %s).'\n % (name, _get_gl_version(_lib)))\n if not _have_context():\n raise RuntimeError('Using %s with no OpenGL context.' % name)\n address = wglGetProcAddress(name.encode('utf-8'))\n if address:\n return ctypes.cast(address, ftype)\n # If not Windows or if we did not return function object on Windows:\n raise RuntimeError('Function %s not present in context '\n '(OpenGL version is %s).'\n % (name, _get_gl_version(_lib)))\n\n\n# Inject\n\nfrom . import _gl2 # noqa\n_copy_gl_functions(_gl2, globals())\n", "path": "vispy/gloo/gl/gl2.py"}]} | 1,365 | 281 |
gh_patches_debug_5755 | rasdani/github-patches | git_diff | pypi__warehouse-3820 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expose long_description_content_type in JSON API
Feature request: add 'long_description_content_type' to the JSON API's 'info' section, alongside the existing 'description'.
Now that PyPI supports Markdown and more than one kind of content type for long descriptions, it would be helpful to be able to see what the content type is with the JSON API. Otherwise it is more difficult to correctly interpret the 'description', since it isn't immediately obvious if it's reStructuredText or Markdown with which variant.
I'm willing to try this if I'm pointed in the right general direction :) If someone else can get it done trivially that's fine too.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/legacy/api/json.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound
14 from pyramid.view import view_config
15 from sqlalchemy.orm import Load
16 from sqlalchemy.orm.exc import NoResultFound
17
18 from warehouse.cache.http import cache_control
19 from warehouse.cache.origin import origin_cache
20 from warehouse.packaging.models import File, Release, Project
21
22
23 # Generate appropriate CORS headers for the JSON endpoint.
24 # We want to allow Cross-Origin requests here so that users can interact
25 # with these endpoints via XHR/Fetch APIs in the browser.
26 _CORS_HEADERS = {
27 "Access-Control-Allow-Origin": "*",
28 "Access-Control-Allow-Headers": ", ".join([
29 "Content-Type",
30 "If-Match",
31 "If-Modified-Since",
32 "If-None-Match",
33 "If-Unmodified-Since",
34 ]),
35 "Access-Control-Allow-Methods": "GET",
36 "Access-Control-Max-Age": "86400", # 1 day.
37 "Access-Control-Expose-Headers": ", ".join([
38 "X-PyPI-Last-Serial",
39 ]),
40 }
41
42
43 @view_config(
44 route_name="legacy.api.json.project",
45 context=Project,
46 renderer="json",
47 decorator=[
48 cache_control(15 * 60), # 15 minutes
49 origin_cache(
50 1 * 24 * 60 * 60, # 1 day
51 stale_while_revalidate=5 * 60, # 5 minutes
52 stale_if_error=1 * 24 * 60 * 60, # 1 day
53 ),
54 ],
55 )
56 def json_project(project, request):
57 if project.name != request.matchdict.get("name", project.name):
58 return HTTPMovedPermanently(
59 request.current_route_path(name=project.name),
60 headers=_CORS_HEADERS
61 )
62
63 try:
64 release = (
65 request.db.query(Release)
66 .filter(Release.project == project)
67 .order_by(
68 Release.is_prerelease.nullslast(),
69 Release._pypi_ordering.desc())
70 .limit(1)
71 .one()
72 )
73 except NoResultFound:
74 return HTTPNotFound(headers=_CORS_HEADERS)
75
76 return json_release(release, request)
77
78
79 @view_config(
80 route_name="legacy.api.json.release",
81 context=Release,
82 renderer="json",
83 decorator=[
84 cache_control(15 * 60), # 15 minutes
85 origin_cache(
86 1 * 24 * 60 * 60, # 1 day
87 stale_while_revalidate=5 * 60, # 5 minutes
88 stale_if_error=1 * 24 * 60 * 60, # 1 day
89 ),
90 ],
91 )
92 def json_release(release, request):
93 project = release.project
94
95 if project.name != request.matchdict.get("name", project.name):
96 return HTTPMovedPermanently(
97 request.current_route_path(name=project.name),
98 headers=_CORS_HEADERS
99 )
100
101 # Apply CORS headers.
102 request.response.headers.update(_CORS_HEADERS)
103
104 # Get the latest serial number for this project.
105 request.response.headers["X-PyPI-Last-Serial"] = str(project.last_serial)
106
107 # Get all of the releases and files for this project.
108 release_files = (
109 request.db.query(Release, File)
110 .options(Load(Release).load_only('version'))
111 .outerjoin(File)
112 .filter(Release.project == project)
113 .order_by(Release._pypi_ordering.desc(), File.filename)
114 .all()
115 )
116
117 # Map our releases + files into a dictionary that maps each release to a
118 # list of all its files.
119 releases = {}
120 for r, file_ in release_files:
121 files = releases.setdefault(r, [])
122 if file_ is not None:
123 files.append(file_)
124
125 # Serialize our database objects to match the way that PyPI legacy
126 # presented this data.
127 releases = {
128 r.version: [
129 {
130 "filename": f.filename,
131 "packagetype": f.packagetype,
132 "python_version": f.python_version,
133 "has_sig": f.has_signature,
134 "comment_text": f.comment_text,
135 "md5_digest": f.md5_digest,
136 "digests": {
137 "md5": f.md5_digest,
138 "sha256": f.sha256_digest,
139 },
140 "size": f.size,
141 # TODO: Remove this once we've had a long enough time with it
142 # here to consider it no longer in use.
143 "downloads": -1,
144 "upload_time": f.upload_time.strftime("%Y-%m-%dT%H:%M:%S"),
145 "url": request.route_url("packaging.file", path=f.path),
146 }
147 for f in fs
148 ]
149 for r, fs in releases.items()
150 }
151
152 return {
153 "info": {
154 "name": project.name,
155 "version": release.version,
156 "summary": release.summary,
157 "description": release.description,
158 "keywords": release.keywords,
159 "license": release.license,
160 "classifiers": list(release.classifiers),
161 "author": release.author,
162 "author_email": release.author_email,
163 "maintainer": release.maintainer,
164 "maintainer_email": release.maintainer_email,
165 "requires_python": release.requires_python,
166 "platform": release.platform,
167 "downloads": {
168 "last_day": -1,
169 "last_week": -1,
170 "last_month": -1,
171 },
172 "package_url": request.route_url(
173 "packaging.project",
174 name=project.name,
175 ),
176 "project_url": request.route_url(
177 "packaging.project",
178 name=project.name,
179 ),
180 "release_url": request.route_url(
181 "packaging.release",
182 name=project.name,
183 version=release.version,
184 ),
185 "requires_dist": (list(release.requires_dist)
186 if release.requires_dist else None),
187 "docs_url": project.documentation_url,
188 "bugtrack_url": project.bugtrack_url,
189 "home_page": release.home_page,
190 "download_url": release.download_url,
191 },
192 "urls": releases[release.version],
193 "releases": releases,
194 "last_serial": project.last_serial,
195 }
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py
--- a/warehouse/legacy/api/json.py
+++ b/warehouse/legacy/api/json.py
@@ -154,6 +154,7 @@
"name": project.name,
"version": release.version,
"summary": release.summary,
+ "description_content_type": release.description_content_type,
"description": release.description,
"keywords": release.keywords,
"license": release.license,
| {"golden_diff": "diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py\n--- a/warehouse/legacy/api/json.py\n+++ b/warehouse/legacy/api/json.py\n@@ -154,6 +154,7 @@\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n+ \"description_content_type\": release.description_content_type,\n \"description\": release.description,\n \"keywords\": release.keywords,\n \"license\": release.license,\n", "issue": "Expose long_description_content_type in JSON API\nFeature request: Add 'long_description_content_type' to JSON API in 'info': section, alongside the existing 'description'\r\n\r\nNow that PyPI supports Markdown and more than one kind of content type for long descriptions, it would be helpful to be able to see what the content type is with the JSON API. Otherwise it is more difficult to correctly interpret the 'description', since it isn't immediately obvious if it's reStructuredText or Markdown with which variant.\r\n\r\nI'm willing to try this if I'm pointed in the right general direction :) If someone else can get it done trivially that's fine too.\r\n\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Release, Project\n\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join([\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\n \"X-PyPI-Last-Serial\",\n ]),\n}\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n 
route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(Load(Release).load_only('version'))\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\n \"md5\": f.md5_digest,\n \"sha256\": f.sha256_digest,\n },\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description\": release.description,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\n \"last_day\": -1,\n \"last_week\": -1,\n \"last_month\": -1,\n },\n \"package_url\": request.route_url(\n \"packaging.project\",\n name=project.name,\n ),\n \"project_url\": request.route_url(\n \"packaging.project\",\n name=project.name,\n ),\n \"release_url\": request.route_url(\n \"packaging.release\",\n name=project.name,\n version=release.version,\n ),\n \"requires_dist\": (list(release.requires_dist)\n if release.requires_dist else None),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": project.bugtrack_url,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n", "path": "warehouse/legacy/api/json.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# 
http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Release, Project\n\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join([\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\n \"X-PyPI-Last-Serial\",\n ]),\n}\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project)\n .order_by(\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=[\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n ],\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name),\n headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(Load(Release).load_only('version'))\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n 
\"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\n \"md5\": f.md5_digest,\n \"sha256\": f.sha256_digest,\n },\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description_content_type,\n \"description\": release.description,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\n \"last_day\": -1,\n \"last_week\": -1,\n \"last_month\": -1,\n },\n \"package_url\": request.route_url(\n \"packaging.project\",\n name=project.name,\n ),\n \"project_url\": request.route_url(\n \"packaging.project\",\n name=project.name,\n ),\n \"release_url\": request.route_url(\n \"packaging.release\",\n name=project.name,\n version=release.version,\n ),\n \"requires_dist\": (list(release.requires_dist)\n if release.requires_dist else None),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": project.bugtrack_url,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n", "path": "warehouse/legacy/api/json.py"}]} | 2,373 | 109 |
gh_patches_debug_29995 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Petsmart Canada
https://www.petsmart.ca/store-locator/all/
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/petsmart.py`
Content:
```
1 import datetime
2 import re
3 import scrapy
4 from locations.items import GeojsonPointItem
5 from locations.hours import OpeningHours
6
7 day_mapping = {'MON': 'Mo','TUE': 'Tu','WED': 'We','THU': 'Th',
8 'FRI': 'Fr','SAT': 'Sa','SUN': 'Su'}
9
10 def convert_24hour(time):
11 """
12 Takes 12 hour time as a string and converts it to 24 hour time.
13 """
14
15 if len(time[:-2].split(':')) < 2:
16 hour = time[:-2]
17 minute = '00'
18 else:
19 hour, minute = time[:-2].split(':')
20
21 if time[-2:] == 'AM':
22 time_formatted = hour + ':' + minute
23 elif time[-2:] == 'PM':
24 time_formatted = str(int(hour)+ 12) + ':' + minute
25
26 if time_formatted in ['24:00','0:00','00:00']:
27 time_formatted = '23:59'
28
29 return time_formatted
30
31 class PetSmartSpider(scrapy.Spider):
32 download_delay = 0.2
33 name = "petsmart"
34 item_attributes = { 'brand': "Petsmart" }
35 allowed_domains = ["petsmart.com"]
36 start_urls = (
37 'https://www.petsmart.com/store-locator/all/',
38 )
39
40 def parse(self, response):
41 state_urls = response.xpath('//li[@class="col-sm-12 col-md-4"]/a/@href').extract()
42 is_store_details_urls = response.xpath('//a[@class="store-details-link"]/@href').extract()
43
44 if not state_urls and is_store_details_urls:
45 for url in is_store_details_urls:
46 yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
47 else:
48 for url in state_urls:
49 yield scrapy.Request(response.urljoin(url))
50
51 def parse_store(self, response):
52 ref = re.search(r'.+/?\?(.+)', response.url).group(1)
53
54 properties = {
55 'name': response.xpath('//span[@itemprop="name"]/text()').extract_first().strip(),
56 'addr_full': response.xpath('//div[@itemprop="streetAddress"]/text()').extract_first(),
57 'city': response.xpath('//span[@itemprop="addressLocality"][1]/text()').extract_first().title(),
58 'state': response.xpath('//span[@itemprop="addressLocality"][2]/text()').extract_first(),
59 'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
60 'lat': float(response.xpath('//input[@name="storeLatitudeVal"]/@value').extract_first()),
61 'lon': float(response.xpath('//input[@name="storeLongitudeVal"]/@value').extract_first()),
62 'phone': response.xpath('//a[@class="store-contact-info"]/text()').extract_first(),
63 'ref': ref,
64 'website': response.url
65 }
66
67 hours = self.parse_hours(response.xpath('//div[@class="store-detail-address"]'))
68
69 if hours:
70 properties['opening_hours'] = hours
71
72 yield GeojsonPointItem(**properties)
73
74 def parse_hours(self, elements):
75 opening_hours = OpeningHours()
76
77 days = elements.xpath('//span[@itemprop="dayOfWeek"]/text()').extract()
78 today = (set(day_mapping) - set(days)).pop()
79 days.remove('TODAY')
80 days.insert(0,today)
81 open_hours = elements.xpath('//div[@class="store-hours"]/time[@itemprop="opens"]/@content').extract()
82 close_hours = elements.xpath('//div[@class="store-hours"]/time[@itemprop="closes"]/@content').extract()
83
84 store_hours = dict((z[0],list(z[1:])) for z in zip(days, open_hours, close_hours))
85
86 for day, hours in store_hours.items():
87 if 'CLOSED' in hours:
88 continue
89 opening_hours.add_range(day=day_mapping[day],
90 open_time=convert_24hour(hours[0]),
91 close_time=convert_24hour(hours[1]))
92 return opening_hours.as_opening_hours()
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/petsmart.py b/locations/spiders/petsmart.py
--- a/locations/spiders/petsmart.py
+++ b/locations/spiders/petsmart.py
@@ -32,9 +32,10 @@
download_delay = 0.2
name = "petsmart"
item_attributes = { 'brand': "Petsmart" }
- allowed_domains = ["petsmart.com"]
+ allowed_domains = ["petsmart.com", "petsmart.ca"]
start_urls = (
'https://www.petsmart.com/store-locator/all/',
+ 'https://www.petsmart.ca/store-locator/all/'
)
def parse(self, response):
@@ -50,6 +51,10 @@
def parse_store(self, response):
ref = re.search(r'.+/?\?(.+)', response.url).group(1)
+ if 'petsmart.ca' in response.url:
+ country = 'CA'
+ elif 'petsmart.com' in response.url:
+ country = 'US'
properties = {
'name': response.xpath('//span[@itemprop="name"]/text()').extract_first().strip(),
@@ -60,6 +65,7 @@
'lat': float(response.xpath('//input[@name="storeLatitudeVal"]/@value').extract_first()),
'lon': float(response.xpath('//input[@name="storeLongitudeVal"]/@value').extract_first()),
'phone': response.xpath('//a[@class="store-contact-info"]/text()').extract_first(),
+ 'country': country,
'ref': ref,
'website': response.url
}
| {"golden_diff": "diff --git a/locations/spiders/petsmart.py b/locations/spiders/petsmart.py\n--- a/locations/spiders/petsmart.py\n+++ b/locations/spiders/petsmart.py\n@@ -32,9 +32,10 @@\n download_delay = 0.2\n name = \"petsmart\"\n item_attributes = { 'brand': \"Petsmart\" }\n- allowed_domains = [\"petsmart.com\"]\n+ allowed_domains = [\"petsmart.com\", \"petsmart.ca\"]\n start_urls = (\n 'https://www.petsmart.com/store-locator/all/',\n+ 'https://www.petsmart.ca/store-locator/all/'\n )\n \n def parse(self, response):\n@@ -50,6 +51,10 @@\n \n def parse_store(self, response):\n ref = re.search(r'.+/?\\?(.+)', response.url).group(1)\n+ if 'petsmart.ca' in response.url:\n+ country = 'CA'\n+ elif 'petsmart.com' in response.url:\n+ country = 'US'\n \n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first().strip(),\n@@ -60,6 +65,7 @@\n 'lat': float(response.xpath('//input[@name=\"storeLatitudeVal\"]/@value').extract_first()),\n 'lon': float(response.xpath('//input[@name=\"storeLongitudeVal\"]/@value').extract_first()),\n 'phone': response.xpath('//a[@class=\"store-contact-info\"]/text()').extract_first(),\n+ 'country': country,\n 'ref': ref,\n 'website': response.url\n }\n", "issue": "Petsmart Canada\nhttps://www.petsmart.ca/store-locator/all/\n", "before_files": [{"content": "import datetime\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nday_mapping = {'MON': 'Mo','TUE': 'Tu','WED': 'We','THU': 'Th',\n 'FRI': 'Fr','SAT': 'Sa','SUN': 'Su'}\n\ndef convert_24hour(time):\n \"\"\"\n Takes 12 hour time as a string and converts it to 24 hour time.\n \"\"\"\n\n if len(time[:-2].split(':')) < 2:\n hour = time[:-2]\n minute = '00'\n else:\n hour, minute = time[:-2].split(':')\n\n if time[-2:] == 'AM':\n time_formatted = hour + ':' + minute\n elif time[-2:] == 'PM':\n time_formatted = str(int(hour)+ 12) + ':' + minute\n\n if time_formatted in ['24:00','0:00','00:00']:\n time_formatted = '23:59'\n\n return time_formatted\n\nclass PetSmartSpider(scrapy.Spider):\n download_delay = 0.2\n name = \"petsmart\"\n item_attributes = { 'brand': \"Petsmart\" }\n allowed_domains = [\"petsmart.com\"]\n start_urls = (\n 'https://www.petsmart.com/store-locator/all/',\n )\n\n def parse(self, response):\n state_urls = response.xpath('//li[@class=\"col-sm-12 col-md-4\"]/a/@href').extract()\n is_store_details_urls = response.xpath('//a[@class=\"store-details-link\"]/@href').extract()\n\n if not state_urls and is_store_details_urls:\n for url in is_store_details_urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n for url in state_urls:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/?\\?(.+)', response.url).group(1)\n\n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first().strip(),\n 'addr_full': response.xpath('//div[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"][1]/text()').extract_first().title(),\n 'state': response.xpath('//span[@itemprop=\"addressLocality\"][2]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'lat': float(response.xpath('//input[@name=\"storeLatitudeVal\"]/@value').extract_first()),\n 'lon': float(response.xpath('//input[@name=\"storeLongitudeVal\"]/@value').extract_first()),\n 'phone': 
response.xpath('//a[@class=\"store-contact-info\"]/text()').extract_first(),\n 'ref': ref,\n 'website': response.url\n }\n\n hours = self.parse_hours(response.xpath('//div[@class=\"store-detail-address\"]'))\n\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_hours(self, elements):\n opening_hours = OpeningHours()\n\n days = elements.xpath('//span[@itemprop=\"dayOfWeek\"]/text()').extract()\n today = (set(day_mapping) - set(days)).pop()\n days.remove('TODAY')\n days.insert(0,today)\n open_hours = elements.xpath('//div[@class=\"store-hours\"]/time[@itemprop=\"opens\"]/@content').extract()\n close_hours = elements.xpath('//div[@class=\"store-hours\"]/time[@itemprop=\"closes\"]/@content').extract()\n\n store_hours = dict((z[0],list(z[1:])) for z in zip(days, open_hours, close_hours))\n\n for day, hours in store_hours.items():\n if 'CLOSED' in hours:\n continue\n opening_hours.add_range(day=day_mapping[day],\n open_time=convert_24hour(hours[0]),\n close_time=convert_24hour(hours[1]))\n return opening_hours.as_opening_hours()\n", "path": "locations/spiders/petsmart.py"}], "after_files": [{"content": "import datetime\nimport re\nimport scrapy\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\nday_mapping = {'MON': 'Mo','TUE': 'Tu','WED': 'We','THU': 'Th',\n 'FRI': 'Fr','SAT': 'Sa','SUN': 'Su'}\n\ndef convert_24hour(time):\n \"\"\"\n Takes 12 hour time as a string and converts it to 24 hour time.\n \"\"\"\n\n if len(time[:-2].split(':')) < 2:\n hour = time[:-2]\n minute = '00'\n else:\n hour, minute = time[:-2].split(':')\n\n if time[-2:] == 'AM':\n time_formatted = hour + ':' + minute\n elif time[-2:] == 'PM':\n time_formatted = str(int(hour)+ 12) + ':' + minute\n\n if time_formatted in ['24:00','0:00','00:00']:\n time_formatted = '23:59'\n\n return time_formatted\n\nclass PetSmartSpider(scrapy.Spider):\n download_delay = 0.2\n name = \"petsmart\"\n item_attributes = { 'brand': \"Petsmart\" }\n allowed_domains = [\"petsmart.com\", \"petsmart.ca\"]\n start_urls = (\n 'https://www.petsmart.com/store-locator/all/',\n 'https://www.petsmart.ca/store-locator/all/'\n )\n\n def parse(self, response):\n state_urls = response.xpath('//li[@class=\"col-sm-12 col-md-4\"]/a/@href').extract()\n is_store_details_urls = response.xpath('//a[@class=\"store-details-link\"]/@href').extract()\n\n if not state_urls and is_store_details_urls:\n for url in is_store_details_urls:\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n else:\n for url in state_urls:\n yield scrapy.Request(response.urljoin(url))\n\n def parse_store(self, response):\n ref = re.search(r'.+/?\\?(.+)', response.url).group(1)\n if 'petsmart.ca' in response.url:\n country = 'CA'\n elif 'petsmart.com' in response.url:\n country = 'US'\n\n properties = {\n 'name': response.xpath('//span[@itemprop=\"name\"]/text()').extract_first().strip(),\n 'addr_full': response.xpath('//div[@itemprop=\"streetAddress\"]/text()').extract_first(),\n 'city': response.xpath('//span[@itemprop=\"addressLocality\"][1]/text()').extract_first().title(),\n 'state': response.xpath('//span[@itemprop=\"addressLocality\"][2]/text()').extract_first(),\n 'postcode': response.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n 'lat': float(response.xpath('//input[@name=\"storeLatitudeVal\"]/@value').extract_first()),\n 'lon': float(response.xpath('//input[@name=\"storeLongitudeVal\"]/@value').extract_first()),\n 'phone': 
response.xpath('//a[@class=\"store-contact-info\"]/text()').extract_first(),\n 'country': country,\n 'ref': ref,\n 'website': response.url\n }\n\n hours = self.parse_hours(response.xpath('//div[@class=\"store-detail-address\"]'))\n\n if hours:\n properties['opening_hours'] = hours\n\n yield GeojsonPointItem(**properties)\n\n def parse_hours(self, elements):\n opening_hours = OpeningHours()\n\n days = elements.xpath('//span[@itemprop=\"dayOfWeek\"]/text()').extract()\n today = (set(day_mapping) - set(days)).pop()\n days.remove('TODAY')\n days.insert(0,today)\n open_hours = elements.xpath('//div[@class=\"store-hours\"]/time[@itemprop=\"opens\"]/@content').extract()\n close_hours = elements.xpath('//div[@class=\"store-hours\"]/time[@itemprop=\"closes\"]/@content').extract()\n\n store_hours = dict((z[0],list(z[1:])) for z in zip(days, open_hours, close_hours))\n\n for day, hours in store_hours.items():\n if 'CLOSED' in hours:\n continue\n opening_hours.add_range(day=day_mapping[day],\n open_time=convert_24hour(hours[0]),\n close_time=convert_24hour(hours[1]))\n return opening_hours.as_opening_hours()\n", "path": "locations/spiders/petsmart.py"}]} | 1,362 | 361 |
gh_patches_debug_36927 | rasdani/github-patches | git_diff | mdn__kuma-7800 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🧹 Remove hompage related frontend code
Once https://github.com/mdn/yari/issues/2438 lands, we need to go into the Kuma forest and find all things related to the homepage frontend code, chop it down, and clear it out.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/landing/views.py`
Content:
```
1 from django.conf import settings
2 from django.http import HttpResponse
3 from django.shortcuts import redirect, render
4 from django.views import static
5 from django.views.decorators.cache import never_cache
6 from django.views.generic import RedirectView
7
8 from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
9 from kuma.core.utils import is_wiki
10 from kuma.feeder.models import Bundle
11 from kuma.feeder.sections import SECTION_HACKS
12 from kuma.search.models import Filter
13
14 from .utils import favicon_url
15
16
17 @shared_cache_control
18 def contribute_json(request):
19 return static.serve(request, "contribute.json", document_root=settings.ROOT)
20
21
22 @shared_cache_control
23 def home(request):
24 """Home page."""
25 context = {}
26 # Need for both wiki and react homepage
27 context["updates"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
28
29 # The default template name
30 template_name = "landing/react_homepage.html"
31 if is_wiki(request):
32 template_name = "landing/homepage.html"
33 context["default_filters"] = Filter.objects.default_filters()
34 return render(request, template_name, context)
35
36
37 @ensure_wiki_domain
38 @never_cache
39 def maintenance_mode(request):
40 if settings.MAINTENANCE_MODE:
41 return render(request, "landing/maintenance-mode.html")
42 else:
43 return redirect("home")
44
45
46 @ensure_wiki_domain
47 @shared_cache_control
48 def promote_buttons(request):
49 """Bug 646192: MDN affiliate buttons"""
50 return render(request, "landing/promote_buttons.html")
51
52
53 ROBOTS_ALL_ALLOWED_TXT = """\
54 User-agent: *
55 Sitemap: https://wiki.developer.mozilla.org/sitemap.xml
56
57 Disallow:
58 """
59
60 ROBOTS_ALLOWED_TXT = """\
61 User-agent: *
62 Sitemap: https://developer.mozilla.org/sitemap.xml
63
64 Disallow: /api/
65 Disallow: /*docs/get-documents
66 Disallow: /*docs/Experiment:*
67 Disallow: /*$children
68 Disallow: /*docs.json
69 Disallow: /*/files/
70 Disallow: /media
71 Disallow: /*profiles*/edit
72 """ + "\n".join(
73 "Disallow: /{locale}/search".format(locale=locale)
74 for locale in settings.ENABLED_LOCALES
75 )
76
77 ROBOTS_GO_AWAY_TXT = """\
78 User-Agent: *
79 Disallow: /
80 """
81
82
83 @shared_cache_control
84 def robots_txt(request):
85 """Serve robots.txt that allows or forbids robots."""
86 host = request.get_host()
87 if host in settings.ALLOW_ROBOTS_DOMAINS:
88 robots = ""
89 elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:
90 if host == settings.WIKI_HOST:
91 robots = ROBOTS_ALL_ALLOWED_TXT
92 else:
93 robots = ROBOTS_ALLOWED_TXT
94 else:
95 robots = ROBOTS_GO_AWAY_TXT
96 return HttpResponse(robots, content_type="text/plain")
97
98
99 class FaviconRedirect(RedirectView):
100 """Redirect to the favicon in the static img folder (bug 1402497)"""
101
102 def get_redirect_url(self, *args, **kwargs):
103 return favicon_url()
104
```
Path: `kuma/landing/urls.py`
Content:
```
1 from django.urls import re_path
2
3 from kuma.core.decorators import shared_cache_control
4
5 from . import views
6
7
8 MONTH = 60 * 60 * 24 * 30
9
10
11 lang_urlpatterns = [
12 re_path(r"^$", views.home, name="home"),
13 re_path(r"^maintenance-mode/?$", views.maintenance_mode, name="maintenance_mode"),
14 re_path(r"^promote/?$", views.promote_buttons, name="promote"),
15 re_path(r"^promote/buttons/?$", views.promote_buttons, name="promote_buttons"),
16 ]
17
18 urlpatterns = [
19 re_path(r"^contribute\.json$", views.contribute_json, name="contribute_json"),
20 re_path(r"^robots.txt$", views.robots_txt, name="robots_txt"),
21 re_path(
22 r"^favicon.ico$",
23 shared_cache_control(views.FaviconRedirect.as_view(), s_maxage=MONTH),
24 name="favicon_ico",
25 ),
26 ]
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/landing/urls.py b/kuma/landing/urls.py
--- a/kuma/landing/urls.py
+++ b/kuma/landing/urls.py
@@ -11,12 +11,9 @@
lang_urlpatterns = [
re_path(r"^$", views.home, name="home"),
re_path(r"^maintenance-mode/?$", views.maintenance_mode, name="maintenance_mode"),
- re_path(r"^promote/?$", views.promote_buttons, name="promote"),
- re_path(r"^promote/buttons/?$", views.promote_buttons, name="promote_buttons"),
]
urlpatterns = [
- re_path(r"^contribute\.json$", views.contribute_json, name="contribute_json"),
re_path(r"^robots.txt$", views.robots_txt, name="robots_txt"),
re_path(
r"^favicon.ico$",
diff --git a/kuma/landing/views.py b/kuma/landing/views.py
--- a/kuma/landing/views.py
+++ b/kuma/landing/views.py
@@ -1,37 +1,25 @@
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect, render
-from django.views import static
from django.views.decorators.cache import never_cache
from django.views.generic import RedirectView
from kuma.core.decorators import ensure_wiki_domain, shared_cache_control
-from kuma.core.utils import is_wiki
-from kuma.feeder.models import Bundle
-from kuma.feeder.sections import SECTION_HACKS
-from kuma.search.models import Filter
from .utils import favicon_url
-@shared_cache_control
-def contribute_json(request):
- return static.serve(request, "contribute.json", document_root=settings.ROOT)
-
-
-@shared_cache_control
def home(request):
"""Home page."""
- context = {}
- # Need for both wiki and react homepage
- context["updates"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])
-
- # The default template name
- template_name = "landing/react_homepage.html"
- if is_wiki(request):
- template_name = "landing/homepage.html"
- context["default_filters"] = Filter.objects.default_filters()
- return render(request, template_name, context)
+ return HttpResponse(
+ """
+ <html>
+ End of an era. Kuma's no longer rendering a home page.<br>
+ See project Yari.
+ </html>
+ """,
+ content_type="text/html",
+ )
@ensure_wiki_domain
@@ -43,13 +31,6 @@
return redirect("home")
-@ensure_wiki_domain
-@shared_cache_control
-def promote_buttons(request):
- """Bug 646192: MDN affiliate buttons"""
- return render(request, "landing/promote_buttons.html")
-
-
ROBOTS_ALL_ALLOWED_TXT = """\
User-agent: *
Sitemap: https://wiki.developer.mozilla.org/sitemap.xml
| {"golden_diff": "diff --git a/kuma/landing/urls.py b/kuma/landing/urls.py\n--- a/kuma/landing/urls.py\n+++ b/kuma/landing/urls.py\n@@ -11,12 +11,9 @@\n lang_urlpatterns = [\n re_path(r\"^$\", views.home, name=\"home\"),\n re_path(r\"^maintenance-mode/?$\", views.maintenance_mode, name=\"maintenance_mode\"),\n- re_path(r\"^promote/?$\", views.promote_buttons, name=\"promote\"),\n- re_path(r\"^promote/buttons/?$\", views.promote_buttons, name=\"promote_buttons\"),\n ]\n \n urlpatterns = [\n- re_path(r\"^contribute\\.json$\", views.contribute_json, name=\"contribute_json\"),\n re_path(r\"^robots.txt$\", views.robots_txt, name=\"robots_txt\"),\n re_path(\n r\"^favicon.ico$\",\ndiff --git a/kuma/landing/views.py b/kuma/landing/views.py\n--- a/kuma/landing/views.py\n+++ b/kuma/landing/views.py\n@@ -1,37 +1,25 @@\n from django.conf import settings\n from django.http import HttpResponse\n from django.shortcuts import redirect, render\n-from django.views import static\n from django.views.decorators.cache import never_cache\n from django.views.generic import RedirectView\n \n from kuma.core.decorators import ensure_wiki_domain, shared_cache_control\n-from kuma.core.utils import is_wiki\n-from kuma.feeder.models import Bundle\n-from kuma.feeder.sections import SECTION_HACKS\n-from kuma.search.models import Filter\n \n from .utils import favicon_url\n \n \n-@shared_cache_control\n-def contribute_json(request):\n- return static.serve(request, \"contribute.json\", document_root=settings.ROOT)\n-\n-\n-@shared_cache_control\n def home(request):\n \"\"\"Home page.\"\"\"\n- context = {}\n- # Need for both wiki and react homepage\n- context[\"updates\"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n-\n- # The default template name\n- template_name = \"landing/react_homepage.html\"\n- if is_wiki(request):\n- template_name = \"landing/homepage.html\"\n- context[\"default_filters\"] = Filter.objects.default_filters()\n- return render(request, template_name, context)\n+ return HttpResponse(\n+ \"\"\"\n+ <html>\n+ End of an era. 
Kuma's no longer rendering a home page.<br>\n+ See project Yari.\n+ </html>\n+ \"\"\",\n+ content_type=\"text/html\",\n+ )\n \n \n @ensure_wiki_domain\n@@ -43,13 +31,6 @@\n return redirect(\"home\")\n \n \n-@ensure_wiki_domain\n-@shared_cache_control\n-def promote_buttons(request):\n- \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n- return render(request, \"landing/promote_buttons.html\")\n-\n-\n ROBOTS_ALL_ALLOWED_TXT = \"\"\"\\\n User-agent: *\n Sitemap: https://wiki.developer.mozilla.org/sitemap.xml\n", "issue": "\ud83e\uddf9 Remove hompage related frontend code\nOnce https://github.com/mdn/yari/issues/2438 lands, we need to go into the Kuma forest and find all things related to the homepage frontend code, chop it down, and clear it out.\n", "before_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views import static\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\nfrom kuma.core.utils import is_wiki\nfrom kuma.feeder.models import Bundle\nfrom kuma.feeder.sections import SECTION_HACKS\nfrom kuma.search.models import Filter\n\nfrom .utils import favicon_url\n\n\n@shared_cache_control\ndef contribute_json(request):\n return static.serve(request, \"contribute.json\", document_root=settings.ROOT)\n\n\n@shared_cache_control\ndef home(request):\n \"\"\"Home page.\"\"\"\n context = {}\n # Need for both wiki and react homepage\n context[\"updates\"] = list(Bundle.objects.recent_entries(SECTION_HACKS.updates)[:5])\n\n # The default template name\n template_name = \"landing/react_homepage.html\"\n if is_wiki(request):\n template_name = \"landing/homepage.html\"\n context[\"default_filters\"] = Filter.objects.default_filters()\n return render(request, template_name, context)\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, \"landing/maintenance-mode.html\")\n else:\n return redirect(\"home\")\n\n\n@ensure_wiki_domain\n@shared_cache_control\ndef promote_buttons(request):\n \"\"\"Bug 646192: MDN affiliate buttons\"\"\"\n return render(request, \"landing/promote_buttons.html\")\n\n\nROBOTS_ALL_ALLOWED_TXT = \"\"\"\\\nUser-agent: *\nSitemap: https://wiki.developer.mozilla.org/sitemap.xml\n\nDisallow:\n\"\"\"\n\nROBOTS_ALLOWED_TXT = \"\"\"\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /api/\nDisallow: /*docs/get-documents\nDisallow: /*docs/Experiment:*\nDisallow: /*$children\nDisallow: /*docs.json\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*profiles*/edit\n\"\"\" + \"\\n\".join(\n \"Disallow: /{locale}/search\".format(locale=locale)\n for locale in settings.ENABLED_LOCALES\n)\n\nROBOTS_GO_AWAY_TXT = \"\"\"\\\nUser-Agent: *\nDisallow: /\n\"\"\"\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n if host == settings.WIKI_HOST:\n robots = ROBOTS_ALL_ALLOWED_TXT\n else:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type=\"text/plain\")\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, 
**kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}, {"content": "from django.urls import re_path\n\nfrom kuma.core.decorators import shared_cache_control\n\nfrom . import views\n\n\nMONTH = 60 * 60 * 24 * 30\n\n\nlang_urlpatterns = [\n re_path(r\"^$\", views.home, name=\"home\"),\n re_path(r\"^maintenance-mode/?$\", views.maintenance_mode, name=\"maintenance_mode\"),\n re_path(r\"^promote/?$\", views.promote_buttons, name=\"promote\"),\n re_path(r\"^promote/buttons/?$\", views.promote_buttons, name=\"promote_buttons\"),\n]\n\nurlpatterns = [\n re_path(r\"^contribute\\.json$\", views.contribute_json, name=\"contribute_json\"),\n re_path(r\"^robots.txt$\", views.robots_txt, name=\"robots_txt\"),\n re_path(\n r\"^favicon.ico$\",\n shared_cache_control(views.FaviconRedirect.as_view(), s_maxage=MONTH),\n name=\"favicon_ico\",\n ),\n]\n", "path": "kuma/landing/urls.py"}], "after_files": [{"content": "from django.conf import settings\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.views.decorators.cache import never_cache\nfrom django.views.generic import RedirectView\n\nfrom kuma.core.decorators import ensure_wiki_domain, shared_cache_control\n\nfrom .utils import favicon_url\n\n\ndef home(request):\n \"\"\"Home page.\"\"\"\n return HttpResponse(\n \"\"\"\n <html>\n End of an era. Kuma's no longer rendering a home page.<br>\n See project Yari.\n </html>\n \"\"\",\n content_type=\"text/html\",\n )\n\n\n@ensure_wiki_domain\n@never_cache\ndef maintenance_mode(request):\n if settings.MAINTENANCE_MODE:\n return render(request, \"landing/maintenance-mode.html\")\n else:\n return redirect(\"home\")\n\n\nROBOTS_ALL_ALLOWED_TXT = \"\"\"\\\nUser-agent: *\nSitemap: https://wiki.developer.mozilla.org/sitemap.xml\n\nDisallow:\n\"\"\"\n\nROBOTS_ALLOWED_TXT = \"\"\"\\\nUser-agent: *\nSitemap: https://developer.mozilla.org/sitemap.xml\n\nDisallow: /api/\nDisallow: /*docs/get-documents\nDisallow: /*docs/Experiment:*\nDisallow: /*$children\nDisallow: /*docs.json\nDisallow: /*/files/\nDisallow: /media\nDisallow: /*profiles*/edit\n\"\"\" + \"\\n\".join(\n \"Disallow: /{locale}/search\".format(locale=locale)\n for locale in settings.ENABLED_LOCALES\n)\n\nROBOTS_GO_AWAY_TXT = \"\"\"\\\nUser-Agent: *\nDisallow: /\n\"\"\"\n\n\n@shared_cache_control\ndef robots_txt(request):\n \"\"\"Serve robots.txt that allows or forbids robots.\"\"\"\n host = request.get_host()\n if host in settings.ALLOW_ROBOTS_DOMAINS:\n robots = \"\"\n elif host in settings.ALLOW_ROBOTS_WEB_DOMAINS:\n if host == settings.WIKI_HOST:\n robots = ROBOTS_ALL_ALLOWED_TXT\n else:\n robots = ROBOTS_ALLOWED_TXT\n else:\n robots = ROBOTS_GO_AWAY_TXT\n return HttpResponse(robots, content_type=\"text/plain\")\n\n\nclass FaviconRedirect(RedirectView):\n \"\"\"Redirect to the favicon in the static img folder (bug 1402497)\"\"\"\n\n def get_redirect_url(self, *args, **kwargs):\n return favicon_url()\n", "path": "kuma/landing/views.py"}, {"content": "from django.urls import re_path\n\nfrom kuma.core.decorators import shared_cache_control\n\nfrom . 
import views\n\n\nMONTH = 60 * 60 * 24 * 30\n\n\nlang_urlpatterns = [\n re_path(r\"^$\", views.home, name=\"home\"),\n re_path(r\"^maintenance-mode/?$\", views.maintenance_mode, name=\"maintenance_mode\"),\n]\n\nurlpatterns = [\n re_path(r\"^robots.txt$\", views.robots_txt, name=\"robots_txt\"),\n re_path(\n r\"^favicon.ico$\",\n shared_cache_control(views.FaviconRedirect.as_view(), s_maxage=MONTH),\n name=\"favicon_ico\",\n ),\n]\n", "path": "kuma/landing/urls.py"}]} | 1,474 | 654 |
gh_patches_debug_41527 | rasdani/github-patches | git_diff | streamlink__streamlink-2643 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ITV Player unable to open URL
- [x] This is a plugin issue and I have read the contribution guidelines.
### Description
ITV Player/Hub for the main ITV channel do not open and give an error message.
### Reproduction steps / Explicit stream URLs to test
Type "streamlink itv.com/hub/itv".
Other ITV Hub URL's such as hub/itv2 or hub/citv continue to work.
### Log output
[cli][info] Found matching plugin itvplayer for URL itv.com/hub/itv
error: Unable to open URL: http://cdn.dar.itv.com/private/aHR0cDovL2NzbS1lLnBsYXkuZGFyLml0di5jb20vY3NtL2V4dGxpdmUvMjQ3MTYyMTUubTN1OD95by5hYz10cnVlJnlvLmFkPWZhbHNlJnBtPWZyZWUmcmFuZG9tPTU2OTk0ODU2ODc0ODA1NzE0MjAmYXJlYT1pdHZwbGF5ZXIuc2ltdWxjYXN0JnNvdXJjZT1zaW11bGNhc3QmaGRldmlkPXgmc2l6ZT1wYXR0ZXJuJnN1cGVydGFnPWRmbHQsc2ltJnVzPWFubyZzaXRlPWl0diZvc3Zlcj0xMCZjaGFuYnJhbmQ9aXR2MSZvcz13aW5kb3dzJmhtb2Q9NjYmcHY9YnJvd3Nlci40LjEmcGxheWVyPWh0bWw1LmRlc2t0b3AmZG09bmxpJmhtYW49Y2hyb21lJnRkdXI9MjE2MDAmdHBhcnRzPTEmYXJwPXgmZGVmYXVsdD1EZWZhdWx0UGF0dGVybiZwbGlzdD1zaW1hZHJlcGxhY2VtZW50JnBsZmNpZD1zaW1hZHJlcGxhY2VtZW50/endpoint?hdnea=st=1563543051~exp=1563564651~acl=/*~data=nohubplus~hmac=0ebaf6d4f22f101378a6c7ed3903c1999f6b28efd29ee05763e5ec7bf0d27896&yo.channel=24716215 (404 Client Error: for url: http://csm-e.play.dar.itv.com/csm/extlive/24716215.m3u8?yo.ac=true&yo.ad=false&pm=free&random=5699485687480571420&area=itvplayer.simulcast&source=simulcast&hdevid=x&size=pattern&supertag=dflt,sim&us=ano&site=itv&osver=10&chanbrand=itv1&os=windows&hmod=66&pv=browser.4.1&player=html5.desktop&dm=nli&hman=chrome&tdur=21600&tparts=1&arp=x&default=DefaultPattern&plist=simadreplacement&plfcid=simadreplacement)
### Additional comments, screenshots, etc.
I have noticed the site plays a ITV Hub promo when starting a livestream on the website itself before loading that stream. This does not occur for the other channels.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/itvplayer.py`
Content:
```
1 import json
2 import logging
3 import re
4
5 from streamlink.compat import urljoin
6 from streamlink.plugin import Plugin
7 from streamlink.plugin.api import useragents, validate
8 from streamlink.plugin.api.utils import itertags
9 from streamlink.stream import HLSStream
10
11 log = logging.getLogger(__name__)
12
13
14 class ITVPlayer(Plugin):
15 _url_re = re.compile(r"https?://(?:www.)?itv.com/hub/(?P<stream>.+)")
16 _video_info_schema = validate.Schema({
17 "StatusCode": 200,
18 "AdditionalInfo": {
19 "Message": validate.any(None, validate.text)
20 },
21 "Playlist": {
22 "VideoType": validate.text,
23 "Video": {
24 "Subtitles": validate.any(None, [{
25 "Href": validate.url(),
26 }]),
27 "Base": validate.url(),
28 "MediaFiles": [
29 {"Href": validate.text,
30 "KeyServiceUrl": validate.any(None, validate.url())}
31 ]
32 }
33 }
34 })
35
36 @classmethod
37 def can_handle_url(cls, url):
38 match = cls._url_re.match(url)
39 return match is not None
40
41 @property
42 def device_info(self):
43 return {"user": {},
44 "device": {"manufacturer": "Chrome", "model": "66",
45 "os": {"name": "Windows", "version": "10", "type": "desktop"}},
46 "client": {"version": "4.1", "id": "browser"},
47 "variantAvailability": {"featureset": {"min": ["hls", "aes"],
48 "max": ["hls", "aes"]},
49 "platformTag": "dotcom"}}
50
51 def video_info(self):
52 page = self.session.http.get(self.url)
53 for div in itertags(page.text, 'div'):
54 if div.attributes.get("id") == "video":
55 return div.attributes
56
57 def _get_streams(self):
58 """
59 Find all the streams for the ITV url
60 :return: Mapping of quality to stream
61 """
62 self.session.http.headers.update({"User-Agent": useragents.FIREFOX})
63 video_info = self.video_info()
64 video_info_url = video_info.get("data-html5-playlist") or video_info.get("data-video-id")
65
66 res = self.session.http.post(video_info_url,
67 data=json.dumps(self.device_info),
68 headers={"hmac": video_info.get("data-video-hmac")})
69 data = self.session.http.json(res, schema=self._video_info_schema)
70
71 log.debug("Video ID info response: {0}".format(data))
72
73 stype = data['Playlist']['VideoType']
74
75 for media in data['Playlist']['Video']['MediaFiles']:
76 url = urljoin(data['Playlist']['Video']['Base'], media['Href'])
77 name_fmt = "{pixels}_{bitrate}" if stype == "CATCHUP" else None
78 for s in HLSStream.parse_variant_playlist(self.session, url, name_fmt=name_fmt).items():
79 yield s
80
81
82
83 __plugin__ = ITVPlayer
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/itvplayer.py b/src/streamlink/plugins/itvplayer.py
--- a/src/streamlink/plugins/itvplayer.py
+++ b/src/streamlink/plugins/itvplayer.py
@@ -6,13 +6,14 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import useragents, validate
from streamlink.plugin.api.utils import itertags
-from streamlink.stream import HLSStream
+from streamlink.stream import HLSStream, RTMPStream
log = logging.getLogger(__name__)
class ITVPlayer(Plugin):
_url_re = re.compile(r"https?://(?:www.)?itv.com/hub/(?P<stream>.+)")
+ swf_url = "https://mediaplayer.itv.com/2.19.5%2Bbuild.a23aa62b1e/ITVMediaPlayer.swf"
_video_info_schema = validate.Schema({
"StatusCode": 200,
"AdditionalInfo": {
@@ -35,8 +36,7 @@
@classmethod
def can_handle_url(cls, url):
- match = cls._url_re.match(url)
- return match is not None
+ return cls._url_re.match(url) is not None
@property
def device_info(self):
@@ -54,18 +54,11 @@
if div.attributes.get("id") == "video":
return div.attributes
- def _get_streams(self):
- """
- Find all the streams for the ITV url
- :return: Mapping of quality to stream
- """
- self.session.http.headers.update({"User-Agent": useragents.FIREFOX})
+ def _get_html5_streams(self, video_info_url):
video_info = self.video_info()
- video_info_url = video_info.get("data-html5-playlist") or video_info.get("data-video-id")
-
res = self.session.http.post(video_info_url,
- data=json.dumps(self.device_info),
- headers={"hmac": video_info.get("data-video-hmac")})
+ data=json.dumps(self.device_info),
+ headers={"hmac": video_info.get("data-video-hmac")})
data = self.session.http.json(res, schema=self._video_info_schema)
log.debug("Video ID info response: {0}".format(data))
@@ -78,6 +71,31 @@
for s in HLSStream.parse_variant_playlist(self.session, url, name_fmt=name_fmt).items():
yield s
+ def _get_rtmp_streams(self, video_info_url):
+ log.debug("XML data path: {0}".format(video_info_url))
+ res = self.session.http.get(video_info_url)
+ playlist = self.session.http.xml(res, ignore_ns=True)
+ mediafiles = playlist.find(".//Playlist/VideoEntries/Video/MediaFiles")
+ playpath = mediafiles.find("./MediaFile/URL")
+ return {"live": RTMPStream(self.session, {"rtmp": mediafiles.attrib.get("base"),
+ "playpath": playpath.text,
+ "live": True,
+ "swfVfy": self.swf_url
+ })}
+
+ def _get_streams(self):
+ """
+ Find all the streams for the ITV url
+ :return: Mapping of quality to stream
+ """
+ self.session.http.headers.update({"User-Agent": useragents.FIREFOX})
+ stream = self._url_re.match(self.url).group("stream")
+ video_info = self.video_info()
+ video_info_url = video_info.get("data-video-id" if stream.lower() in ("itv", "itv4") else "data-html5-playlist")
+ if video_info_url.endswith(".xml"):
+ return self._get_rtmp_streams(video_info_url)
+ else:
+ return self._get_html5_streams(video_info_url)
__plugin__ = ITVPlayer
| {"golden_diff": "diff --git a/src/streamlink/plugins/itvplayer.py b/src/streamlink/plugins/itvplayer.py\n--- a/src/streamlink/plugins/itvplayer.py\n+++ b/src/streamlink/plugins/itvplayer.py\n@@ -6,13 +6,14 @@\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import useragents, validate\n from streamlink.plugin.api.utils import itertags\n-from streamlink.stream import HLSStream\n+from streamlink.stream import HLSStream, RTMPStream\n \n log = logging.getLogger(__name__)\n \n \n class ITVPlayer(Plugin):\n _url_re = re.compile(r\"https?://(?:www.)?itv.com/hub/(?P<stream>.+)\")\n+ swf_url = \"https://mediaplayer.itv.com/2.19.5%2Bbuild.a23aa62b1e/ITVMediaPlayer.swf\"\n _video_info_schema = validate.Schema({\n \"StatusCode\": 200,\n \"AdditionalInfo\": {\n@@ -35,8 +36,7 @@\n \n @classmethod\n def can_handle_url(cls, url):\n- match = cls._url_re.match(url)\n- return match is not None\n+ return cls._url_re.match(url) is not None\n \n @property\n def device_info(self):\n@@ -54,18 +54,11 @@\n if div.attributes.get(\"id\") == \"video\":\n return div.attributes\n \n- def _get_streams(self):\n- \"\"\"\n- Find all the streams for the ITV url\n- :return: Mapping of quality to stream\n- \"\"\"\n- self.session.http.headers.update({\"User-Agent\": useragents.FIREFOX})\n+ def _get_html5_streams(self, video_info_url):\n video_info = self.video_info()\n- video_info_url = video_info.get(\"data-html5-playlist\") or video_info.get(\"data-video-id\")\n-\n res = self.session.http.post(video_info_url,\n- data=json.dumps(self.device_info),\n- headers={\"hmac\": video_info.get(\"data-video-hmac\")})\n+ data=json.dumps(self.device_info),\n+ headers={\"hmac\": video_info.get(\"data-video-hmac\")})\n data = self.session.http.json(res, schema=self._video_info_schema)\n \n log.debug(\"Video ID info response: {0}\".format(data))\n@@ -78,6 +71,31 @@\n for s in HLSStream.parse_variant_playlist(self.session, url, name_fmt=name_fmt).items():\n yield s\n \n+ def _get_rtmp_streams(self, video_info_url):\n+ log.debug(\"XML data path: {0}\".format(video_info_url))\n+ res = self.session.http.get(video_info_url)\n+ playlist = self.session.http.xml(res, ignore_ns=True)\n+ mediafiles = playlist.find(\".//Playlist/VideoEntries/Video/MediaFiles\")\n+ playpath = mediafiles.find(\"./MediaFile/URL\")\n+ return {\"live\": RTMPStream(self.session, {\"rtmp\": mediafiles.attrib.get(\"base\"),\n+ \"playpath\": playpath.text,\n+ \"live\": True,\n+ \"swfVfy\": self.swf_url\n+ })}\n+\n+ def _get_streams(self):\n+ \"\"\"\n+ Find all the streams for the ITV url\n+ :return: Mapping of quality to stream\n+ \"\"\"\n+ self.session.http.headers.update({\"User-Agent\": useragents.FIREFOX})\n+ stream = self._url_re.match(self.url).group(\"stream\")\n+ video_info = self.video_info()\n+ video_info_url = video_info.get(\"data-video-id\" if stream.lower() in (\"itv\", \"itv4\") else \"data-html5-playlist\")\n+ if video_info_url.endswith(\".xml\"):\n+ return self._get_rtmp_streams(video_info_url)\n+ else:\n+ return self._get_html5_streams(video_info_url)\n \n \n __plugin__ = ITVPlayer\n", "issue": "ITV Player unable to open URL\n- [x] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\nITV Player/Hub for the main ITV channel do not open and give an error message.\r\n\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\nType \"streamlink itv.com/hub/itv\".\r\n\r\nOther ITV Hub URL's such as hub/itv2 or hub/citv continue to work.\r\n\r\n### Log output\r\n\r\n[cli][info] Found 
matching plugin itvplayer for URL itv.com/hub/itv\r\nerror: Unable to open URL: http://cdn.dar.itv.com/private/aHR0cDovL2NzbS1lLnBsYXkuZGFyLml0di5jb20vY3NtL2V4dGxpdmUvMjQ3MTYyMTUubTN1OD95by5hYz10cnVlJnlvLmFkPWZhbHNlJnBtPWZyZWUmcmFuZG9tPTU2OTk0ODU2ODc0ODA1NzE0MjAmYXJlYT1pdHZwbGF5ZXIuc2ltdWxjYXN0JnNvdXJjZT1zaW11bGNhc3QmaGRldmlkPXgmc2l6ZT1wYXR0ZXJuJnN1cGVydGFnPWRmbHQsc2ltJnVzPWFubyZzaXRlPWl0diZvc3Zlcj0xMCZjaGFuYnJhbmQ9aXR2MSZvcz13aW5kb3dzJmhtb2Q9NjYmcHY9YnJvd3Nlci40LjEmcGxheWVyPWh0bWw1LmRlc2t0b3AmZG09bmxpJmhtYW49Y2hyb21lJnRkdXI9MjE2MDAmdHBhcnRzPTEmYXJwPXgmZGVmYXVsdD1EZWZhdWx0UGF0dGVybiZwbGlzdD1zaW1hZHJlcGxhY2VtZW50JnBsZmNpZD1zaW1hZHJlcGxhY2VtZW50/endpoint?hdnea=st=1563543051~exp=1563564651~acl=/*~data=nohubplus~hmac=0ebaf6d4f22f101378a6c7ed3903c1999f6b28efd29ee05763e5ec7bf0d27896&yo.channel=24716215 (404 Client Error: for url: http://csm-e.play.dar.itv.com/csm/extlive/24716215.m3u8?yo.ac=true&yo.ad=false&pm=free&random=5699485687480571420&area=itvplayer.simulcast&source=simulcast&hdevid=x&size=pattern&supertag=dflt,sim&us=ano&site=itv&osver=10&chanbrand=itv1&os=windows&hmod=66&pv=browser.4.1&player=html5.desktop&dm=nli&hman=chrome&tdur=21600&tparts=1&arp=x&default=DefaultPattern&plist=simadreplacement&plfcid=simadreplacement)\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\nI have noticed the site plays a ITV Hub promo when starting a livestream on the website itself before loading that stream. This does not occur for the other channels.\n", "before_files": [{"content": "import json\nimport logging\nimport re\n\nfrom streamlink.compat import urljoin\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.plugin.api.utils import itertags\nfrom streamlink.stream import HLSStream\n\nlog = logging.getLogger(__name__)\n\n\nclass ITVPlayer(Plugin):\n _url_re = re.compile(r\"https?://(?:www.)?itv.com/hub/(?P<stream>.+)\")\n _video_info_schema = validate.Schema({\n \"StatusCode\": 200,\n \"AdditionalInfo\": {\n \"Message\": validate.any(None, validate.text)\n },\n \"Playlist\": {\n \"VideoType\": validate.text,\n \"Video\": {\n \"Subtitles\": validate.any(None, [{\n \"Href\": validate.url(),\n }]),\n \"Base\": validate.url(),\n \"MediaFiles\": [\n {\"Href\": validate.text,\n \"KeyServiceUrl\": validate.any(None, validate.url())}\n ]\n }\n }\n })\n\n @classmethod\n def can_handle_url(cls, url):\n match = cls._url_re.match(url)\n return match is not None\n\n @property\n def device_info(self):\n return {\"user\": {},\n \"device\": {\"manufacturer\": \"Chrome\", \"model\": \"66\",\n \"os\": {\"name\": \"Windows\", \"version\": \"10\", \"type\": \"desktop\"}},\n \"client\": {\"version\": \"4.1\", \"id\": \"browser\"},\n \"variantAvailability\": {\"featureset\": {\"min\": [\"hls\", \"aes\"],\n \"max\": [\"hls\", \"aes\"]},\n \"platformTag\": \"dotcom\"}}\n\n def video_info(self):\n page = self.session.http.get(self.url)\n for div in itertags(page.text, 'div'):\n if div.attributes.get(\"id\") == \"video\":\n return div.attributes\n\n def _get_streams(self):\n \"\"\"\n Find all the streams for the ITV url\n :return: Mapping of quality to stream\n \"\"\"\n self.session.http.headers.update({\"User-Agent\": useragents.FIREFOX})\n video_info = self.video_info()\n video_info_url = video_info.get(\"data-html5-playlist\") or video_info.get(\"data-video-id\")\n\n res = self.session.http.post(video_info_url,\n data=json.dumps(self.device_info),\n headers={\"hmac\": video_info.get(\"data-video-hmac\")})\n data = self.session.http.json(res, 
schema=self._video_info_schema)\n\n log.debug(\"Video ID info response: {0}\".format(data))\n\n stype = data['Playlist']['VideoType']\n\n for media in data['Playlist']['Video']['MediaFiles']:\n url = urljoin(data['Playlist']['Video']['Base'], media['Href'])\n name_fmt = \"{pixels}_{bitrate}\" if stype == \"CATCHUP\" else None\n for s in HLSStream.parse_variant_playlist(self.session, url, name_fmt=name_fmt).items():\n yield s\n\n\n\n__plugin__ = ITVPlayer\n", "path": "src/streamlink/plugins/itvplayer.py"}], "after_files": [{"content": "import json\nimport logging\nimport re\n\nfrom streamlink.compat import urljoin\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import useragents, validate\nfrom streamlink.plugin.api.utils import itertags\nfrom streamlink.stream import HLSStream, RTMPStream\n\nlog = logging.getLogger(__name__)\n\n\nclass ITVPlayer(Plugin):\n _url_re = re.compile(r\"https?://(?:www.)?itv.com/hub/(?P<stream>.+)\")\n swf_url = \"https://mediaplayer.itv.com/2.19.5%2Bbuild.a23aa62b1e/ITVMediaPlayer.swf\"\n _video_info_schema = validate.Schema({\n \"StatusCode\": 200,\n \"AdditionalInfo\": {\n \"Message\": validate.any(None, validate.text)\n },\n \"Playlist\": {\n \"VideoType\": validate.text,\n \"Video\": {\n \"Subtitles\": validate.any(None, [{\n \"Href\": validate.url(),\n }]),\n \"Base\": validate.url(),\n \"MediaFiles\": [\n {\"Href\": validate.text,\n \"KeyServiceUrl\": validate.any(None, validate.url())}\n ]\n }\n }\n })\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._url_re.match(url) is not None\n\n @property\n def device_info(self):\n return {\"user\": {},\n \"device\": {\"manufacturer\": \"Chrome\", \"model\": \"66\",\n \"os\": {\"name\": \"Windows\", \"version\": \"10\", \"type\": \"desktop\"}},\n \"client\": {\"version\": \"4.1\", \"id\": \"browser\"},\n \"variantAvailability\": {\"featureset\": {\"min\": [\"hls\", \"aes\"],\n \"max\": [\"hls\", \"aes\"]},\n \"platformTag\": \"dotcom\"}}\n\n def video_info(self):\n page = self.session.http.get(self.url)\n for div in itertags(page.text, 'div'):\n if div.attributes.get(\"id\") == \"video\":\n return div.attributes\n\n def _get_html5_streams(self, video_info_url):\n video_info = self.video_info()\n res = self.session.http.post(video_info_url,\n data=json.dumps(self.device_info),\n headers={\"hmac\": video_info.get(\"data-video-hmac\")})\n data = self.session.http.json(res, schema=self._video_info_schema)\n\n log.debug(\"Video ID info response: {0}\".format(data))\n\n stype = data['Playlist']['VideoType']\n\n for media in data['Playlist']['Video']['MediaFiles']:\n url = urljoin(data['Playlist']['Video']['Base'], media['Href'])\n name_fmt = \"{pixels}_{bitrate}\" if stype == \"CATCHUP\" else None\n for s in HLSStream.parse_variant_playlist(self.session, url, name_fmt=name_fmt).items():\n yield s\n\n def _get_rtmp_streams(self, video_info_url):\n log.debug(\"XML data path: {0}\".format(video_info_url))\n res = self.session.http.get(video_info_url)\n playlist = self.session.http.xml(res, ignore_ns=True)\n mediafiles = playlist.find(\".//Playlist/VideoEntries/Video/MediaFiles\")\n playpath = mediafiles.find(\"./MediaFile/URL\")\n return {\"live\": RTMPStream(self.session, {\"rtmp\": mediafiles.attrib.get(\"base\"),\n \"playpath\": playpath.text,\n \"live\": True,\n \"swfVfy\": self.swf_url\n })}\n\n def _get_streams(self):\n \"\"\"\n Find all the streams for the ITV url\n :return: Mapping of quality to stream\n \"\"\"\n self.session.http.headers.update({\"User-Agent\": useragents.FIREFOX})\n 
stream = self._url_re.match(self.url).group(\"stream\")\n video_info = self.video_info()\n video_info_url = video_info.get(\"data-video-id\" if stream.lower() in (\"itv\", \"itv4\") else \"data-html5-playlist\")\n if video_info_url.endswith(\".xml\"):\n return self._get_rtmp_streams(video_info_url)\n else:\n return self._get_html5_streams(video_info_url)\n\n\n__plugin__ = ITVPlayer\n", "path": "src/streamlink/plugins/itvplayer.py"}]} | 1,980 | 861 |
gh_patches_debug_34783 | rasdani/github-patches | git_diff | netbox-community__netbox-2957 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API Docs - Typo error on /virtualisation/interfaces/
<!--
Before opening a new issue, please search through the existing issues to
see if your topic has already been addressed. Note that you may need to
remove the "is:open" filter from the search bar to include closed issues.
Check the appropriate type for your issue below by placing an x between the
brackets. For assistance with installation issues, or for any other issues
other than those listed below, please raise your topic for discussion on
our mailing list:
https://groups.google.com/forum/#!forum/netbox-discuss
Please note that issues which do not fall under any of the below categories
will be closed. Due to an excessive backlog of feature requests, we are
not currently accepting any proposals which extend NetBox's feature scope.
Do not prepend any sort of tag to your issue's title. An administrator will
review your issue and assign labels as appropriate.
--->
### Issue type
[ ] Feature request <!-- An enhancement of existing functionality -->
[ ] Bug report <!-- Unexpected or erroneous behavior -->
[X] Documentation <!-- A modification to the documentation -->
<!--
Please describe the environment in which you are running NetBox. (Be sure
to verify that you are running the latest stable release of NetBox before
submitting a bug report.) If you are submitting a bug report and have made
any changes to the code base, please first validate that your bug can be
recreated while running an official release.
-->
### Environment
* Python version: 3.6.5
* NetBox version: 2.3.3
<!--
BUG REPORTS must include:
* A list of the steps needed for someone else to reproduce the bug
* A description of the expected and observed behavior
* Any relevant error messages (screenshots may also help)
FEATURE REQUESTS must include:
* A detailed description of the proposed functionality
* A use case for the new feature
* A rough description of any necessary changes to the database schema
* Any relevant third-party libraries which would be needed
-->
### Description
Hello,
It seems there is a small error in the API documentation.
On the `/virtualisation/interfaces/` endpoint, the `POST` method marks the field `device` as mandatory, but the API only responds to the field `virtual_machine`.
The error message returned by the API is also misleading:
```
{
"non_field_errors": [
"An interface must belong to either a device or a virtual machine."
]
}
```
It seems the error is also present for the `PUT` and `PATCH` methods on `/virtualisation/interfaces/{id}`.
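To make the behaviour concrete, here is a minimal sketch of a request the endpoint does accept; the hostname, token, interface name and virtual machine ID below are placeholders, not values from a real install:

```python
import requests

# Placeholders throughout: host, token, interface name and VM ID are made up.
url = "https://netbox.example.com/api/virtualization/interfaces/"
headers = {"Authorization": "Token 0123456789abcdef0123456789abcdef01234567"}

# The generated docs mark "device" as required, but the endpoint actually
# expects "virtual_machine" when creating a VM interface.
payload = {"virtual_machine": 1, "name": "eth0"}

response = requests.post(url, json=payload, headers=headers)
print(response.status_code, response.json())
```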
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `netbox/utilities/custom_inspectors.py`
Content:
```
1 from drf_yasg import openapi
2 from drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema
3 from rest_framework.fields import ChoiceField
4 from rest_framework.relations import ManyRelatedField
5 from taggit_serializer.serializers import TagListSerializerField
6
7 from extras.api.customfields import CustomFieldsSerializer
8 from utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer
9
10
11 class NetBoxSwaggerAutoSchema(SwaggerAutoSchema):
12 def get_request_serializer(self):
13 serializer = super().get_request_serializer()
14
15 if serializer is not None and self.method in self.implicit_body_methods:
16 properties = {}
17 for child_name, child in serializer.fields.items():
18 if isinstance(child, (ChoiceField, WritableNestedSerializer)):
19 properties[child_name] = None
20 elif isinstance(child, ManyRelatedField) and isinstance(child.child_relation, SerializedPKRelatedField):
21 properties[child_name] = None
22
23 if properties:
24 writable_class = type('Writable' + type(serializer).__name__, (type(serializer),), properties)
25 serializer = writable_class()
26
27 return serializer
28
29
30 class SerializedPKRelatedFieldInspector(FieldInspector):
31 def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):
32 SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)
33 if isinstance(field, SerializedPKRelatedField):
34 return self.probe_field_inspectors(field.serializer(), ChildSwaggerType, use_references)
35
36 return NotHandled
37
38
39 class TagListFieldInspector(FieldInspector):
40 def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):
41 SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)
42 if isinstance(field, TagListSerializerField):
43 child_schema = self.probe_field_inspectors(field.child, ChildSwaggerType, use_references)
44 return SwaggerType(
45 type=openapi.TYPE_ARRAY,
46 items=child_schema,
47 )
48
49 return NotHandled
50
51
52 class CustomChoiceFieldInspector(FieldInspector):
53 def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):
54 # this returns a callable which extracts title, description and other stuff
55 # https://drf-yasg.readthedocs.io/en/stable/_modules/drf_yasg/inspectors/base.html#FieldInspector._get_partial_types
56 SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)
57
58 if isinstance(field, ChoiceField):
59 value_schema = openapi.Schema(type=openapi.TYPE_INTEGER)
60
61 choices = list(field._choices.keys())
62 if set([None] + choices) == {None, True, False}:
63 # DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be
64 # differentiated since they each have subtly different values in their choice keys.
65 # - subdevice_role and connection_status are booleans, although subdevice_role includes None
66 # - face is an integer set {0, 1} which is easily confused with {False, True}
67 schema_type = openapi.TYPE_INTEGER
68 if all(type(x) == bool for x in [c for c in choices if c is not None]):
69 schema_type = openapi.TYPE_BOOLEAN
70 value_schema = openapi.Schema(type=schema_type)
71 value_schema['x-nullable'] = True
72
73 schema = SwaggerType(type=openapi.TYPE_OBJECT, required=["label", "value"], properties={
74 "label": openapi.Schema(type=openapi.TYPE_STRING),
75 "value": value_schema
76 })
77
78 return schema
79
80 elif isinstance(field, CustomFieldsSerializer):
81 schema = SwaggerType(type=openapi.TYPE_OBJECT)
82 return schema
83
84 return NotHandled
85
86
87 class NullableBooleanFieldInspector(FieldInspector):
88 def process_result(self, result, method_name, obj, **kwargs):
89
90 if isinstance(result, openapi.Schema) and isinstance(obj, ChoiceField) and result.type == 'boolean':
91 keys = obj.choices.keys()
92 if set(keys) == {None, True, False}:
93 result['x-nullable'] = True
94 result.type = 'boolean'
95
96 return result
97
98
99 class IdInFilterInspector(FilterInspector):
100 def process_result(self, result, method_name, obj, **kwargs):
101 if isinstance(result, list):
102 params = [p for p in result if isinstance(p, openapi.Parameter) and p.name == 'id__in']
103 for p in params:
104 p.type = 'string'
105
106 return result
107
108
109 class NullablePaginatorInspector(PaginatorInspector):
110 def process_result(self, result, method_name, obj, **kwargs):
111 if method_name == 'get_paginated_response' and isinstance(result, openapi.Schema):
112 next = result.properties['next']
113 if isinstance(next, openapi.Schema):
114 next['x-nullable'] = True
115 previous = result.properties['previous']
116 if isinstance(previous, openapi.Schema):
117 previous['x-nullable'] = True
118
119 return result
120
```
--- END FILES ---
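One detail worth calling out from the file above: `get_request_serializer()` builds a `Writable<Name>` serializer class on the fly with `type()` (line 24 of the listing). A standalone sketch of that pattern, using made-up names rather than real NetBox serializers:

```python
# Standalone sketch (names are made up): build a subclass dynamically with type(),
# overriding selected class attributes, the same pattern used on line 24 above.
class InterfaceSerializer:
    read_only = True

overrides = {'read_only': False}

WritableInterfaceSerializer = type(
    'Writable' + InterfaceSerializer.__name__,  # new class name
    (InterfaceSerializer,),                     # base classes
    overrides,                                  # attributes to override
)

print(WritableInterfaceSerializer.__name__)     # WritableInterfaceSerializer
print(WritableInterfaceSerializer().read_only)  # False
```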
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/netbox/utilities/custom_inspectors.py b/netbox/utilities/custom_inspectors.py
--- a/netbox/utilities/custom_inspectors.py
+++ b/netbox/utilities/custom_inspectors.py
@@ -1,14 +1,24 @@
from drf_yasg import openapi
from drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema
+from drf_yasg.utils import get_serializer_ref_name
from rest_framework.fields import ChoiceField
from rest_framework.relations import ManyRelatedField
from taggit_serializer.serializers import TagListSerializerField
+from dcim.api.serializers import InterfaceSerializer as DCIMInterfaceSerializer
+from virtualization.api.serializers import InterfaceSerializer as VirtualMachineInterfaceSerializer
from extras.api.customfields import CustomFieldsSerializer
from utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer
+# this might be ugly, but it limits drf_yasg-specific code to this file
+DCIMInterfaceSerializer.Meta.ref_name = 'DCIMInterface'
+VirtualMachineInterfaceSerializer.Meta.ref_name = 'VirtualMachineInterface'
+
+
class NetBoxSwaggerAutoSchema(SwaggerAutoSchema):
+ writable_serializers = {}
+
def get_request_serializer(self):
serializer = super().get_request_serializer()
@@ -21,7 +31,17 @@
properties[child_name] = None
if properties:
- writable_class = type('Writable' + type(serializer).__name__, (type(serializer),), properties)
+ if type(serializer) not in self.writable_serializers:
+ writable_name = 'Writable' + type(serializer).__name__
+ meta_class = getattr(type(serializer), 'Meta', None)
+ if meta_class:
+ ref_name = 'Writable' + get_serializer_ref_name(serializer)
+ writable_meta = type('Meta', (meta_class,), {'ref_name': ref_name})
+ properties['Meta'] = writable_meta
+
+ self.writable_serializers[type(serializer)] = type(writable_name, (type(serializer),), properties)
+
+ writable_class = self.writable_serializers[type(serializer)]
serializer = writable_class()
return serializer
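Judging from the patch, the documentation bug appears to come from a drf-yasg naming collision: both `dcim.api.serializers` and `virtualization.api.serializers` define a class named `InterfaceSerializer`, and drf-yasg keys its schema definitions on the serializer's ref name (derived from the class name by default), so one request body (the DCIM one, which requires `device`) shadowed the other in the generated docs. Setting explicit `ref_name` values gives each class its own definition. A minimal illustration of that convention follows; the classes and fields are stand-ins, not the real NetBox serializers, and it assumes Django REST Framework and drf-yasg are installed:

```python
from rest_framework import serializers

# Stand-ins only; fields and names are illustrative.
class DCIMInterfaceSerializer(serializers.Serializer):
    device = serializers.IntegerField()

    class Meta:
        ref_name = 'DCIMInterface'            # schema name drf-yasg will use

class VMInterfaceSerializer(serializers.Serializer):
    virtual_machine = serializers.IntegerField()

    class Meta:
        ref_name = 'VirtualMachineInterface'  # distinct name, so no collision
```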
| {"golden_diff": "diff --git a/netbox/utilities/custom_inspectors.py b/netbox/utilities/custom_inspectors.py\n--- a/netbox/utilities/custom_inspectors.py\n+++ b/netbox/utilities/custom_inspectors.py\n@@ -1,14 +1,24 @@\n from drf_yasg import openapi\n from drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema\n+from drf_yasg.utils import get_serializer_ref_name\n from rest_framework.fields import ChoiceField\n from rest_framework.relations import ManyRelatedField\n from taggit_serializer.serializers import TagListSerializerField\n \n+from dcim.api.serializers import InterfaceSerializer as DCIMInterfaceSerializer\n+from virtualization.api.serializers import InterfaceSerializer as VirtualMachineInterfaceSerializer\n from extras.api.customfields import CustomFieldsSerializer\n from utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer\n \n \n+# this might be ugly, but it limits drf_yasg-specific code to this file\n+DCIMInterfaceSerializer.Meta.ref_name = 'DCIMInterface'\n+VirtualMachineInterfaceSerializer.Meta.ref_name = 'VirtualMachineInterface'\n+\n+\n class NetBoxSwaggerAutoSchema(SwaggerAutoSchema):\n+ writable_serializers = {}\n+\n def get_request_serializer(self):\n serializer = super().get_request_serializer()\n \n@@ -21,7 +31,17 @@\n properties[child_name] = None\n \n if properties:\n- writable_class = type('Writable' + type(serializer).__name__, (type(serializer),), properties)\n+ if type(serializer) not in self.writable_serializers:\n+ writable_name = 'Writable' + type(serializer).__name__\n+ meta_class = getattr(type(serializer), 'Meta', None)\n+ if meta_class:\n+ ref_name = 'Writable' + get_serializer_ref_name(serializer)\n+ writable_meta = type('Meta', (meta_class,), {'ref_name': ref_name})\n+ properties['Meta'] = writable_meta\n+\n+ self.writable_serializers[type(serializer)] = type(writable_name, (type(serializer),), properties)\n+\n+ writable_class = self.writable_serializers[type(serializer)]\n serializer = writable_class()\n \n return serializer\n", "issue": "API Docs - Typo error on /virtualisation/interfaces/\n<!--\r\n Before opening a new issue, please search through the existing issues to\r\n see if your topic has already been addressed. Note that you may need to\r\n remove the \"is:open\" filter from the search bar to include closed issues.\r\n\r\n Check the appropriate type for your issue below by placing an x between the\r\n brackets. For assistance with installation issues, or for any other issues\r\n other than those listed below, please raise your topic for discussion on\r\n our mailing list:\r\n\r\n https://groups.google.com/forum/#!forum/netbox-discuss\r\n\r\n Please note that issues which do not fall under any of the below categories\r\n will be closed. Due to an excessive backlog of feature requests, we are\r\n not currently accepting any proposals which extend NetBox's feature scope.\r\n\r\n Do not prepend any sort of tag to your issue's title. An administrator will\r\n review your issue and assign labels as appropriate.\r\n--->\r\n### Issue type\r\n[ ] Feature request <!-- An enhancement of existing functionality -->\r\n[ ] Bug report <!-- Unexpected or erroneous behavior -->\r\n[X] Documentation <!-- A modification to the documentation -->\r\n\r\n<!--\r\n Please describe the environment in which you are running NetBox. (Be sure\r\n to verify that you are running the latest stable release of NetBox before\r\n submitting a bug report.) 
If you are submitting a bug report and have made\r\n any changes to the code base, please first validate that your bug can be\r\n recreated while running an official release.\r\n-->\r\n### Environment\r\n* Python version: 3.6.5\r\n* NetBox version: 2.3.3\r\n\r\n<!--\r\n BUG REPORTS must include:\r\n * A list of the steps needed for someone else to reproduce the bug\r\n * A description of the expected and observed behavior\r\n * Any relevant error messages (screenshots may also help)\r\n\r\n FEATURE REQUESTS must include:\r\n * A detailed description of the proposed functionality\r\n * A use case for the new feature\r\n * A rough description of any necessary changes to the database schema\r\n * Any relevant third-party libraries which would be needed\r\n-->\r\n### Description\r\nHello,\r\n\r\nOn the API documentation it seems there is a little error.\r\n\r\nOn `/virtualisation/interfaces/` endpoint, method `POST`, the field `device` is marked as mandatory, but the API only respond to the field `virtual_machine`.\r\n\r\nThe error message returned by the API is also misleading :\r\n```\r\n{\r\n \"non_field_errors\": [\r\n \"An interface must belong to either a device or a virtual machine.\"\r\n ]\r\n}\r\n```\r\n\r\nIt seems the error is also present on `/virtualisation/interfaces/{id}` methods `PUT` and `PATCH`.\n", "before_files": [{"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema\nfrom rest_framework.fields import ChoiceField\nfrom rest_framework.relations import ManyRelatedField\nfrom taggit_serializer.serializers import TagListSerializerField\n\nfrom extras.api.customfields import CustomFieldsSerializer\nfrom utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer\n\n\nclass NetBoxSwaggerAutoSchema(SwaggerAutoSchema):\n def get_request_serializer(self):\n serializer = super().get_request_serializer()\n\n if serializer is not None and self.method in self.implicit_body_methods:\n properties = {}\n for child_name, child in serializer.fields.items():\n if isinstance(child, (ChoiceField, WritableNestedSerializer)):\n properties[child_name] = None\n elif isinstance(child, ManyRelatedField) and isinstance(child.child_relation, SerializedPKRelatedField):\n properties[child_name] = None\n\n if properties:\n writable_class = type('Writable' + type(serializer).__name__, (type(serializer),), properties)\n serializer = writable_class()\n\n return serializer\n\n\nclass SerializedPKRelatedFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, SerializedPKRelatedField):\n return self.probe_field_inspectors(field.serializer(), ChildSwaggerType, use_references)\n\n return NotHandled\n\n\nclass TagListFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, TagListSerializerField):\n child_schema = self.probe_field_inspectors(field.child, ChildSwaggerType, use_references)\n return SwaggerType(\n type=openapi.TYPE_ARRAY,\n items=child_schema,\n )\n\n return NotHandled\n\n\nclass CustomChoiceFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, 
swagger_object_type, use_references, **kwargs):\n # this returns a callable which extracts title, description and other stuff\n # https://drf-yasg.readthedocs.io/en/stable/_modules/drf_yasg/inspectors/base.html#FieldInspector._get_partial_types\n SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n\n if isinstance(field, ChoiceField):\n value_schema = openapi.Schema(type=openapi.TYPE_INTEGER)\n\n choices = list(field._choices.keys())\n if set([None] + choices) == {None, True, False}:\n # DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be\n # differentiated since they each have subtly different values in their choice keys.\n # - subdevice_role and connection_status are booleans, although subdevice_role includes None\n # - face is an integer set {0, 1} which is easily confused with {False, True}\n schema_type = openapi.TYPE_INTEGER\n if all(type(x) == bool for x in [c for c in choices if c is not None]):\n schema_type = openapi.TYPE_BOOLEAN\n value_schema = openapi.Schema(type=schema_type)\n value_schema['x-nullable'] = True\n\n schema = SwaggerType(type=openapi.TYPE_OBJECT, required=[\"label\", \"value\"], properties={\n \"label\": openapi.Schema(type=openapi.TYPE_STRING),\n \"value\": value_schema\n })\n\n return schema\n\n elif isinstance(field, CustomFieldsSerializer):\n schema = SwaggerType(type=openapi.TYPE_OBJECT)\n return schema\n\n return NotHandled\n\n\nclass NullableBooleanFieldInspector(FieldInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n\n if isinstance(result, openapi.Schema) and isinstance(obj, ChoiceField) and result.type == 'boolean':\n keys = obj.choices.keys()\n if set(keys) == {None, True, False}:\n result['x-nullable'] = True\n result.type = 'boolean'\n\n return result\n\n\nclass IdInFilterInspector(FilterInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if isinstance(result, list):\n params = [p for p in result if isinstance(p, openapi.Parameter) and p.name == 'id__in']\n for p in params:\n p.type = 'string'\n\n return result\n\n\nclass NullablePaginatorInspector(PaginatorInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if method_name == 'get_paginated_response' and isinstance(result, openapi.Schema):\n next = result.properties['next']\n if isinstance(next, openapi.Schema):\n next['x-nullable'] = True\n previous = result.properties['previous']\n if isinstance(previous, openapi.Schema):\n previous['x-nullable'] = True\n\n return result\n", "path": "netbox/utilities/custom_inspectors.py"}], "after_files": [{"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import FieldInspector, NotHandled, PaginatorInspector, FilterInspector, SwaggerAutoSchema\nfrom drf_yasg.utils import get_serializer_ref_name\nfrom rest_framework.fields import ChoiceField\nfrom rest_framework.relations import ManyRelatedField\nfrom taggit_serializer.serializers import TagListSerializerField\n\nfrom dcim.api.serializers import InterfaceSerializer as DCIMInterfaceSerializer\nfrom virtualization.api.serializers import InterfaceSerializer as VirtualMachineInterfaceSerializer\nfrom extras.api.customfields import CustomFieldsSerializer\nfrom utilities.api import ChoiceField, SerializedPKRelatedField, WritableNestedSerializer\n\n\n# this might be ugly, but it limits drf_yasg-specific code to this file\nDCIMInterfaceSerializer.Meta.ref_name = 'DCIMInterface'\nVirtualMachineInterfaceSerializer.Meta.ref_name = 
'VirtualMachineInterface'\n\n\nclass NetBoxSwaggerAutoSchema(SwaggerAutoSchema):\n writable_serializers = {}\n\n def get_request_serializer(self):\n serializer = super().get_request_serializer()\n\n if serializer is not None and self.method in self.implicit_body_methods:\n properties = {}\n for child_name, child in serializer.fields.items():\n if isinstance(child, (ChoiceField, WritableNestedSerializer)):\n properties[child_name] = None\n elif isinstance(child, ManyRelatedField) and isinstance(child.child_relation, SerializedPKRelatedField):\n properties[child_name] = None\n\n if properties:\n if type(serializer) not in self.writable_serializers:\n writable_name = 'Writable' + type(serializer).__name__\n meta_class = getattr(type(serializer), 'Meta', None)\n if meta_class:\n ref_name = 'Writable' + get_serializer_ref_name(serializer)\n writable_meta = type('Meta', (meta_class,), {'ref_name': ref_name})\n properties['Meta'] = writable_meta\n\n self.writable_serializers[type(serializer)] = type(writable_name, (type(serializer),), properties)\n\n writable_class = self.writable_serializers[type(serializer)]\n serializer = writable_class()\n\n return serializer\n\n\nclass SerializedPKRelatedFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, SerializedPKRelatedField):\n return self.probe_field_inspectors(field.serializer(), ChildSwaggerType, use_references)\n\n return NotHandled\n\n\nclass TagListFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n SwaggerType, ChildSwaggerType = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n if isinstance(field, TagListSerializerField):\n child_schema = self.probe_field_inspectors(field.child, ChildSwaggerType, use_references)\n return SwaggerType(\n type=openapi.TYPE_ARRAY,\n items=child_schema,\n )\n\n return NotHandled\n\n\nclass CustomChoiceFieldInspector(FieldInspector):\n def field_to_swagger_object(self, field, swagger_object_type, use_references, **kwargs):\n # this returns a callable which extracts title, description and other stuff\n # https://drf-yasg.readthedocs.io/en/stable/_modules/drf_yasg/inspectors/base.html#FieldInspector._get_partial_types\n SwaggerType, _ = self._get_partial_types(field, swagger_object_type, use_references, **kwargs)\n\n if isinstance(field, ChoiceField):\n value_schema = openapi.Schema(type=openapi.TYPE_INTEGER)\n\n choices = list(field._choices.keys())\n if set([None] + choices) == {None, True, False}:\n # DeviceType.subdevice_role, Device.face and InterfaceConnection.connection_status all need to be\n # differentiated since they each have subtly different values in their choice keys.\n # - subdevice_role and connection_status are booleans, although subdevice_role includes None\n # - face is an integer set {0, 1} which is easily confused with {False, True}\n schema_type = openapi.TYPE_INTEGER\n if all(type(x) == bool for x in [c for c in choices if c is not None]):\n schema_type = openapi.TYPE_BOOLEAN\n value_schema = openapi.Schema(type=schema_type)\n value_schema['x-nullable'] = True\n\n schema = SwaggerType(type=openapi.TYPE_OBJECT, required=[\"label\", \"value\"], properties={\n \"label\": openapi.Schema(type=openapi.TYPE_STRING),\n \"value\": value_schema\n })\n\n return schema\n\n elif isinstance(field, 
CustomFieldsSerializer):\n schema = SwaggerType(type=openapi.TYPE_OBJECT)\n return schema\n\n return NotHandled\n\n\nclass NullableBooleanFieldInspector(FieldInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n\n if isinstance(result, openapi.Schema) and isinstance(obj, ChoiceField) and result.type == 'boolean':\n keys = obj.choices.keys()\n if set(keys) == {None, True, False}:\n result['x-nullable'] = True\n result.type = 'boolean'\n\n return result\n\n\nclass IdInFilterInspector(FilterInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if isinstance(result, list):\n params = [p for p in result if isinstance(p, openapi.Parameter) and p.name == 'id__in']\n for p in params:\n p.type = 'string'\n\n return result\n\n\nclass NullablePaginatorInspector(PaginatorInspector):\n def process_result(self, result, method_name, obj, **kwargs):\n if method_name == 'get_paginated_response' and isinstance(result, openapi.Schema):\n next = result.properties['next']\n if isinstance(next, openapi.Schema):\n next['x-nullable'] = True\n previous = result.properties['previous']\n if isinstance(previous, openapi.Schema):\n previous['x-nullable'] = True\n\n return result\n", "path": "netbox/utilities/custom_inspectors.py"}]} | 2,182 | 475 |
gh_patches_debug_31188 | rasdani/github-patches | git_diff | saulpw__visidata-812 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[dev 2.0+] disp_rstatus_fmt causes halting exception on GraphSheet
**Small description**
I modified the `disp_rstatus_fmt` option to provide information I need when working with sheets; however, when using GraphSheet, vd crashes with the following exception. I currently have to use `^g` to see the same information that this `disp_rstatus_fmt` would show.
`options.disp_rstatus_fmt = ' {sheet.cursorRowIndex}/{sheet.nRows}[{sheet.nSelectedRows}] {sheet.rowtype} '`
`AttributeError: 'GraphSheet' object has no attribute 'cursorRowIndex'`
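This is reproducible outside visidata, since `str.format` resolves dotted fields such as `{sheet.cursorRowIndex}` with `getattr` and a single missing attribute aborts the whole call; the class below is a stand-in, not the real GraphSheet:

```python
# Minimal reproduction, no visidata required: the fake sheet has some of the
# referenced attributes but no cursorRowIndex, like the reported GraphSheet case.
class FakeGraphSheet:
    nRows = 10
    rowtype = 'plots'

fmt = ' {sheet.cursorRowIndex}/{sheet.nRows}[{sheet.nSelectedRows}] {sheet.rowtype} '
fmt.format(sheet=FakeGraphSheet())
# AttributeError: 'FakeGraphSheet' object has no attribute 'cursorRowIndex'
```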
**Expected result**
GraphSheet to load
FWIW, I was also attempting to make a % counter in `disp_rstatus_fmt`, but I get some other odd exceptions (this isn't really an issue, so I haven't opened a separate bug; however, this was my original use case).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `visidata/utils.py`
Content:
```
1 import operator
2
3 'Various helper classes and functions.'
4
5 __all__ = ['AlwaysDict', 'AttrDict', 'moveListItem', 'namedlist', 'classproperty']
6
7
8 class AlwaysDict(dict):
9 'return same val for all keys'
10 def __init__(self, val, **kwargs):
11 super().__init__(**kwargs)
12 self._val = val
13
14 def __getitem__(self, k):
15 return self._val
16
17 class AttrDict(dict):
18 'Augment a dict with more convenient .attr syntax. not-present keys return None.'
19 def __getattr__(self, k):
20 try:
21 return self[k]
22 except KeyError:
23 if k.startswith("__"):
24 raise AttributeError
25 return None
26
27 def __setattr__(self, k, v):
28 self[k] = v
29
30 def __dir__(self):
31 return self.keys()
32
33
34 class classproperty(property):
35 def __get__(self, cls, obj):
36 return classmethod(self.fget).__get__(None, obj or cls)()
37
38
39 def moveListItem(L, fromidx, toidx):
40 "Move element within list `L` and return element's new index."
41 toidx = min(max(toidx, 0), len(L)-1)
42 fromidx = min(max(fromidx, 0), len(L)-1)
43 r = L.pop(fromidx)
44 L.insert(toidx, r)
45 return toidx
46
47
48 class OnExit:
49 '"with OnExit(func, ...):" calls func(...) when the context is exited'
50 def __init__(self, func, *args, **kwargs):
51 self.func = func
52 self.args = args
53 self.kwargs = kwargs
54
55 def __enter__(self):
56 return self
57
58 def __exit__(self, exc_type, exc_value, exc_traceback):
59 try:
60 self.func(*self.args, **self.kwargs)
61 except Exception as e:
62 vd.exceptionCaught(e)
63
64
65 def itemsetter(i):
66 def g(obj, v):
67 obj[i] = v
68 return g
69
70
71 def namedlist(objname, fieldnames):
72 'like namedtuple but editable'
73 class NamedListTemplate(list):
74 __name__ = objname
75 _fields = fieldnames
76
77 def __init__(self, L=None, **kwargs):
78 if L is None:
79 L = [None]*len(self._fields)
80 elif len(L) < len(self._fields):
81 L.extend([None]*(len(self._fields) - len(L)))
82 super().__init__(L)
83 for k, v in kwargs.items():
84 setattr(self, k, v)
85
86 def __getattr__(self, k):
87 'to enable .fieldname'
88 try:
89 return self[self._fields.index(k)]
90 except ValueError:
91 raise AttributeError
92
93 def __setattr__(self, k, v):
94 'to enable .fieldname ='
95 try:
96 self[self._fields.index(k)] = v
97 except ValueError:
98 super().__setattr__(k, v)
99
100 return NamedListTemplate
101
```
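As a usage note for the `namedlist` factory defined above, the generated class behaves like an editable namedtuple backed by a list; the sketch assumes a visidata checkout (the package containing this `utils.py`) is importable:

```python
# Assumes visidata is on sys.path; namedlist is defined in the utils.py shown above.
from visidata.utils import namedlist

Point = namedlist('Point', ['x', 'y'])

p = Point([3, 4])
print(p.x, p.y)   # 3 4   attribute access maps onto list positions
p.x = 30          # unlike a namedtuple, fields are assignable
print(p)          # [30, 4]

q = Point(y=7)    # omitted fields default to None
print(q)          # [None, 7]
```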
Path: `visidata/statusbar.py`
Content:
```
1 import collections
2 import curses
3
4 from visidata import vd, VisiData, BaseSheet, Sheet, ColumnItem, Column, RowColorizer, options, colors, wrmap, clipdraw, ExpectedException, update_attr, theme
5
6
7 __all__ = ['StatusSheet', 'status', 'error', 'fail', 'warning', 'debug']
8
9 theme('disp_rstatus_fmt', ' {sheet.longname} {sheet.nRows:9d} {sheet.rowtype} ', 'right-side status format string')
10 theme('disp_status_fmt', '{sheet.shortcut}› {sheet.name}| ', 'status line prefix')
11 theme('disp_lstatus_max', 0, 'maximum length of left status line')
12 theme('disp_status_sep', ' | ', 'separator between statuses')
13
14 theme('color_keystrokes', 'white', 'color of input keystrokes on status line')
15 theme('color_status', 'bold', 'status line color')
16 theme('color_error', 'red', 'error message color')
17 theme('color_warning', 'yellow', 'warning message color')
18 theme('color_top_status', 'underline', 'top window status bar color')
19 theme('color_active_status', 'bold', ' active window status bar color')
20 theme('color_inactive_status', '8', 'inactive window status bar color')
21
22 BaseSheet.init('longname', lambda: '')
23
24 vd.beforeExecHooks.append(lambda sheet, cmd, args, ks: setattr(sheet, 'longname', cmd.longname))
25
26 @VisiData.lazy_property
27 def statuses(vd):
28 return collections.OrderedDict() # (priority, statusmsg) -> num_repeats; shown until next action
29
30
31 @VisiData.lazy_property
32 def statusHistory(vd):
33 return list() # list of [priority, statusmsg, repeats] for all status messages ever
34
35
36 @VisiData.global_api
37 def status(self, *args, priority=0):
38 'Display *args* on status until next action.'
39 if not args:
40 return True
41
42 k = (priority, tuple(map(str, args)))
43 self.statuses[k] = self.statuses.get(k, 0) + 1
44
45 if self.statusHistory:
46 prevpri, prevargs, prevn = self.statusHistory[-1]
47 if prevpri == priority and prevargs == args:
48 self.statusHistory[-1][2] += 1
49 return True
50
51 self.statusHistory.append([priority, args, 1])
52 return True
53
54 @VisiData.global_api
55 def error(vd, *args):
56 'Abort with ExpectedException, and display *args* on status as an error.'
57 vd.status(*args, priority=3)
58 raise ExpectedException(args[0] if args else '')
59
60 @VisiData.global_api
61 def fail(vd, *args):
62 'Abort with ExpectedException, and display *args* on status as a warning.'
63 vd.status(*args, priority=2)
64 raise ExpectedException(args[0] if args else '')
65
66 @VisiData.global_api
67 def warning(vd, *args):
68 'Display *args* on status as a warning.'
69 vd.status(*args, priority=1)
70
71 @VisiData.global_api
72 def debug(vd, *args, **kwargs):
73 'Display *args* on status if options.debug is set.'
74 if options.debug:
75 return vd.status(*args, **kwargs)
76
77 def middleTruncate(s, w):
78 if len(s) <= w:
79 return s
80 return s[:w] + options.disp_truncator + s[-w:]
81
82
83 def composeStatus(msgparts, n):
84 msg = '; '.join(wrmap(str, msgparts))
85 if n > 1:
86 msg = '[%sx] %s' % (n, msg)
87 return msg
88
89
90 @BaseSheet.api
91 def leftStatus(sheet):
92 'Return left side of status bar for this sheet. Overridable.'
93 return options.disp_status_fmt.format(sheet=sheet, vd=vd)
94
95
96 @VisiData.api
97 def drawLeftStatus(vd, scr, vs):
98 'Draw left side of status bar.'
99 cattr = colors.get_color('color_status')
100 active = (vs is vd.sheets[0]) if vd.sheets else False # active sheet
101 if active:
102 cattr = update_attr(cattr, colors.color_active_status, 0)
103 else:
104 cattr = update_attr(cattr, colors.color_inactive_status, 0)
105
106 if scr is vd.winTop:
107 cattr = update_attr(cattr, colors.color_top_status, 1)
108
109 attr = cattr.attr
110 error_attr = update_attr(cattr, colors.color_error, 1).attr
111 warn_attr = update_attr(cattr, colors.color_warning, 2).attr
112 sep = options.disp_status_sep
113
114 x = 0
115 y = vs.windowHeight-1 # status for each window
116 try:
117 lstatus = vs.leftStatus()
118 maxwidth = options.disp_lstatus_max
119 if maxwidth > 0:
120 lstatus = middleTruncate(lstatus, maxwidth//2)
121
122 x = clipdraw(scr, y, 0, lstatus, attr, w=vs.windowWidth-1)
123
124 vd.onMouse(scr, y, 0, 1, x,
125 BUTTON1_PRESSED='sheets',
126 BUTTON3_PRESSED='rename-sheet',
127 BUTTON3_CLICKED='rename-sheet')
128 except Exception as e:
129 vd.exceptionCaught(e)
130
131 if not active:
132 return
133
134 one = False
135 for (pri, msgparts), n in sorted(vd.statuses.items(), key=lambda k: -k[0][0]):
136 try:
137 if x > vs.windowWidth:
138 break
139 if one: # any messages already:
140 x += clipdraw(scr, y, x, sep, attr, w=vs.windowWidth-x)
141 one = True
142 msg = composeStatus(msgparts, n)
143
144 if pri == 3: msgattr = error_attr
145 elif pri == 2: msgattr = warn_attr
146 elif pri == 1: msgattr = warn_attr
147 else: msgattr = attr
148 x += clipdraw(scr, y, x, msg, msgattr, w=vs.windowWidth-x)
149 except Exception as e:
150 vd.exceptionCaught(e)
151
152
153 @VisiData.api
154 def rightStatus(vd, sheet):
155 'Return right side of status bar. Overrideable.'
156 return options.disp_rstatus_fmt.format(sheet=sheet, vd=vd)
157
158
159 @VisiData.api
160 def drawRightStatus(vd, scr, vs):
161 'Draw right side of status bar. Return length displayed.'
162 rightx = vs.windowWidth
163
164 ret = 0
165 statcolors = [
166 (vd.rightStatus(vs), 'color_status'),
167 ]
168
169 active = vs is vd.activeSheet
170
171 if active:
172 statcolors.append((vd.keystrokes or '', 'color_keystrokes'))
173
174 if vs.currentThreads:
175 statcolors.insert(0, vd.checkMemoryUsage())
176 if vs.progresses:
177 gerund = vs.progresses[0].gerund
178 else:
179 gerund = 'processing'
180 statcolors.insert(1, (' %s %s…' % (vs.progressPct, gerund), 'color_working'))
181
182 if active and vd.currentReplay:
183 statcolors.insert(0, (vd.replayStatus, 'color_status_replay'))
184
185 for rstatcolor in statcolors:
186 if rstatcolor:
187 try:
188 rstatus, coloropt = rstatcolor
189 rstatus = ' '+rstatus
190 cattr = colors.get_color(coloropt)
191 if scr is vd.winTop:
192 cattr = update_attr(cattr, colors.color_top_status, 0)
193 if active:
194 cattr = update_attr(cattr, colors.color_active_status, 0)
195 else:
196 cattr = update_attr(cattr, colors.color_inactive_status, 0)
197 statuslen = clipdraw(scr, vs.windowHeight-1, rightx, rstatus, cattr.attr, w=vs.windowWidth-1, rtl=True)
198 rightx -= statuslen
199 ret += statuslen
200 except Exception as e:
201 vd.exceptionCaught(e)
202
203 if scr:
204 curses.doupdate()
205 return ret
206
207
208 class StatusSheet(Sheet):
209 precious = False
210 rowtype = 'statuses' # rowdef: (priority, args, nrepeats)
211 columns = [
212 ColumnItem('priority', 0, type=int, width=0),
213 ColumnItem('nrepeats', 2, type=int, width=0),
214 ColumnItem('args', 1, width=0),
215 Column('message', getter=lambda col,row: composeStatus(row[1], row[2])),
216 ]
217 colorizers = [
218 RowColorizer(1, 'color_error', lambda s,c,r,v: r and r[0] == 3),
219 RowColorizer(1, 'color_warning', lambda s,c,r,v: r and r[0] in [1,2]),
220 ]
221
222 def reload(self):
223 self.rows = self.source
224
225
226 @VisiData.property
227 def statusHistorySheet(vd):
228 return StatusSheet("status_history", source=vd.statusHistory[::-1]) # in reverse order
229
230
231 BaseSheet.addCommand('^P', 'open-statuses', 'vd.push(vd.statusHistorySheet)', 'open Status History')
232
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/visidata/statusbar.py b/visidata/statusbar.py
--- a/visidata/statusbar.py
+++ b/visidata/statusbar.py
@@ -1,7 +1,7 @@
import collections
import curses
-from visidata import vd, VisiData, BaseSheet, Sheet, ColumnItem, Column, RowColorizer, options, colors, wrmap, clipdraw, ExpectedException, update_attr, theme
+from visidata import vd, VisiData, BaseSheet, Sheet, ColumnItem, Column, RowColorizer, options, colors, wrmap, clipdraw, ExpectedException, update_attr, theme, MissingAttrFormatter
__all__ = ['StatusSheet', 'status', 'error', 'fail', 'warning', 'debug']
@@ -153,7 +153,7 @@
@VisiData.api
def rightStatus(vd, sheet):
'Return right side of status bar. Overrideable.'
- return options.disp_rstatus_fmt.format(sheet=sheet, vd=vd)
+ return MissingAttrFormatter().format(sheet.options.disp_rstatus_fmt, sheet=sheet, vd=vd)
@VisiData.api
diff --git a/visidata/utils.py b/visidata/utils.py
--- a/visidata/utils.py
+++ b/visidata/utils.py
@@ -1,8 +1,9 @@
import operator
+import string
'Various helper classes and functions.'
-__all__ = ['AlwaysDict', 'AttrDict', 'moveListItem', 'namedlist', 'classproperty']
+__all__ = ['AlwaysDict', 'AttrDict', 'moveListItem', 'namedlist', 'classproperty', 'MissingAttrFormatter']
class AlwaysDict(dict):
@@ -98,3 +99,17 @@
super().__setattr__(k, v)
return NamedListTemplate
+
+class MissingAttrFormatter(string.Formatter):
+ "formats {} fields with `''`, that would normally result in a raised KeyError or AttributeError; intended for user customisable format strings."
+ def get_field(self, field_name, *args, **kwargs):
+ try:
+ return super().get_field(field_name, *args, **kwargs)
+ except (KeyError, AttributeError):
+ return (None, field_name)
+
+ def format_field(self, value, format_spec):
+ # value is missing
+ if not value:
+ return ''
+ return super().format_field(value, format_spec)
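A quick standalone check of the new formatter's behaviour, with the class copied from the patch above: missing attributes now render as empty strings instead of raising. Note that, as written, any falsy value (for example 0) also renders as empty, which looks acceptable for a status line. The fake sheet is a stand-in, not the real GraphSheet:

```python
import string

class MissingAttrFormatter(string.Formatter):
    # Copied from the patch above for a standalone check.
    def get_field(self, field_name, *args, **kwargs):
        try:
            return super().get_field(field_name, *args, **kwargs)
        except (KeyError, AttributeError):
            return (None, field_name)

    def format_field(self, value, format_spec):
        if not value:
            return ''
        return super().format_field(value, format_spec)

class FakeGraphSheet:
    nRows = 10
    rowtype = 'plots'

fmt = ' {sheet.cursorRowIndex}/{sheet.nRows}[{sheet.nSelectedRows}] {sheet.rowtype} '
print(MissingAttrFormatter().format(fmt, sheet=FakeGraphSheet()))
# ' /10[] plots '  (missing attributes become empty strings instead of raising)
```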
| {"golden_diff": "diff --git a/visidata/statusbar.py b/visidata/statusbar.py\n--- a/visidata/statusbar.py\n+++ b/visidata/statusbar.py\n@@ -1,7 +1,7 @@\n import collections\n import curses\n \n-from visidata import vd, VisiData, BaseSheet, Sheet, ColumnItem, Column, RowColorizer, options, colors, wrmap, clipdraw, ExpectedException, update_attr, theme\n+from visidata import vd, VisiData, BaseSheet, Sheet, ColumnItem, Column, RowColorizer, options, colors, wrmap, clipdraw, ExpectedException, update_attr, theme, MissingAttrFormatter\n \n \n __all__ = ['StatusSheet', 'status', 'error', 'fail', 'warning', 'debug']\n@@ -153,7 +153,7 @@\n @VisiData.api\n def rightStatus(vd, sheet):\n 'Return right side of status bar. Overrideable.'\n- return options.disp_rstatus_fmt.format(sheet=sheet, vd=vd)\n+ return MissingAttrFormatter().format(sheet.options.disp_rstatus_fmt, sheet=sheet, vd=vd)\n \n \n @VisiData.api\ndiff --git a/visidata/utils.py b/visidata/utils.py\n--- a/visidata/utils.py\n+++ b/visidata/utils.py\n@@ -1,8 +1,9 @@\n import operator\n+import string\n \n 'Various helper classes and functions.'\n \n-__all__ = ['AlwaysDict', 'AttrDict', 'moveListItem', 'namedlist', 'classproperty']\n+__all__ = ['AlwaysDict', 'AttrDict', 'moveListItem', 'namedlist', 'classproperty', 'MissingAttrFormatter']\n \n \n class AlwaysDict(dict):\n@@ -98,3 +99,17 @@\n super().__setattr__(k, v)\n \n return NamedListTemplate\n+\n+class MissingAttrFormatter(string.Formatter):\n+ \"formats {} fields with `''`, that would normally result in a raised KeyError or AttributeError; intended for user customisable format strings.\"\n+ def get_field(self, field_name, *args, **kwargs):\n+ try:\n+ return super().get_field(field_name, *args, **kwargs)\n+ except (KeyError, AttributeError):\n+ return (None, field_name)\n+\n+ def format_field(self, value, format_spec):\n+ # value is missing\n+ if not value:\n+ return ''\n+ return super().format_field(value, format_spec)\n", "issue": "[dev 2.0+] disp_rstatus_fmt causes halting exception on GraphSheet\n**Small description**\r\nI modified the `disp_rstatus_fmt` to provide information I require when working with sheets, however when using GraphSheet vd crashes with the following exception. I currently have to use `^g` to see the same information as I would be able to see using this `disp_rstatus_fmt`.\r\n\r\n`options.disp_rstatus_fmt = ' {sheet.cursorRowIndex}/{sheet.nRows}[{sheet.nSelectedRows}] {sheet.rowtype} '`\r\n\r\n`AttributeError: 'GraphSheet' object has no attribute 'cursorRowIndex'`\r\n\r\n**Expected result**\r\nGraphSheet to load\r\n\r\nFWIW, I was also attempting to make a % counter in `disp_rstatus_fmt` however I get some odd other exceptions (this isn't really an issue so I haven't opened a separate bug, however this was my original use-case).\n", "before_files": [{"content": "import operator\n\n'Various helper classes and functions.'\n\n__all__ = ['AlwaysDict', 'AttrDict', 'moveListItem', 'namedlist', 'classproperty']\n\n\nclass AlwaysDict(dict):\n 'return same val for all keys'\n def __init__(self, val, **kwargs):\n super().__init__(**kwargs)\n self._val = val\n\n def __getitem__(self, k):\n return self._val\n\nclass AttrDict(dict):\n 'Augment a dict with more convenient .attr syntax. 
not-present keys return None.'\n def __getattr__(self, k):\n try:\n return self[k]\n except KeyError:\n if k.startswith(\"__\"):\n raise AttributeError\n return None\n\n def __setattr__(self, k, v):\n self[k] = v\n\n def __dir__(self):\n return self.keys()\n\n\nclass classproperty(property):\n def __get__(self, cls, obj):\n return classmethod(self.fget).__get__(None, obj or cls)()\n\n\ndef moveListItem(L, fromidx, toidx):\n \"Move element within list `L` and return element's new index.\"\n toidx = min(max(toidx, 0), len(L)-1)\n fromidx = min(max(fromidx, 0), len(L)-1)\n r = L.pop(fromidx)\n L.insert(toidx, r)\n return toidx\n\n\nclass OnExit:\n '\"with OnExit(func, ...):\" calls func(...) when the context is exited'\n def __init__(self, func, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n try:\n self.func(*self.args, **self.kwargs)\n except Exception as e:\n vd.exceptionCaught(e)\n\n\ndef itemsetter(i):\n def g(obj, v):\n obj[i] = v\n return g\n\n\ndef namedlist(objname, fieldnames):\n 'like namedtuple but editable'\n class NamedListTemplate(list):\n __name__ = objname\n _fields = fieldnames\n\n def __init__(self, L=None, **kwargs):\n if L is None:\n L = [None]*len(self._fields)\n elif len(L) < len(self._fields):\n L.extend([None]*(len(self._fields) - len(L)))\n super().__init__(L)\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __getattr__(self, k):\n 'to enable .fieldname'\n try:\n return self[self._fields.index(k)]\n except ValueError:\n raise AttributeError\n\n def __setattr__(self, k, v):\n 'to enable .fieldname ='\n try:\n self[self._fields.index(k)] = v\n except ValueError:\n super().__setattr__(k, v)\n\n return NamedListTemplate\n", "path": "visidata/utils.py"}, {"content": "import collections\nimport curses\n\nfrom visidata import vd, VisiData, BaseSheet, Sheet, ColumnItem, Column, RowColorizer, options, colors, wrmap, clipdraw, ExpectedException, update_attr, theme\n\n\n__all__ = ['StatusSheet', 'status', 'error', 'fail', 'warning', 'debug']\n\ntheme('disp_rstatus_fmt', ' {sheet.longname} {sheet.nRows:9d} {sheet.rowtype} ', 'right-side status format string')\ntheme('disp_status_fmt', '{sheet.shortcut}\u203a {sheet.name}| ', 'status line prefix')\ntheme('disp_lstatus_max', 0, 'maximum length of left status line')\ntheme('disp_status_sep', ' | ', 'separator between statuses')\n\ntheme('color_keystrokes', 'white', 'color of input keystrokes on status line')\ntheme('color_status', 'bold', 'status line color')\ntheme('color_error', 'red', 'error message color')\ntheme('color_warning', 'yellow', 'warning message color')\ntheme('color_top_status', 'underline', 'top window status bar color')\ntheme('color_active_status', 'bold', ' active window status bar color')\ntheme('color_inactive_status', '8', 'inactive window status bar color')\n\nBaseSheet.init('longname', lambda: '')\n\nvd.beforeExecHooks.append(lambda sheet, cmd, args, ks: setattr(sheet, 'longname', cmd.longname))\n\[email protected]_property\ndef statuses(vd):\n return collections.OrderedDict() # (priority, statusmsg) -> num_repeats; shown until next action\n\n\[email protected]_property\ndef statusHistory(vd):\n return list() # list of [priority, statusmsg, repeats] for all status messages ever\n\n\[email protected]_api\ndef status(self, *args, priority=0):\n 'Display *args* on status until next action.'\n if not args:\n return True\n\n k = (priority, tuple(map(str, args)))\n 
self.statuses[k] = self.statuses.get(k, 0) + 1\n\n if self.statusHistory:\n prevpri, prevargs, prevn = self.statusHistory[-1]\n if prevpri == priority and prevargs == args:\n self.statusHistory[-1][2] += 1\n return True\n\n self.statusHistory.append([priority, args, 1])\n return True\n\[email protected]_api\ndef error(vd, *args):\n 'Abort with ExpectedException, and display *args* on status as an error.'\n vd.status(*args, priority=3)\n raise ExpectedException(args[0] if args else '')\n\[email protected]_api\ndef fail(vd, *args):\n 'Abort with ExpectedException, and display *args* on status as a warning.'\n vd.status(*args, priority=2)\n raise ExpectedException(args[0] if args else '')\n\[email protected]_api\ndef warning(vd, *args):\n 'Display *args* on status as a warning.'\n vd.status(*args, priority=1)\n\[email protected]_api\ndef debug(vd, *args, **kwargs):\n 'Display *args* on status if options.debug is set.'\n if options.debug:\n return vd.status(*args, **kwargs)\n\ndef middleTruncate(s, w):\n if len(s) <= w:\n return s\n return s[:w] + options.disp_truncator + s[-w:]\n\n\ndef composeStatus(msgparts, n):\n msg = '; '.join(wrmap(str, msgparts))\n if n > 1:\n msg = '[%sx] %s' % (n, msg)\n return msg\n\n\[email protected]\ndef leftStatus(sheet):\n 'Return left side of status bar for this sheet. Overridable.'\n return options.disp_status_fmt.format(sheet=sheet, vd=vd)\n\n\[email protected]\ndef drawLeftStatus(vd, scr, vs):\n 'Draw left side of status bar.'\n cattr = colors.get_color('color_status')\n active = (vs is vd.sheets[0]) if vd.sheets else False # active sheet\n if active:\n cattr = update_attr(cattr, colors.color_active_status, 0)\n else:\n cattr = update_attr(cattr, colors.color_inactive_status, 0)\n\n if scr is vd.winTop:\n cattr = update_attr(cattr, colors.color_top_status, 1)\n\n attr = cattr.attr\n error_attr = update_attr(cattr, colors.color_error, 1).attr\n warn_attr = update_attr(cattr, colors.color_warning, 2).attr\n sep = options.disp_status_sep\n\n x = 0\n y = vs.windowHeight-1 # status for each window\n try:\n lstatus = vs.leftStatus()\n maxwidth = options.disp_lstatus_max\n if maxwidth > 0:\n lstatus = middleTruncate(lstatus, maxwidth//2)\n\n x = clipdraw(scr, y, 0, lstatus, attr, w=vs.windowWidth-1)\n\n vd.onMouse(scr, y, 0, 1, x,\n BUTTON1_PRESSED='sheets',\n BUTTON3_PRESSED='rename-sheet',\n BUTTON3_CLICKED='rename-sheet')\n except Exception as e:\n vd.exceptionCaught(e)\n\n if not active:\n return\n\n one = False\n for (pri, msgparts), n in sorted(vd.statuses.items(), key=lambda k: -k[0][0]):\n try:\n if x > vs.windowWidth:\n break\n if one: # any messages already:\n x += clipdraw(scr, y, x, sep, attr, w=vs.windowWidth-x)\n one = True\n msg = composeStatus(msgparts, n)\n\n if pri == 3: msgattr = error_attr\n elif pri == 2: msgattr = warn_attr\n elif pri == 1: msgattr = warn_attr\n else: msgattr = attr\n x += clipdraw(scr, y, x, msg, msgattr, w=vs.windowWidth-x)\n except Exception as e:\n vd.exceptionCaught(e)\n\n\[email protected]\ndef rightStatus(vd, sheet):\n 'Return right side of status bar. Overrideable.'\n return options.disp_rstatus_fmt.format(sheet=sheet, vd=vd)\n\n\[email protected]\ndef drawRightStatus(vd, scr, vs):\n 'Draw right side of status bar. 
Return length displayed.'\n rightx = vs.windowWidth\n\n ret = 0\n statcolors = [\n (vd.rightStatus(vs), 'color_status'),\n ]\n\n active = vs is vd.activeSheet\n\n if active:\n statcolors.append((vd.keystrokes or '', 'color_keystrokes'))\n\n if vs.currentThreads:\n statcolors.insert(0, vd.checkMemoryUsage())\n if vs.progresses:\n gerund = vs.progresses[0].gerund\n else:\n gerund = 'processing'\n statcolors.insert(1, (' %s %s\u2026' % (vs.progressPct, gerund), 'color_working'))\n\n if active and vd.currentReplay:\n statcolors.insert(0, (vd.replayStatus, 'color_status_replay'))\n\n for rstatcolor in statcolors:\n if rstatcolor:\n try:\n rstatus, coloropt = rstatcolor\n rstatus = ' '+rstatus\n cattr = colors.get_color(coloropt)\n if scr is vd.winTop:\n cattr = update_attr(cattr, colors.color_top_status, 0)\n if active:\n cattr = update_attr(cattr, colors.color_active_status, 0)\n else:\n cattr = update_attr(cattr, colors.color_inactive_status, 0)\n statuslen = clipdraw(scr, vs.windowHeight-1, rightx, rstatus, cattr.attr, w=vs.windowWidth-1, rtl=True)\n rightx -= statuslen\n ret += statuslen\n except Exception as e:\n vd.exceptionCaught(e)\n\n if scr:\n curses.doupdate()\n return ret\n\n\nclass StatusSheet(Sheet):\n precious = False\n rowtype = 'statuses' # rowdef: (priority, args, nrepeats)\n columns = [\n ColumnItem('priority', 0, type=int, width=0),\n ColumnItem('nrepeats', 2, type=int, width=0),\n ColumnItem('args', 1, width=0),\n Column('message', getter=lambda col,row: composeStatus(row[1], row[2])),\n ]\n colorizers = [\n RowColorizer(1, 'color_error', lambda s,c,r,v: r and r[0] == 3),\n RowColorizer(1, 'color_warning', lambda s,c,r,v: r and r[0] in [1,2]),\n ]\n\n def reload(self):\n self.rows = self.source\n\n\[email protected]\ndef statusHistorySheet(vd):\n return StatusSheet(\"status_history\", source=vd.statusHistory[::-1]) # in reverse order\n\n\nBaseSheet.addCommand('^P', 'open-statuses', 'vd.push(vd.statusHistorySheet)', 'open Status History')\n", "path": "visidata/statusbar.py"}], "after_files": [{"content": "import operator\nimport string\n\n'Various helper classes and functions.'\n\n__all__ = ['AlwaysDict', 'AttrDict', 'moveListItem', 'namedlist', 'classproperty', 'MissingAttrFormatter']\n\n\nclass AlwaysDict(dict):\n 'return same val for all keys'\n def __init__(self, val, **kwargs):\n super().__init__(**kwargs)\n self._val = val\n\n def __getitem__(self, k):\n return self._val\n\nclass AttrDict(dict):\n 'Augment a dict with more convenient .attr syntax. not-present keys return None.'\n def __getattr__(self, k):\n try:\n return self[k]\n except KeyError:\n if k.startswith(\"__\"):\n raise AttributeError\n return None\n\n def __setattr__(self, k, v):\n self[k] = v\n\n def __dir__(self):\n return self.keys()\n\n\nclass classproperty(property):\n def __get__(self, cls, obj):\n return classmethod(self.fget).__get__(None, obj or cls)()\n\n\ndef moveListItem(L, fromidx, toidx):\n \"Move element within list `L` and return element's new index.\"\n toidx = min(max(toidx, 0), len(L)-1)\n fromidx = min(max(fromidx, 0), len(L)-1)\n r = L.pop(fromidx)\n L.insert(toidx, r)\n return toidx\n\n\nclass OnExit:\n '\"with OnExit(func, ...):\" calls func(...) 
when the context is exited'\n def __init__(self, func, *args, **kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n try:\n self.func(*self.args, **self.kwargs)\n except Exception as e:\n vd.exceptionCaught(e)\n\n\ndef itemsetter(i):\n def g(obj, v):\n obj[i] = v\n return g\n\n\ndef namedlist(objname, fieldnames):\n 'like namedtuple but editable'\n class NamedListTemplate(list):\n __name__ = objname\n _fields = fieldnames\n\n def __init__(self, L=None, **kwargs):\n if L is None:\n L = [None]*len(self._fields)\n elif len(L) < len(self._fields):\n L.extend([None]*(len(self._fields) - len(L)))\n super().__init__(L)\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n def __getattr__(self, k):\n 'to enable .fieldname'\n try:\n return self[self._fields.index(k)]\n except ValueError:\n raise AttributeError\n\n def __setattr__(self, k, v):\n 'to enable .fieldname ='\n try:\n self[self._fields.index(k)] = v\n except ValueError:\n super().__setattr__(k, v)\n\n return NamedListTemplate\n\nclass MissingAttrFormatter(string.Formatter):\n \"formats {} fields with `''`, that would normally result in a raised KeyError or AttributeError; intended for user customisable format strings.\"\n def get_field(self, field_name, *args, **kwargs):\n try:\n return super().get_field(field_name, *args, **kwargs)\n except (KeyError, AttributeError):\n return (None, field_name)\n\n def format_field(self, value, format_spec):\n # value is missing\n if not value:\n return ''\n return super().format_field(value, format_spec)\n", "path": "visidata/utils.py"}, {"content": "import collections\nimport curses\n\nfrom visidata import vd, VisiData, BaseSheet, Sheet, ColumnItem, Column, RowColorizer, options, colors, wrmap, clipdraw, ExpectedException, update_attr, theme, MissingAttrFormatter\n\n\n__all__ = ['StatusSheet', 'status', 'error', 'fail', 'warning', 'debug']\n\ntheme('disp_rstatus_fmt', ' {sheet.longname} {sheet.nRows:9d} {sheet.rowtype} ', 'right-side status format string')\ntheme('disp_status_fmt', '{sheet.shortcut}\u203a {sheet.name}| ', 'status line prefix')\ntheme('disp_lstatus_max', 0, 'maximum length of left status line')\ntheme('disp_status_sep', ' | ', 'separator between statuses')\n\ntheme('color_keystrokes', 'white', 'color of input keystrokes on status line')\ntheme('color_status', 'bold', 'status line color')\ntheme('color_error', 'red', 'error message color')\ntheme('color_warning', 'yellow', 'warning message color')\ntheme('color_top_status', 'underline', 'top window status bar color')\ntheme('color_active_status', 'bold', ' active window status bar color')\ntheme('color_inactive_status', '8', 'inactive window status bar color')\n\nBaseSheet.init('longname', lambda: '')\n\nvd.beforeExecHooks.append(lambda sheet, cmd, args, ks: setattr(sheet, 'longname', cmd.longname))\n\[email protected]_property\ndef statuses(vd):\n return collections.OrderedDict() # (priority, statusmsg) -> num_repeats; shown until next action\n\n\[email protected]_property\ndef statusHistory(vd):\n return list() # list of [priority, statusmsg, repeats] for all status messages ever\n\n\[email protected]_api\ndef status(self, *args, priority=0):\n 'Display *args* on status until next action.'\n if not args:\n return True\n\n k = (priority, tuple(map(str, args)))\n self.statuses[k] = self.statuses.get(k, 0) + 1\n\n if self.statusHistory:\n prevpri, prevargs, prevn = self.statusHistory[-1]\n if prevpri == priority and 
prevargs == args:\n self.statusHistory[-1][2] += 1\n return True\n\n self.statusHistory.append([priority, args, 1])\n return True\n\[email protected]_api\ndef error(vd, *args):\n 'Abort with ExpectedException, and display *args* on status as an error.'\n vd.status(*args, priority=3)\n raise ExpectedException(args[0] if args else '')\n\[email protected]_api\ndef fail(vd, *args):\n 'Abort with ExpectedException, and display *args* on status as a warning.'\n vd.status(*args, priority=2)\n raise ExpectedException(args[0] if args else '')\n\[email protected]_api\ndef warning(vd, *args):\n 'Display *args* on status as a warning.'\n vd.status(*args, priority=1)\n\[email protected]_api\ndef debug(vd, *args, **kwargs):\n 'Display *args* on status if options.debug is set.'\n if options.debug:\n return vd.status(*args, **kwargs)\n\ndef middleTruncate(s, w):\n if len(s) <= w:\n return s\n return s[:w] + options.disp_truncator + s[-w:]\n\n\ndef composeStatus(msgparts, n):\n msg = '; '.join(wrmap(str, msgparts))\n if n > 1:\n msg = '[%sx] %s' % (n, msg)\n return msg\n\n\[email protected]\ndef leftStatus(sheet):\n 'Return left side of status bar for this sheet. Overridable.'\n return options.disp_status_fmt.format(sheet=sheet, vd=vd)\n\n\[email protected]\ndef drawLeftStatus(vd, scr, vs):\n 'Draw left side of status bar.'\n cattr = colors.get_color('color_status')\n active = (vs is vd.sheets[0]) if vd.sheets else False # active sheet\n if active:\n cattr = update_attr(cattr, colors.color_active_status, 0)\n else:\n cattr = update_attr(cattr, colors.color_inactive_status, 0)\n\n if scr is vd.winTop:\n cattr = update_attr(cattr, colors.color_top_status, 1)\n\n attr = cattr.attr\n error_attr = update_attr(cattr, colors.color_error, 1).attr\n warn_attr = update_attr(cattr, colors.color_warning, 2).attr\n sep = options.disp_status_sep\n\n x = 0\n y = vs.windowHeight-1 # status for each window\n try:\n lstatus = vs.leftStatus()\n maxwidth = options.disp_lstatus_max\n if maxwidth > 0:\n lstatus = middleTruncate(lstatus, maxwidth//2)\n\n x = clipdraw(scr, y, 0, lstatus, attr, w=vs.windowWidth-1)\n\n vd.onMouse(scr, y, 0, 1, x,\n BUTTON1_PRESSED='sheets',\n BUTTON3_PRESSED='rename-sheet',\n BUTTON3_CLICKED='rename-sheet')\n except Exception as e:\n vd.exceptionCaught(e)\n\n if not active:\n return\n\n one = False\n for (pri, msgparts), n in sorted(vd.statuses.items(), key=lambda k: -k[0][0]):\n try:\n if x > vs.windowWidth:\n break\n if one: # any messages already:\n x += clipdraw(scr, y, x, sep, attr, w=vs.windowWidth-x)\n one = True\n msg = composeStatus(msgparts, n)\n\n if pri == 3: msgattr = error_attr\n elif pri == 2: msgattr = warn_attr\n elif pri == 1: msgattr = warn_attr\n else: msgattr = attr\n x += clipdraw(scr, y, x, msg, msgattr, w=vs.windowWidth-x)\n except Exception as e:\n vd.exceptionCaught(e)\n\n\[email protected]\ndef rightStatus(vd, sheet):\n 'Return right side of status bar. Overrideable.'\n return MissingAttrFormatter().format(sheet.options.disp_rstatus_fmt, sheet=sheet, vd=vd)\n\n\[email protected]\ndef drawRightStatus(vd, scr, vs):\n 'Draw right side of status bar. 
Return length displayed.'\n rightx = vs.windowWidth\n\n ret = 0\n statcolors = [\n (vd.rightStatus(vs), 'color_status'),\n ]\n\n active = vs is vd.activeSheet\n\n if active:\n statcolors.append((vd.keystrokes or '', 'color_keystrokes'))\n\n if vs.currentThreads:\n statcolors.insert(0, vd.checkMemoryUsage())\n if vs.progresses:\n gerund = vs.progresses[0].gerund\n else:\n gerund = 'processing'\n statcolors.insert(1, (' %s %s\u2026' % (vs.progressPct, gerund), 'color_working'))\n\n if active and vd.currentReplay:\n statcolors.insert(0, (vd.replayStatus, 'color_status_replay'))\n\n for rstatcolor in statcolors:\n if rstatcolor:\n try:\n rstatus, coloropt = rstatcolor\n rstatus = ' '+rstatus\n cattr = colors.get_color(coloropt)\n if scr is vd.winTop:\n cattr = update_attr(cattr, colors.color_top_status, 0)\n if active:\n cattr = update_attr(cattr, colors.color_active_status, 0)\n else:\n cattr = update_attr(cattr, colors.color_inactive_status, 0)\n statuslen = clipdraw(scr, vs.windowHeight-1, rightx, rstatus, cattr.attr, w=vs.windowWidth-1, rtl=True)\n rightx -= statuslen\n ret += statuslen\n except Exception as e:\n vd.exceptionCaught(e)\n\n if scr:\n curses.doupdate()\n return ret\n\n\nclass StatusSheet(Sheet):\n precious = False\n rowtype = 'statuses' # rowdef: (priority, args, nrepeats)\n columns = [\n ColumnItem('priority', 0, type=int, width=0),\n ColumnItem('nrepeats', 2, type=int, width=0),\n ColumnItem('args', 1, width=0),\n Column('message', getter=lambda col,row: composeStatus(row[1], row[2])),\n ]\n colorizers = [\n RowColorizer(1, 'color_error', lambda s,c,r,v: r and r[0] == 3),\n RowColorizer(1, 'color_warning', lambda s,c,r,v: r and r[0] in [1,2]),\n ]\n\n def reload(self):\n self.rows = self.source\n\n\[email protected]\ndef statusHistorySheet(vd):\n return StatusSheet(\"status_history\", source=vd.statusHistory[::-1]) # in reverse order\n\n\nBaseSheet.addCommand('^P', 'open-statuses', 'vd.push(vd.statusHistorySheet)', 'open Status History')\n", "path": "visidata/statusbar.py"}]} | 3,976 | 536 |
gh_patches_debug_11333 | rasdani/github-patches | git_diff | searx__searx-3479 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Using single apostrophe causes a "search error"
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SEARX -->
**Version of Searx, commit number if you are using on master branch and stipulate if you forked Searx**
1.1.0-27-bf0a583f
<!-- If you are running on master branch using git execute this command
in order to fetch the latest commit ID:
```
git log -1
```
If you are using searx-docker then look at the bottom of the Searx page
and check for the version after "Powered by searx"
Please also stipulate if you are using a forked version of Searx and
include a link to the fork source code.
-->
**How did you install Searx?**
searx-docker. Specifically using docker-compose and running ``docker-compose up``.
Compose file:
```yaml
version: '3.3'
services:
searx:
volumes:
- /mnt/storage1/configs/searx:/etc/searx
ports:
- 9999:8080
environment:
- BASE_URL=http://localhost:9999/
image: searx/searx
restart: unless-stopped
```
<!-- Did you install Searx using the official wiki or using searx-docker
or manually by executing the searx/webapp.py file? -->
**What happened?**
Using a single apostrophe in a word (for example, "What's") causes a search error
<!-- A clear and concise description of what the bug is. -->
**How To Reproduce**
Use any single apostrophe in a search query
<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->
**Expected behavior**
Should return results, instead of an error.
<!-- A clear and concise description of what you expected to happen. -->
**Screenshots & Logs**
https://imgur.com/a/E2b4mNX
https://paste.ee/p/e5gS8
**Additional context**
It's also somewhat infuriating that it also returns from a result page to the home page when this happens, meaning I have to retype my query (though that's another issue entirely).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/plugins/search_operators.py`
Content:
```
1 import shlex
2 import string
3
4 from flask_babel import gettext
5
6 name = gettext("Search operators")
7 description = gettext("""Filter results using hyphen, site: and -site:.
8 Please note that you might get less results with the additional filtering.""")
9 default_on = False
10
11
12 def on_result(request, search, result):
13 q = search.search_query.query
14 qs = shlex.split(q)
15 spitems = [x.lower() for x in qs if ' ' in x]
16 mitems = [x.lower() for x in qs if x.startswith('-')]
17 siteitems = [x.lower() for x in qs if x.startswith('site:')]
18 msiteitems = [x.lower() for x in qs if x.startswith('-site:')]
19 url, title, content = (
20 result["url"].lower(),
21 result["title"].lower(),
22 (result.get("content").lower() if result.get("content") else '')
23 )
24 if all((x not in title or x not in content) for x in spitems):
25 return False
26 if all((x in title or x in content) for x in mitems):
27 return False
28 if all(x not in url for x in siteitems):
29 return False
30 if all(x in url for x in msiteitems):
31 return False
32 return True
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/plugins/search_operators.py b/searx/plugins/search_operators.py
--- a/searx/plugins/search_operators.py
+++ b/searx/plugins/search_operators.py
@@ -11,7 +11,11 @@
def on_result(request, search, result):
q = search.search_query.query
- qs = shlex.split(q)
+ # WARN: shlex.quote is designed only for Unix shells and may be vulnerable
+ # to command injection on non-POSIX compliant shells (Windows)
+ # https://docs.python.org/3/library/shlex.html#shlex.quote
+ squote = shlex.quote(q)
+ qs = shlex.split(squote)
spitems = [x.lower() for x in qs if ' ' in x]
mitems = [x.lower() for x in qs if x.startswith('-')]
siteitems = [x.lower() for x in qs if x.startswith('site:')]
| {"golden_diff": "diff --git a/searx/plugins/search_operators.py b/searx/plugins/search_operators.py\n--- a/searx/plugins/search_operators.py\n+++ b/searx/plugins/search_operators.py\n@@ -11,7 +11,11 @@\n \n def on_result(request, search, result):\n q = search.search_query.query\n- qs = shlex.split(q)\n+ # WARN: shlex.quote is designed only for Unix shells and may be vulnerable\n+ # to command injection on non-POSIX compliant shells (Windows)\n+ # https://docs.python.org/3/library/shlex.html#shlex.quote\n+ squote = shlex.quote(q)\n+ qs = shlex.split(squote)\n spitems = [x.lower() for x in qs if ' ' in x]\n mitems = [x.lower() for x in qs if x.startswith('-')]\n siteitems = [x.lower() for x in qs if x.startswith('site:')]\n", "issue": "Using single apostrophe causes a \"search error\"\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SEARX -->\r\n\r\n**Version of Searx, commit number if you are using on master branch and stipulate if you forked Searx**\r\n1.1.0-27-bf0a583f\r\n<!-- If you are running on master branch using git execute this command\r\nin order to fetch the latest commit ID:\r\n```\r\ngit log -1\r\n``` \r\nIf you are using searx-docker then look at the bottom of the Searx page\r\nand check for the version after \"Powered by searx\"\r\n\r\nPlease also stipulate if you are using a forked version of Searx and\r\ninclude a link to the fork source code.\r\n-->\r\n**How did you install Searx?**\r\nsearx-docker. Specifically using docker-compose and running ``docker-compose up``.\r\nCompose file:\r\n```yaml\r\nversion: '3.3'\r\nservices:\r\n searx:\r\n volumes:\r\n - /mnt/storage1/configs/searx:/etc/searx\r\n ports:\r\n - 9999:8080\r\n environment:\r\n - BASE_URL=http://localhost:9999/\r\n image: searx/searx\r\n restart: unless-stopped\r\n```\r\n<!-- Did you install Searx using the official wiki or using searx-docker\r\nor manually by executing the searx/webapp.py file? -->\r\n**What happened?**\r\nUsing a single apostrophe in a word (for example, \"What's\") causes a search error\r\n<!-- A clear and concise description of what the bug is. -->\r\n**How To Reproduce**\r\nUse any single apostrophe in a search query\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n**Expected behavior**\r\nShould return results, instead of an error.\r\n<!-- A clear and concise description of what you expected to happen. 
-->\r\n**Screenshots & Logs**\r\nhttps://imgur.com/a/E2b4mNX\r\nhttps://paste.ee/p/e5gS8\r\n\r\n**Additional context**\r\nIt's also somewhat infuriating that it also returns from a result page to the home page when this happens, meaning I have to retype my query (though that's another issue entirely).\n", "before_files": [{"content": "import shlex\nimport string\n\nfrom flask_babel import gettext\n\nname = gettext(\"Search operators\")\ndescription = gettext(\"\"\"Filter results using hyphen, site: and -site:.\nPlease note that you might get less results with the additional filtering.\"\"\")\ndefault_on = False\n\n\ndef on_result(request, search, result):\n q = search.search_query.query\n qs = shlex.split(q)\n spitems = [x.lower() for x in qs if ' ' in x]\n mitems = [x.lower() for x in qs if x.startswith('-')]\n siteitems = [x.lower() for x in qs if x.startswith('site:')]\n msiteitems = [x.lower() for x in qs if x.startswith('-site:')]\n url, title, content = (\n result[\"url\"].lower(),\n result[\"title\"].lower(),\n (result.get(\"content\").lower() if result.get(\"content\") else '')\n )\n if all((x not in title or x not in content) for x in spitems):\n return False\n if all((x in title or x in content) for x in mitems):\n return False\n if all(x not in url for x in siteitems):\n return False\n if all(x in url for x in msiteitems):\n return False\n return True\n", "path": "searx/plugins/search_operators.py"}], "after_files": [{"content": "import shlex\nimport string\n\nfrom flask_babel import gettext\n\nname = gettext(\"Search operators\")\ndescription = gettext(\"\"\"Filter results using hyphen, site: and -site:.\nPlease note that you might get less results with the additional filtering.\"\"\")\ndefault_on = False\n\n\ndef on_result(request, search, result):\n q = search.search_query.query\n # WARN: shlex.quote is designed only for Unix shells and may be vulnerable\n # to command injection on non-POSIX compliant shells (Windows)\n # https://docs.python.org/3/library/shlex.html#shlex.quote\n squote = shlex.quote(q)\n qs = shlex.split(squote)\n spitems = [x.lower() for x in qs if ' ' in x]\n mitems = [x.lower() for x in qs if x.startswith('-')]\n siteitems = [x.lower() for x in qs if x.startswith('site:')]\n msiteitems = [x.lower() for x in qs if x.startswith('-site:')]\n url, title, content = (\n result[\"url\"].lower(),\n result[\"title\"].lower(),\n (result.get(\"content\").lower() if result.get(\"content\") else '')\n )\n if all((x not in title or x not in content) for x in spitems):\n return False\n if all((x in title or x in content) for x in mitems):\n return False\n if all(x not in url for x in siteitems):\n return False\n if all(x in url for x in msiteitems):\n return False\n return True\n", "path": "searx/plugins/search_operators.py"}]} | 1,100 | 212 |
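A standalone sketch (Python, not part of the dataset row above) of the failure described in this searx issue and of the quote-then-split behaviour introduced by the patch; the example query is illustrative only:

```python
import shlex

query = "What's the weather"  # hypothetical query containing a single apostrophe

# shlex.split() treats the apostrophe as an unterminated quote and raises
# ValueError("No closing quotation"): the crash behind the reported search error.
try:
    shlex.split(query)
except ValueError as exc:
    print(exc)  # -> No closing quotation

# The patch quotes the whole query before splitting, so the apostrophe survives
# as literal text and splitting succeeds.
quoted = shlex.quote(query)
print(shlex.split(quoted))  # -> ["What's the weather"]
```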
gh_patches_debug_38414 | rasdani/github-patches | git_diff | pymeasure__pymeasure-284 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Rename pyvisa imports
In the upcoming pyvisa version, using `import visa` is being [deprecated](https://github.com/pyvisa/pyvisa/pull/448) to avoid a name clash with another package. We need to fix that in our codebase, too. This could also be behind the version comparison issues like #130
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pymeasure/adapters/visa.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2020 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import logging
26
27 import copy
28 import visa
29 import numpy as np
30 from pkg_resources import parse_version
31
32 from .adapter import Adapter
33
34 log = logging.getLogger(__name__)
35 log.addHandler(logging.NullHandler())
36
37
38 # noinspection PyPep8Naming,PyUnresolvedReferences
39 class VISAAdapter(Adapter):
40 """ Adapter class for the VISA library using PyVISA to communicate
41 with instruments.
42
43 :param resource: VISA resource name that identifies the address
44 :param visa_library: VisaLibrary Instance, path of the VISA library or VisaLibrary spec string (@py or @ni).
45 if not given, the default for the platform will be used.
46 :param kwargs: Any valid key-word arguments for constructing a PyVISA instrument
47 """
48
49 def __init__(self, resourceName, visa_library='', **kwargs):
50 if not VISAAdapter.has_supported_version():
51 raise NotImplementedError("Please upgrade PyVISA to version 1.8 or later.")
52
53 if isinstance(resourceName, int):
54 resourceName = "GPIB0::%d::INSTR" % resourceName
55 super(VISAAdapter, self).__init__()
56 self.resource_name = resourceName
57 self.manager = visa.ResourceManager(visa_library)
58 safeKeywords = ['resource_name', 'timeout',
59 'chunk_size', 'lock', 'query_delay', 'send_end',
60 'values_format', 'read_termination', 'write_termination']
61 kwargsCopy = copy.deepcopy(kwargs)
62 for key in kwargsCopy:
63 if key not in safeKeywords:
64 kwargs.pop(key)
65 self.connection = self.manager.get_instrument(
66 resourceName,
67 **kwargs
68 )
69
70 @staticmethod
71 def has_supported_version():
72 """ Returns True if the PyVISA version is greater than 1.8 """
73 if hasattr(visa, '__version__'):
74 return parse_version(visa.__version__) >= parse_version('1.8')
75 else:
76 return False
77
78 def __repr__(self):
79 return "<VISAAdapter(resource='%s')>" % self.connection.resourceName
80
81 def write(self, command):
82 """ Writes a command to the instrument
83
84 :param command: SCPI command string to be sent to the instrument
85 """
86 self.connection.write(command)
87
88 def read(self):
89 """ Reads until the buffer is empty and returns the resulting
90 ASCII response
91
92 :returns: String ASCII response of the instrument.
93 """
94 return self.connection.read()
95
96 def read_bytes(self, size):
97 """ Reads specified number of bytes from the buffer and returns
98 the resulting ASCII response
99
100 :param size: Number of bytes to read from the buffer
101 :returns: String ASCII response of the instrument.
102 """
103 return self.connection.read_bytes(size)
104
105 def ask(self, command):
106 """ Writes the command to the instrument and returns the resulting
107 ASCII response
108
109 :param command: SCPI command string to be sent to the instrument
110 :returns: String ASCII response of the instrument
111 """
112 return self.connection.query(command)
113
114 def ask_values(self, command):
115 """ Writes a command to the instrument and returns a list of formatted
116 values from the result. The format of the return is configurated by
117 self.config().
118
119 :param command: SCPI command to be sent to the instrument
120 :returns: Formatted response of the instrument.
121 """
122 return self.connection.query_values(command)
123
124 def binary_values(self, command, header_bytes=0, dtype=np.float32):
125 """ Returns a numpy array from a query for binary data
126
127 :param command: SCPI command to be sent to the instrument
128 :param header_bytes: Integer number of bytes to ignore in header
129 :param dtype: The NumPy data type to format the values with
130 :returns: NumPy array of values
131 """
132 self.connection.write(command)
133 binary = self.connection.read_raw()
134 header, data = binary[:header_bytes], binary[header_bytes:]
135 return np.fromstring(data, dtype=dtype)
136
137 def config(self, is_binary=False, datatype='str',
138 container=np.array, converter='s',
139 separator=',', is_big_endian=False):
140 """ Configurate the format of data transfer to and from the instrument.
141
142 :param is_binary: If True, data is in binary format, otherwise ASCII.
143 :param datatype: Data type.
144 :param container: Return format. Any callable/type that takes an iterable.
145 :param converter: String converter, used in dealing with ASCII data.
146 :param separator: Delimiter of a series of data in ASCII.
147 :param is_big_endian: Endianness.
148 """
149 self.connection.values_format.is_binary = is_binary
150 self.connection.values_format.datatype = datatype
151 self.connection.values_format.container = container
152 self.connection.values_format.converter = converter
153 self.connection.values_format.separator = separator
154 self.connection.values_format.is_big_endian = is_big_endian
155
156 def wait_for_srq(self, timeout=25, delay=0.1):
157 """ Blocks until a SRQ, and leaves the bit high
158
159 :param timeout: Timeout duration in seconds
160 :param delay: Time delay between checking SRQ in seconds
161 """
162 self.connection.wait_for_srq(timeout * 1000)
163
```
Path: `pymeasure/instruments/resources.py`
Content:
```
1 #
2 # This file is part of the PyMeasure package.
3 #
4 # Copyright (c) 2013-2020 PyMeasure Developers
5 #
6 # Permission is hereby granted, free of charge, to any person obtaining a copy
7 # of this software and associated documentation files (the "Software"), to deal
8 # in the Software without restriction, including without limitation the rights
9 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 # copies of the Software, and to permit persons to whom the Software is
11 # furnished to do so, subject to the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be included in
14 # all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 # THE SOFTWARE.
23 #
24
25 import visa
26
27
28 def list_resources():
29 """
30 Prints the available resources, and returns a list of VISA resource names
31
32 .. code-block:: python
33
34 resources = list_resources()
35 #prints (e.g.)
36 #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******
37 #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****
38 dmm = Agilent34410(resources[0])
39
40 """
41 rm = visa.ResourceManager()
42 instrs = rm.list_resources()
43 for n, instr in enumerate(instrs):
44 # trying to catch errors in comunication
45 try:
46 res = rm.open_resource(instr)
47 # try to avoid errors from *idn?
48 try:
49 # noinspection PyUnresolvedReferences
50 idn = res.ask('*idn?')[:-1]
51 except visa.Error:
52 idn = "Not known"
53 finally:
54 res.close()
55 print(n, ":", instr, ":", idn)
56 except visa.VisaIOError as e:
57 print(n, ":", instr, ":", "Visa IO Error: check connections")
58 print(e)
59 rm.close()
60 return instrs
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pymeasure/adapters/visa.py b/pymeasure/adapters/visa.py
--- a/pymeasure/adapters/visa.py
+++ b/pymeasure/adapters/visa.py
@@ -25,7 +25,7 @@
import logging
import copy
-import visa
+import pyvisa
import numpy as np
from pkg_resources import parse_version
@@ -54,7 +54,7 @@
resourceName = "GPIB0::%d::INSTR" % resourceName
super(VISAAdapter, self).__init__()
self.resource_name = resourceName
- self.manager = visa.ResourceManager(visa_library)
+ self.manager = pyvisa.ResourceManager(visa_library)
safeKeywords = ['resource_name', 'timeout',
'chunk_size', 'lock', 'query_delay', 'send_end',
'values_format', 'read_termination', 'write_termination']
@@ -70,8 +70,8 @@
@staticmethod
def has_supported_version():
""" Returns True if the PyVISA version is greater than 1.8 """
- if hasattr(visa, '__version__'):
- return parse_version(visa.__version__) >= parse_version('1.8')
+ if hasattr(pyvisa, '__version__'):
+ return parse_version(pyvisa.__version__) >= parse_version('1.8')
else:
return False
diff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py
--- a/pymeasure/instruments/resources.py
+++ b/pymeasure/instruments/resources.py
@@ -22,7 +22,7 @@
# THE SOFTWARE.
#
-import visa
+import pyvisa
def list_resources():
@@ -38,7 +38,7 @@
dmm = Agilent34410(resources[0])
"""
- rm = visa.ResourceManager()
+ rm = pyvisa.ResourceManager()
instrs = rm.list_resources()
for n, instr in enumerate(instrs):
# trying to catch errors in comunication
@@ -48,12 +48,12 @@
try:
# noinspection PyUnresolvedReferences
idn = res.ask('*idn?')[:-1]
- except visa.Error:
+ except pyvisa.Error:
idn = "Not known"
finally:
res.close()
print(n, ":", instr, ":", idn)
- except visa.VisaIOError as e:
+ except pyvisa.VisaIOError as e:
print(n, ":", instr, ":", "Visa IO Error: check connections")
print(e)
rm.close()
| {"golden_diff": "diff --git a/pymeasure/adapters/visa.py b/pymeasure/adapters/visa.py\n--- a/pymeasure/adapters/visa.py\n+++ b/pymeasure/adapters/visa.py\n@@ -25,7 +25,7 @@\n import logging\n \n import copy\n-import visa\n+import pyvisa\n import numpy as np\n from pkg_resources import parse_version\n \n@@ -54,7 +54,7 @@\n resourceName = \"GPIB0::%d::INSTR\" % resourceName\n super(VISAAdapter, self).__init__()\n self.resource_name = resourceName\n- self.manager = visa.ResourceManager(visa_library)\n+ self.manager = pyvisa.ResourceManager(visa_library)\n safeKeywords = ['resource_name', 'timeout',\n 'chunk_size', 'lock', 'query_delay', 'send_end',\n 'values_format', 'read_termination', 'write_termination']\n@@ -70,8 +70,8 @@\n @staticmethod\n def has_supported_version():\n \"\"\" Returns True if the PyVISA version is greater than 1.8 \"\"\"\n- if hasattr(visa, '__version__'):\n- return parse_version(visa.__version__) >= parse_version('1.8')\n+ if hasattr(pyvisa, '__version__'):\n+ return parse_version(pyvisa.__version__) >= parse_version('1.8')\n else:\n return False\n \ndiff --git a/pymeasure/instruments/resources.py b/pymeasure/instruments/resources.py\n--- a/pymeasure/instruments/resources.py\n+++ b/pymeasure/instruments/resources.py\n@@ -22,7 +22,7 @@\n # THE SOFTWARE.\n #\n \n-import visa\n+import pyvisa\n \n \n def list_resources():\n@@ -38,7 +38,7 @@\n dmm = Agilent34410(resources[0])\n \n \"\"\"\n- rm = visa.ResourceManager()\n+ rm = pyvisa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n@@ -48,12 +48,12 @@\n try:\n # noinspection PyUnresolvedReferences\n idn = res.ask('*idn?')[:-1]\n- except visa.Error:\n+ except pyvisa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n- except visa.VisaIOError as e:\n+ except pyvisa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n", "issue": "Rename pyvisa imports\nIn the upcoming pyvisa version, using `import visa` is being [deprecated](https://github.com/pyvisa/pyvisa/pull/448) to avoid a name clash with another package. We need to fix that in our codebase, too. This could also be behind the version comparison issues like #130\n", "before_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nimport copy\nimport visa\nimport numpy as np\nfrom pkg_resources import parse_version\n\nfrom .adapter import Adapter\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\n# noinspection PyPep8Naming,PyUnresolvedReferences\nclass VISAAdapter(Adapter):\n \"\"\" Adapter class for the VISA library using PyVISA to communicate\n with instruments.\n\n :param resource: VISA resource name that identifies the address\n :param visa_library: VisaLibrary Instance, path of the VISA library or VisaLibrary spec string (@py or @ni).\n if not given, the default for the platform will be used.\n :param kwargs: Any valid key-word arguments for constructing a PyVISA instrument\n \"\"\"\n\n def __init__(self, resourceName, visa_library='', **kwargs):\n if not VISAAdapter.has_supported_version():\n raise NotImplementedError(\"Please upgrade PyVISA to version 1.8 or later.\")\n\n if isinstance(resourceName, int):\n resourceName = \"GPIB0::%d::INSTR\" % resourceName\n super(VISAAdapter, self).__init__()\n self.resource_name = resourceName\n self.manager = visa.ResourceManager(visa_library)\n safeKeywords = ['resource_name', 'timeout',\n 'chunk_size', 'lock', 'query_delay', 'send_end',\n 'values_format', 'read_termination', 'write_termination']\n kwargsCopy = copy.deepcopy(kwargs)\n for key in kwargsCopy:\n if key not in safeKeywords:\n kwargs.pop(key)\n self.connection = self.manager.get_instrument(\n resourceName,\n **kwargs\n )\n\n @staticmethod\n def has_supported_version():\n \"\"\" Returns True if the PyVISA version is greater than 1.8 \"\"\"\n if hasattr(visa, '__version__'):\n return parse_version(visa.__version__) >= parse_version('1.8')\n else:\n return False\n\n def __repr__(self):\n return \"<VISAAdapter(resource='%s')>\" % self.connection.resourceName\n\n def write(self, command):\n \"\"\" Writes a command to the instrument\n\n :param command: SCPI command string to be sent to the instrument\n \"\"\"\n self.connection.write(command)\n\n def read(self):\n \"\"\" Reads until the buffer is empty and returns the resulting\n ASCII response\n\n :returns: String ASCII response of the instrument.\n \"\"\"\n return self.connection.read()\n\n def read_bytes(self, size):\n \"\"\" Reads specified number of bytes from the buffer and returns\n the resulting ASCII response\n\n :param size: Number of bytes to read from the buffer\n :returns: String ASCII response of the instrument.\n \"\"\"\n return self.connection.read_bytes(size)\n\n def ask(self, command):\n \"\"\" Writes the command to the instrument and returns the resulting\n ASCII response\n\n :param command: SCPI command string to be sent to the instrument\n :returns: String ASCII response of the instrument\n \"\"\"\n return self.connection.query(command)\n\n def ask_values(self, command):\n \"\"\" Writes a command to the instrument and returns a list of formatted\n values from the result. 
The format of the return is configurated by\n self.config().\n\n :param command: SCPI command to be sent to the instrument\n :returns: Formatted response of the instrument.\n \"\"\"\n return self.connection.query_values(command)\n\n def binary_values(self, command, header_bytes=0, dtype=np.float32):\n \"\"\" Returns a numpy array from a query for binary data\n\n :param command: SCPI command to be sent to the instrument\n :param header_bytes: Integer number of bytes to ignore in header\n :param dtype: The NumPy data type to format the values with\n :returns: NumPy array of values\n \"\"\"\n self.connection.write(command)\n binary = self.connection.read_raw()\n header, data = binary[:header_bytes], binary[header_bytes:]\n return np.fromstring(data, dtype=dtype)\n\n def config(self, is_binary=False, datatype='str',\n container=np.array, converter='s',\n separator=',', is_big_endian=False):\n \"\"\" Configurate the format of data transfer to and from the instrument.\n\n :param is_binary: If True, data is in binary format, otherwise ASCII.\n :param datatype: Data type.\n :param container: Return format. Any callable/type that takes an iterable.\n :param converter: String converter, used in dealing with ASCII data.\n :param separator: Delimiter of a series of data in ASCII.\n :param is_big_endian: Endianness.\n \"\"\"\n self.connection.values_format.is_binary = is_binary\n self.connection.values_format.datatype = datatype\n self.connection.values_format.container = container\n self.connection.values_format.converter = converter\n self.connection.values_format.separator = separator\n self.connection.values_format.is_big_endian = is_big_endian\n\n def wait_for_srq(self, timeout=25, delay=0.1):\n \"\"\" Blocks until a SRQ, and leaves the bit high\n\n :param timeout: Timeout duration in seconds\n :param delay: Time delay between checking SRQ in seconds\n \"\"\"\n self.connection.wait_for_srq(timeout * 1000)\n", "path": "pymeasure/adapters/visa.py"}, {"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport visa\n\n\ndef list_resources():\n \"\"\"\n Prints the available resources, and returns a list of VISA resource names\n \n .. 
code-block:: python\n\n resources = list_resources()\n #prints (e.g.)\n #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******\n #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****\n dmm = Agilent34410(resources[0])\n \n \"\"\"\n rm = visa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n try:\n res = rm.open_resource(instr)\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n idn = res.ask('*idn?')[:-1]\n except visa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n except visa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n return instrs\n", "path": "pymeasure/instruments/resources.py"}], "after_files": [{"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport logging\n\nimport copy\nimport pyvisa\nimport numpy as np\nfrom pkg_resources import parse_version\n\nfrom .adapter import Adapter\n\nlog = logging.getLogger(__name__)\nlog.addHandler(logging.NullHandler())\n\n\n# noinspection PyPep8Naming,PyUnresolvedReferences\nclass VISAAdapter(Adapter):\n \"\"\" Adapter class for the VISA library using PyVISA to communicate\n with instruments.\n\n :param resource: VISA resource name that identifies the address\n :param visa_library: VisaLibrary Instance, path of the VISA library or VisaLibrary spec string (@py or @ni).\n if not given, the default for the platform will be used.\n :param kwargs: Any valid key-word arguments for constructing a PyVISA instrument\n \"\"\"\n\n def __init__(self, resourceName, visa_library='', **kwargs):\n if not VISAAdapter.has_supported_version():\n raise NotImplementedError(\"Please upgrade PyVISA to version 1.8 or later.\")\n\n if isinstance(resourceName, int):\n resourceName = \"GPIB0::%d::INSTR\" % resourceName\n super(VISAAdapter, self).__init__()\n self.resource_name = resourceName\n self.manager = pyvisa.ResourceManager(visa_library)\n safeKeywords = ['resource_name', 'timeout',\n 'chunk_size', 'lock', 'query_delay', 'send_end',\n 'values_format', 'read_termination', 'write_termination']\n kwargsCopy = copy.deepcopy(kwargs)\n for key in kwargsCopy:\n if key not in safeKeywords:\n kwargs.pop(key)\n self.connection = self.manager.get_instrument(\n resourceName,\n **kwargs\n )\n\n @staticmethod\n 
def has_supported_version():\n \"\"\" Returns True if the PyVISA version is greater than 1.8 \"\"\"\n if hasattr(pyvisa, '__version__'):\n return parse_version(pyvisa.__version__) >= parse_version('1.8')\n else:\n return False\n\n def __repr__(self):\n return \"<VISAAdapter(resource='%s')>\" % self.connection.resourceName\n\n def write(self, command):\n \"\"\" Writes a command to the instrument\n\n :param command: SCPI command string to be sent to the instrument\n \"\"\"\n self.connection.write(command)\n\n def read(self):\n \"\"\" Reads until the buffer is empty and returns the resulting\n ASCII response\n\n :returns: String ASCII response of the instrument.\n \"\"\"\n return self.connection.read()\n\n def read_bytes(self, size):\n \"\"\" Reads specified number of bytes from the buffer and returns\n the resulting ASCII response\n\n :param size: Number of bytes to read from the buffer\n :returns: String ASCII response of the instrument.\n \"\"\"\n return self.connection.read_bytes(size)\n\n def ask(self, command):\n \"\"\" Writes the command to the instrument and returns the resulting\n ASCII response\n\n :param command: SCPI command string to be sent to the instrument\n :returns: String ASCII response of the instrument\n \"\"\"\n return self.connection.query(command)\n\n def ask_values(self, command):\n \"\"\" Writes a command to the instrument and returns a list of formatted\n values from the result. The format of the return is configurated by\n self.config().\n\n :param command: SCPI command to be sent to the instrument\n :returns: Formatted response of the instrument.\n \"\"\"\n return self.connection.query_values(command)\n\n def binary_values(self, command, header_bytes=0, dtype=np.float32):\n \"\"\" Returns a numpy array from a query for binary data\n\n :param command: SCPI command to be sent to the instrument\n :param header_bytes: Integer number of bytes to ignore in header\n :param dtype: The NumPy data type to format the values with\n :returns: NumPy array of values\n \"\"\"\n self.connection.write(command)\n binary = self.connection.read_raw()\n header, data = binary[:header_bytes], binary[header_bytes:]\n return np.fromstring(data, dtype=dtype)\n\n def config(self, is_binary=False, datatype='str',\n container=np.array, converter='s',\n separator=',', is_big_endian=False):\n \"\"\" Configurate the format of data transfer to and from the instrument.\n\n :param is_binary: If True, data is in binary format, otherwise ASCII.\n :param datatype: Data type.\n :param container: Return format. 
Any callable/type that takes an iterable.\n :param converter: String converter, used in dealing with ASCII data.\n :param separator: Delimiter of a series of data in ASCII.\n :param is_big_endian: Endianness.\n \"\"\"\n self.connection.values_format.is_binary = is_binary\n self.connection.values_format.datatype = datatype\n self.connection.values_format.container = container\n self.connection.values_format.converter = converter\n self.connection.values_format.separator = separator\n self.connection.values_format.is_big_endian = is_big_endian\n\n def wait_for_srq(self, timeout=25, delay=0.1):\n \"\"\" Blocks until a SRQ, and leaves the bit high\n\n :param timeout: Timeout duration in seconds\n :param delay: Time delay between checking SRQ in seconds\n \"\"\"\n self.connection.wait_for_srq(timeout * 1000)\n", "path": "pymeasure/adapters/visa.py"}, {"content": "#\n# This file is part of the PyMeasure package.\n#\n# Copyright (c) 2013-2020 PyMeasure Developers\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\nimport pyvisa\n\n\ndef list_resources():\n \"\"\"\n Prints the available resources, and returns a list of VISA resource names\n \n .. code-block:: python\n\n resources = list_resources()\n #prints (e.g.)\n #0 : GPIB0::22::INSTR : Agilent Technologies,34410A,******\n #1 : GPIB0::26::INSTR : Keithley Instruments Inc., Model 2612, *****\n dmm = Agilent34410(resources[0])\n \n \"\"\"\n rm = pyvisa.ResourceManager()\n instrs = rm.list_resources()\n for n, instr in enumerate(instrs):\n # trying to catch errors in comunication\n try:\n res = rm.open_resource(instr)\n # try to avoid errors from *idn?\n try:\n # noinspection PyUnresolvedReferences\n idn = res.ask('*idn?')[:-1]\n except pyvisa.Error:\n idn = \"Not known\"\n finally:\n res.close()\n print(n, \":\", instr, \":\", idn)\n except pyvisa.VisaIOError as e:\n print(n, \":\", instr, \":\", \"Visa IO Error: check connections\")\n print(e)\n rm.close()\n return instrs\n", "path": "pymeasure/instruments/resources.py"}]} | 2,765 | 580 |
gh_patches_debug_426 | rasdani/github-patches | git_diff | nautobot__nautobot-3981 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2.0: Tag string representation is incorrect
### Environment
* Nautobot version (Docker tag too if applicable): `next`, e13883b7b8a4f44bca0c40d8074dcf8f82e544e6
### Steps to Reproduce
1. Create a Tag and associate it to any model
2. View the legacy-UI list view or detail view of that model
<!-- What did you expect to happen? -->
### Expected Behavior
Tag name to be displayed
<!-- What happened instead? -->
### Observed Behavior
`Tag object (<uuid>)` is displayed.

Appears to be a regression due to #3914.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/extras/models/tags.py`
Content:
```
1 from django.contrib.contenttypes.models import ContentType
2 from django.db import models
3 from taggit.models import GenericUUIDTaggedItemBase
4
5 from nautobot.core.choices import ColorChoices
6 from nautobot.core.models import BaseManager, BaseModel
7 from nautobot.core.models.fields import ColorField
8 from nautobot.core.models.querysets import RestrictedQuerySet
9 from nautobot.extras.models import ChangeLoggedModel, CustomFieldModel
10 from nautobot.extras.models.mixins import NotesMixin
11 from nautobot.extras.models.relationships import RelationshipModel
12 from nautobot.extras.utils import extras_features, TaggableClassesQuery
13
14
15 #
16 # Tags
17 #
18
19
20 class TagQuerySet(RestrictedQuerySet):
21 """Queryset for `Tags` objects."""
22
23 def get_for_model(self, model):
24 """
25 Return all `Tags` assigned to the given model.
26 """
27 return self.filter(content_types__model=model._meta.model_name, content_types__app_label=model._meta.app_label)
28
29
30 # Tag *should* be a `NameColorContentTypesModel` but that way lies circular import purgatory. Sigh.
31 @extras_features(
32 "custom_validators",
33 )
34 class Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, NotesMixin):
35 name = models.CharField(max_length=100, unique=True)
36 content_types = models.ManyToManyField(
37 to=ContentType,
38 related_name="tags",
39 limit_choices_to=TaggableClassesQuery(),
40 )
41 color = ColorField(default=ColorChoices.COLOR_GREY)
42 description = models.CharField(
43 max_length=200,
44 blank=True,
45 )
46
47 objects = BaseManager.from_queryset(TagQuerySet)()
48
49 class Meta:
50 ordering = ["name"]
51
52 def validate_content_types_removal(self, content_types_id):
53 """Validate content_types to be removed are not tagged to a model"""
54 errors = {}
55
56 removed_content_types = self.content_types.exclude(id__in=content_types_id)
57
58 # check if tag is assigned to any of the removed content_types
59 for content_type in removed_content_types:
60 model = content_type.model_class()
61 if model.objects.filter(tags=self).exists():
62 errors.setdefault("content_types", []).append(
63 f"Unable to remove {model._meta.label_lower}. Dependent objects were found."
64 )
65
66 return errors
67
68
69 class TaggedItem(BaseModel, GenericUUIDTaggedItemBase):
70 tag = models.ForeignKey(to=Tag, related_name="%(app_label)s_%(class)s_items", on_delete=models.CASCADE)
71
72 class Meta:
73 index_together = ("content_type", "object_id")
74 unique_together = [["content_type", "object_id", "tag"]]
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/extras/models/tags.py b/nautobot/extras/models/tags.py
--- a/nautobot/extras/models/tags.py
+++ b/nautobot/extras/models/tags.py
@@ -46,6 +46,9 @@
objects = BaseManager.from_queryset(TagQuerySet)()
+ def __str__(self):
+ return self.name
+
class Meta:
ordering = ["name"]
| {"golden_diff": "diff --git a/nautobot/extras/models/tags.py b/nautobot/extras/models/tags.py\n--- a/nautobot/extras/models/tags.py\n+++ b/nautobot/extras/models/tags.py\n@@ -46,6 +46,9 @@\n \n objects = BaseManager.from_queryset(TagQuerySet)()\n \n+ def __str__(self):\n+ return self.name\n+\n class Meta:\n ordering = [\"name\"]\n", "issue": "2.0: Tag string representation is incorrect\n### Environment\r\n* Nautobot version (Docker tag too if applicable): `next`, e13883b7b8a4f44bca0c40d8074dcf8f82e544e6\r\n\r\n### Steps to Reproduce\r\n1. Create a Tag and associate it to any model\r\n2. View the legacy-UI list view or detail view of that model\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nTag name to be displayed\r\n\r\n<!-- What happened instead? -->\r\n### Observed Behavior\r\n\r\n`Tag object (<uuid>)` is displayed.\r\n\r\n\r\n\r\nAppears to be a regression due to #3914.\r\n\n", "before_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom taggit.models import GenericUUIDTaggedItemBase\n\nfrom nautobot.core.choices import ColorChoices\nfrom nautobot.core.models import BaseManager, BaseModel\nfrom nautobot.core.models.fields import ColorField\nfrom nautobot.core.models.querysets import RestrictedQuerySet\nfrom nautobot.extras.models import ChangeLoggedModel, CustomFieldModel\nfrom nautobot.extras.models.mixins import NotesMixin\nfrom nautobot.extras.models.relationships import RelationshipModel\nfrom nautobot.extras.utils import extras_features, TaggableClassesQuery\n\n\n#\n# Tags\n#\n\n\nclass TagQuerySet(RestrictedQuerySet):\n \"\"\"Queryset for `Tags` objects.\"\"\"\n\n def get_for_model(self, model):\n \"\"\"\n Return all `Tags` assigned to the given model.\n \"\"\"\n return self.filter(content_types__model=model._meta.model_name, content_types__app_label=model._meta.app_label)\n\n\n# Tag *should* be a `NameColorContentTypesModel` but that way lies circular import purgatory. Sigh.\n@extras_features(\n \"custom_validators\",\n)\nclass Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, NotesMixin):\n name = models.CharField(max_length=100, unique=True)\n content_types = models.ManyToManyField(\n to=ContentType,\n related_name=\"tags\",\n limit_choices_to=TaggableClassesQuery(),\n )\n color = ColorField(default=ColorChoices.COLOR_GREY)\n description = models.CharField(\n max_length=200,\n blank=True,\n )\n\n objects = BaseManager.from_queryset(TagQuerySet)()\n\n class Meta:\n ordering = [\"name\"]\n\n def validate_content_types_removal(self, content_types_id):\n \"\"\"Validate content_types to be removed are not tagged to a model\"\"\"\n errors = {}\n\n removed_content_types = self.content_types.exclude(id__in=content_types_id)\n\n # check if tag is assigned to any of the removed content_types\n for content_type in removed_content_types:\n model = content_type.model_class()\n if model.objects.filter(tags=self).exists():\n errors.setdefault(\"content_types\", []).append(\n f\"Unable to remove {model._meta.label_lower}. 
Dependent objects were found.\"\n )\n\n return errors\n\n\nclass TaggedItem(BaseModel, GenericUUIDTaggedItemBase):\n tag = models.ForeignKey(to=Tag, related_name=\"%(app_label)s_%(class)s_items\", on_delete=models.CASCADE)\n\n class Meta:\n index_together = (\"content_type\", \"object_id\")\n unique_together = [[\"content_type\", \"object_id\", \"tag\"]]\n", "path": "nautobot/extras/models/tags.py"}], "after_files": [{"content": "from django.contrib.contenttypes.models import ContentType\nfrom django.db import models\nfrom taggit.models import GenericUUIDTaggedItemBase\n\nfrom nautobot.core.choices import ColorChoices\nfrom nautobot.core.models import BaseManager, BaseModel\nfrom nautobot.core.models.fields import ColorField\nfrom nautobot.core.models.querysets import RestrictedQuerySet\nfrom nautobot.extras.models import ChangeLoggedModel, CustomFieldModel\nfrom nautobot.extras.models.mixins import NotesMixin\nfrom nautobot.extras.models.relationships import RelationshipModel\nfrom nautobot.extras.utils import extras_features, TaggableClassesQuery\n\n\n#\n# Tags\n#\n\n\nclass TagQuerySet(RestrictedQuerySet):\n \"\"\"Queryset for `Tags` objects.\"\"\"\n\n def get_for_model(self, model):\n \"\"\"\n Return all `Tags` assigned to the given model.\n \"\"\"\n return self.filter(content_types__model=model._meta.model_name, content_types__app_label=model._meta.app_label)\n\n\n# Tag *should* be a `NameColorContentTypesModel` but that way lies circular import purgatory. Sigh.\n@extras_features(\n \"custom_validators\",\n)\nclass Tag(BaseModel, ChangeLoggedModel, CustomFieldModel, RelationshipModel, NotesMixin):\n name = models.CharField(max_length=100, unique=True)\n content_types = models.ManyToManyField(\n to=ContentType,\n related_name=\"tags\",\n limit_choices_to=TaggableClassesQuery(),\n )\n color = ColorField(default=ColorChoices.COLOR_GREY)\n description = models.CharField(\n max_length=200,\n blank=True,\n )\n\n objects = BaseManager.from_queryset(TagQuerySet)()\n\n def __str__(self):\n return self.name\n\n class Meta:\n ordering = [\"name\"]\n\n def validate_content_types_removal(self, content_types_id):\n \"\"\"Validate content_types to be removed are not tagged to a model\"\"\"\n errors = {}\n\n removed_content_types = self.content_types.exclude(id__in=content_types_id)\n\n # check if tag is assigned to any of the removed content_types\n for content_type in removed_content_types:\n model = content_type.model_class()\n if model.objects.filter(tags=self).exists():\n errors.setdefault(\"content_types\", []).append(\n f\"Unable to remove {model._meta.label_lower}. Dependent objects were found.\"\n )\n\n return errors\n\n\nclass TaggedItem(BaseModel, GenericUUIDTaggedItemBase):\n tag = models.ForeignKey(to=Tag, related_name=\"%(app_label)s_%(class)s_items\", on_delete=models.CASCADE)\n\n class Meta:\n index_together = (\"content_type\", \"object_id\")\n unique_together = [[\"content_type\", \"object_id\", \"tag\"]]\n", "path": "nautobot/extras/models/tags.py"}]} | 1,195 | 95 |
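A hypothetical usage snippet showing the effect of the one-line `__str__` fix from this nautobot row; it assumes a configured Django/nautobot environment and is for illustration only:

```python
from nautobot.extras.models import Tag

tag = Tag(name="production", color="ff0000")

# Before the patch, Django's default representation leaked through and the
# legacy UI rendered "Tag object (<uuid>)"; with the added __str__, the
# tag's name is shown instead.
print(str(tag))  # -> "production"
```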
gh_patches_debug_30621 | rasdani/github-patches | git_diff | pyodide__pyodide-3483 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
On first call, `pyodide config get emscripten_version` returns `Downloading xbuild environment Installing xbuild environment 3.1.27` instead of `3.1.27`
## 🐛 Bug
In [the docs for out-of-tree builds](https://pyodide.org/en/stable/development/building-and-testing-packages.html#building-and-testing-packages-out-of-tree) it gives this code snippet:
```bash
pip install pyodide-build
git clone https://github.com/emscripten-core/emsdk.git
cd emsdk
PYODIDE_EMSCRIPTEN_VERSION=$(pyodide config get emscripten_version)
./emsdk install ${PYODIDE_EMSCRIPTEN_VERSION}
./emsdk activate ${PYODIDE_EMSCRIPTEN_VERSION}
source emsdk_env.sh
```
But this doesn't work because on the first call, `pyodide config get emscripten_version` outputs this:
```
Downloading xbuild environment
Installing xbuild environment
3.1.27
```
On subsequent calls it returns `3.1.27`.
### To Reproduce
See above.
### Expected behavior
Calls to `pyodide config get emscripten_version` should only ever output the version string such that this command can be reliably used in build automation.
### Environment
- Pyodide Version: Pyodide CLI Version: 0.2.2
### Additional context
As a workaround for build scripts, `pyodide config get emscripten_version` can be called once before actually using it.
--- END ISSUE ---
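As an editorial aside: the workaround mentioned in the issue can also be handled defensively on the consuming side. The snippet below is a hypothetical sketch (not part of the issue or of pyodide-build) that keeps only the last line of stdout, which is the version string even when the xbuild-environment messages are printed first:

```python
# Hypothetical defensive parsing for build scripts affected by this bug:
# the version string is the last line of whatever the command prints.
import subprocess

out = subprocess.run(
    ["pyodide", "config", "get", "emscripten_version"],
    capture_output=True, text=True, check=True,
).stdout
emscripten_version = out.strip().splitlines()[-1]
print(emscripten_version)  # e.g. "3.1.27"
```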
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyodide-build/pyodide_build/cli/config.py`
Content:
```
1 import typer
2
3 from ..common import get_make_environment_vars
4 from ..out_of_tree.utils import initialize_pyodide_root
5
6 app = typer.Typer(help="Manage config variables used in pyodide")
7
8
9 # A dictionary of config variables {key: env_var_in_makefile}
10 PYODIDE_CONFIGS = {
11 "emscripten_version": "PYODIDE_EMSCRIPTEN_VERSION",
12 "python_version": "PYVERSION",
13 }
14
15
16 @app.callback(no_args_is_help=True) # type: ignore[misc]
17 def callback() -> None:
18 return
19
20
21 def _get_configs() -> dict[str, str]:
22 initialize_pyodide_root()
23
24 configs: dict[str, str] = get_make_environment_vars()
25
26 configs_filtered = {k: configs[v] for k, v in PYODIDE_CONFIGS.items()}
27 return configs_filtered
28
29
30 @app.command("list")
31 def list_config():
32 """
33 List config variables used in pyodide
34 """
35 configs = _get_configs()
36
37 for k, v in configs.items():
38 typer.echo(f"{k}={v}")
39
40
41 @app.command("get") # type: ignore[misc]
42 def get_config(
43 config_var: str = typer.Argument(
44 ..., help="A config variable to get. Use `list` to see all possible values."
45 ),
46 ) -> None:
47 """
48 Get a value of a single config variable used in pyodide
49 """
50 configs = _get_configs()
51
52 if config_var not in configs:
53 typer.echo(f"Config variable {config_var} not found.")
54 typer.Exit(1)
55
56 typer.echo(configs[config_var])
57
```
Path: `pyodide-build/pyodide_build/out_of_tree/utils.py`
Content:
```
1 import os
2 from pathlib import Path
3
4 from ..common import search_pyodide_root
5
6
7 def ensure_env_installed(env: Path) -> None:
8 if env.exists():
9 return
10 from .. import __version__
11 from ..install_xbuildenv import download_xbuildenv, install_xbuildenv
12
13 if "dev" in __version__:
14 raise RuntimeError(
15 "To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT"
16 )
17
18 download_xbuildenv(__version__, env)
19 install_xbuildenv(__version__, env)
20
21
22 def initialize_pyodide_root() -> None:
23 if "PYODIDE_ROOT" in os.environ:
24 return
25 try:
26 os.environ["PYODIDE_ROOT"] = str(search_pyodide_root(__file__))
27 return
28 except FileNotFoundError:
29 pass
30 env = Path(".pyodide-xbuildenv")
31 os.environ["PYODIDE_ROOT"] = str(env / "xbuildenv/pyodide-root")
32 ensure_env_installed(env)
33
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyodide-build/pyodide_build/cli/config.py b/pyodide-build/pyodide_build/cli/config.py
--- a/pyodide-build/pyodide_build/cli/config.py
+++ b/pyodide-build/pyodide_build/cli/config.py
@@ -19,7 +19,7 @@
def _get_configs() -> dict[str, str]:
- initialize_pyodide_root()
+ initialize_pyodide_root(quiet=True)
configs: dict[str, str] = get_make_environment_vars()
diff --git a/pyodide-build/pyodide_build/out_of_tree/utils.py b/pyodide-build/pyodide_build/out_of_tree/utils.py
--- a/pyodide-build/pyodide_build/out_of_tree/utils.py
+++ b/pyodide-build/pyodide_build/out_of_tree/utils.py
@@ -1,10 +1,12 @@
import os
+from contextlib import ExitStack, redirect_stdout
+from io import StringIO
from pathlib import Path
from ..common import search_pyodide_root
-def ensure_env_installed(env: Path) -> None:
+def ensure_env_installed(env: Path, *, quiet: bool = False) -> None:
if env.exists():
return
from .. import __version__
@@ -15,11 +17,16 @@
"To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT"
)
- download_xbuildenv(__version__, env)
- install_xbuildenv(__version__, env)
+ with ExitStack() as stack:
+ if quiet:
+ # Prevent writes to stdout
+ stack.enter_context(redirect_stdout(StringIO()))
+ download_xbuildenv(__version__, env)
+ install_xbuildenv(__version__, env)
-def initialize_pyodide_root() -> None:
+
+def initialize_pyodide_root(*, quiet: bool = False) -> None:
if "PYODIDE_ROOT" in os.environ:
return
try:
@@ -29,4 +36,4 @@
pass
env = Path(".pyodide-xbuildenv")
os.environ["PYODIDE_ROOT"] = str(env / "xbuildenv/pyodide-root")
- ensure_env_installed(env)
+ ensure_env_installed(env, quiet=quiet)
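The patch above routes the xbuild-environment download and install messages away from stdout when `quiet=True`. Below is a minimal standalone sketch of that `redirect_stdout` pattern using only the standard library; the `run_quietly` helper is illustrative and not part of pyodide-build:

```python
from contextlib import ExitStack, redirect_stdout
from io import StringIO

def run_quietly(fn, *, quiet: bool = False):
    with ExitStack() as stack:
        if quiet:
            # Swallow anything the callee prints to stdout.
            stack.enter_context(redirect_stdout(StringIO()))
        return fn()

run_quietly(lambda: print("Downloading xbuild environment"), quiet=True)  # prints nothing
run_quietly(lambda: print("3.1.27"))                                      # prints 3.1.27
```

Using `ExitStack` keeps the redirection conditional without duplicating the download and install calls in two branches, which is the same design choice the patch makes.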
| {"golden_diff": "diff --git a/pyodide-build/pyodide_build/cli/config.py b/pyodide-build/pyodide_build/cli/config.py\n--- a/pyodide-build/pyodide_build/cli/config.py\n+++ b/pyodide-build/pyodide_build/cli/config.py\n@@ -19,7 +19,7 @@\n \n \n def _get_configs() -> dict[str, str]:\n- initialize_pyodide_root()\n+ initialize_pyodide_root(quiet=True)\n \n configs: dict[str, str] = get_make_environment_vars()\n \ndiff --git a/pyodide-build/pyodide_build/out_of_tree/utils.py b/pyodide-build/pyodide_build/out_of_tree/utils.py\n--- a/pyodide-build/pyodide_build/out_of_tree/utils.py\n+++ b/pyodide-build/pyodide_build/out_of_tree/utils.py\n@@ -1,10 +1,12 @@\n import os\n+from contextlib import ExitStack, redirect_stdout\n+from io import StringIO\n from pathlib import Path\n \n from ..common import search_pyodide_root\n \n \n-def ensure_env_installed(env: Path) -> None:\n+def ensure_env_installed(env: Path, *, quiet: bool = False) -> None:\n if env.exists():\n return\n from .. import __version__\n@@ -15,11 +17,16 @@\n \"To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT\"\n )\n \n- download_xbuildenv(__version__, env)\n- install_xbuildenv(__version__, env)\n+ with ExitStack() as stack:\n+ if quiet:\n+ # Prevent writes to stdout\n+ stack.enter_context(redirect_stdout(StringIO()))\n \n+ download_xbuildenv(__version__, env)\n+ install_xbuildenv(__version__, env)\n \n-def initialize_pyodide_root() -> None:\n+\n+def initialize_pyodide_root(*, quiet: bool = False) -> None:\n if \"PYODIDE_ROOT\" in os.environ:\n return\n try:\n@@ -29,4 +36,4 @@\n pass\n env = Path(\".pyodide-xbuildenv\")\n os.environ[\"PYODIDE_ROOT\"] = str(env / \"xbuildenv/pyodide-root\")\n- ensure_env_installed(env)\n+ ensure_env_installed(env, quiet=quiet)\n", "issue": "On first call, `pyodide config get emscripten_version` returns `Downloading xbuild environment Installing xbuild environment 3.1.27` instead of `3.1.27`\n## \ud83d\udc1b Bug\r\n\r\nIn [the docs for out-of-tree builds](https://pyodide.org/en/stable/development/building-and-testing-packages.html#building-and-testing-packages-out-of-tree) it gives this code snippet:\r\n\r\n```bash\r\npip install pyodide-build\r\n\r\ngit clone https://github.com/emscripten-core/emsdk.git\r\ncd emsdk\r\n\r\nPYODIDE_EMSCRIPTEN_VERSION=$(pyodide config get emscripten_version)\r\n./emsdk install ${PYODIDE_EMSCRIPTEN_VERSION}\r\n./emsdk activate ${PYODIDE_EMSCRIPTEN_VERSION}\r\nsource emsdk_env.sh\r\n```\r\nBut this doesn't work because on the first call, `pyodide config get emscripten_version` outputs this:\r\n```\r\nDownloading xbuild environment\r\nInstalling xbuild environment\r\n3.1.27\r\n```\r\nOn subsequent calls it returns `3.1.27`.\r\n\r\n### To Reproduce\r\n\r\nSee above.\r\n\r\n### Expected behavior\r\n\r\nCalls to `pyodide config get emscripten_version` should only ever output the version string such that this command can be reliably used in build automation.\r\n\r\n### Environment\r\n\r\n- Pyodide Version<!-- (e.g. 
1.8.1) -->: Pyodide CLI Version: 0.2.2\r\n\r\n### Additional context\r\n\r\nAs a workaround for build scripts, `pyodide config get emscripten_version` can be called once before actually using it.\r\n\n", "before_files": [{"content": "import typer\n\nfrom ..common import get_make_environment_vars\nfrom ..out_of_tree.utils import initialize_pyodide_root\n\napp = typer.Typer(help=\"Manage config variables used in pyodide\")\n\n\n# A dictionary of config variables {key: env_var_in_makefile}\nPYODIDE_CONFIGS = {\n \"emscripten_version\": \"PYODIDE_EMSCRIPTEN_VERSION\",\n \"python_version\": \"PYVERSION\",\n}\n\n\[email protected](no_args_is_help=True) # type: ignore[misc]\ndef callback() -> None:\n return\n\n\ndef _get_configs() -> dict[str, str]:\n initialize_pyodide_root()\n\n configs: dict[str, str] = get_make_environment_vars()\n\n configs_filtered = {k: configs[v] for k, v in PYODIDE_CONFIGS.items()}\n return configs_filtered\n\n\[email protected](\"list\")\ndef list_config():\n \"\"\"\n List config variables used in pyodide\n \"\"\"\n configs = _get_configs()\n\n for k, v in configs.items():\n typer.echo(f\"{k}={v}\")\n\n\[email protected](\"get\") # type: ignore[misc]\ndef get_config(\n config_var: str = typer.Argument(\n ..., help=\"A config variable to get. Use `list` to see all possible values.\"\n ),\n) -> None:\n \"\"\"\n Get a value of a single config variable used in pyodide\n \"\"\"\n configs = _get_configs()\n\n if config_var not in configs:\n typer.echo(f\"Config variable {config_var} not found.\")\n typer.Exit(1)\n\n typer.echo(configs[config_var])\n", "path": "pyodide-build/pyodide_build/cli/config.py"}, {"content": "import os\nfrom pathlib import Path\n\nfrom ..common import search_pyodide_root\n\n\ndef ensure_env_installed(env: Path) -> None:\n if env.exists():\n return\n from .. 
import __version__\n from ..install_xbuildenv import download_xbuildenv, install_xbuildenv\n\n if \"dev\" in __version__:\n raise RuntimeError(\n \"To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT\"\n )\n\n download_xbuildenv(__version__, env)\n install_xbuildenv(__version__, env)\n\n\ndef initialize_pyodide_root() -> None:\n if \"PYODIDE_ROOT\" in os.environ:\n return\n try:\n os.environ[\"PYODIDE_ROOT\"] = str(search_pyodide_root(__file__))\n return\n except FileNotFoundError:\n pass\n env = Path(\".pyodide-xbuildenv\")\n os.environ[\"PYODIDE_ROOT\"] = str(env / \"xbuildenv/pyodide-root\")\n ensure_env_installed(env)\n", "path": "pyodide-build/pyodide_build/out_of_tree/utils.py"}], "after_files": [{"content": "import typer\n\nfrom ..common import get_make_environment_vars\nfrom ..out_of_tree.utils import initialize_pyodide_root\n\napp = typer.Typer(help=\"Manage config variables used in pyodide\")\n\n\n# A dictionary of config variables {key: env_var_in_makefile}\nPYODIDE_CONFIGS = {\n \"emscripten_version\": \"PYODIDE_EMSCRIPTEN_VERSION\",\n \"python_version\": \"PYVERSION\",\n}\n\n\[email protected](no_args_is_help=True) # type: ignore[misc]\ndef callback() -> None:\n return\n\n\ndef _get_configs() -> dict[str, str]:\n initialize_pyodide_root(quiet=True)\n\n configs: dict[str, str] = get_make_environment_vars()\n\n configs_filtered = {k: configs[v] for k, v in PYODIDE_CONFIGS.items()}\n return configs_filtered\n\n\[email protected](\"list\")\ndef list_config():\n \"\"\"\n List config variables used in pyodide\n \"\"\"\n configs = _get_configs()\n\n for k, v in configs.items():\n typer.echo(f\"{k}={v}\")\n\n\[email protected](\"get\") # type: ignore[misc]\ndef get_config(\n config_var: str = typer.Argument(\n ..., help=\"A config variable to get. Use `list` to see all possible values.\"\n ),\n) -> None:\n \"\"\"\n Get a value of a single config variable used in pyodide\n \"\"\"\n configs = _get_configs()\n\n if config_var not in configs:\n typer.echo(f\"Config variable {config_var} not found.\")\n typer.Exit(1)\n\n typer.echo(configs[config_var])\n", "path": "pyodide-build/pyodide_build/cli/config.py"}, {"content": "import os\nfrom contextlib import ExitStack, redirect_stdout\nfrom io import StringIO\nfrom pathlib import Path\n\nfrom ..common import search_pyodide_root\n\n\ndef ensure_env_installed(env: Path, *, quiet: bool = False) -> None:\n if env.exists():\n return\n from .. import __version__\n from ..install_xbuildenv import download_xbuildenv, install_xbuildenv\n\n if \"dev\" in __version__:\n raise RuntimeError(\n \"To use out of tree builds with development Pyodide, you must explicitly set PYODIDE_ROOT\"\n )\n\n with ExitStack() as stack:\n if quiet:\n # Prevent writes to stdout\n stack.enter_context(redirect_stdout(StringIO()))\n\n download_xbuildenv(__version__, env)\n install_xbuildenv(__version__, env)\n\n\ndef initialize_pyodide_root(*, quiet: bool = False) -> None:\n if \"PYODIDE_ROOT\" in os.environ:\n return\n try:\n os.environ[\"PYODIDE_ROOT\"] = str(search_pyodide_root(__file__))\n return\n except FileNotFoundError:\n pass\n env = Path(\".pyodide-xbuildenv\")\n os.environ[\"PYODIDE_ROOT\"] = str(env / \"xbuildenv/pyodide-root\")\n ensure_env_installed(env, quiet=quiet)\n", "path": "pyodide-build/pyodide_build/out_of_tree/utils.py"}]} | 1,384 | 505 |
gh_patches_debug_8872 | rasdani/github-patches | git_diff | holoviz__panel-889 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'str' object has no attribute 'items' when visualising Vega plot of network using Panel
When running `pn.pane.Vega(spec)` on a network spec (from the vega example showing a force-directed network of the Miserables characters), I get an error at line 21 of the `panel/pane/vega.py` source file. The error:
```
~/anaconda3/envs/i0u19a_jupyter/lib/python3.7/site-packages/panel/pane/vega.py in ds_as_cds(dataset)
19 if len(dataset) == 0:
20 return {}
---> 21 data = {k: [] for k, v in dataset[0].items()}
22 for item in dataset:
23 for k, v in item.items():
AttributeError: 'str' object has no attribute 'items'
```
See the image for a screenshot of the complete error message. The command used was `pn.pane.Vega(spec)`, the error is the one shown above, and the output was `Vega(dict)`.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/pane/vega.py`
Content:
```
1 from __future__ import absolute_import, division, unicode_literals
2
3 import sys
4
5 import param
6 import numpy as np
7
8 from bokeh.models import ColumnDataSource
9 from pyviz_comms import JupyterComm
10
11 from ..viewable import Layoutable
12 from .base import PaneBase
13
14
15 def ds_as_cds(dataset):
16 """
17 Converts Vega dataset into Bokeh ColumnDataSource data
18 """
19 if len(dataset) == 0:
20 return {}
21 data = {k: [] for k, v in dataset[0].items()}
22 for item in dataset:
23 for k, v in item.items():
24 data[k].append(v)
25 data = {k: np.asarray(v) for k, v in data.items()}
26 return data
27
28
29 class Vega(PaneBase):
30 """
31 Vega panes allow rendering Vega plots and traces.
32
33 For efficiency any array objects found inside a Figure are added
34 to a ColumnDataSource which allows using binary transport to sync
35 the figure on bokeh server and via Comms.
36 """
37
38 margin = param.Parameter(default=(5, 5, 30, 5), doc="""
39 Allows to create additional space around the component. May
40 be specified as a two-tuple of the form (vertical, horizontal)
41 or a four-tuple (top, right, bottom, left).""")
42
43 priority = 0.8
44
45 _updates = True
46
47 @classmethod
48 def is_altair(cls, obj):
49 if 'altair' in sys.modules:
50 import altair as alt
51 return isinstance(obj, alt.api.TopLevelMixin)
52 return False
53
54 @classmethod
55 def applies(cls, obj):
56 if isinstance(obj, dict) and 'vega' in obj.get('$schema', '').lower():
57 return True
58 return cls.is_altair(obj)
59
60 @classmethod
61 def _to_json(cls, obj):
62 if isinstance(obj, dict):
63 json = dict(obj)
64 if 'data' in json:
65 data = json['data']
66 if isinstance(data, dict):
67 json['data'] = dict(data)
68 elif isinstance(data, list):
69 json['data'] = [dict(d) for d in data]
70 return json
71 return obj.to_dict()
72
73 def _get_sources(self, json, sources):
74 datasets = json.get('datasets', {})
75 for name in list(datasets):
76 if name in sources or isinstance(datasets[name], dict):
77 continue
78 data = datasets.pop(name)
79 columns = set(data[0]) if data else []
80 if self.is_altair(self.object):
81 import altair as alt
82 if (not isinstance(self.object.data, (alt.Data, alt.UrlData)) and
83 columns == set(self.object.data)):
84 data = ColumnDataSource.from_df(self.object.data)
85 else:
86 data = ds_as_cds(data)
87 sources[name] = ColumnDataSource(data=data)
88 else:
89 sources[name] = ColumnDataSource(data=ds_as_cds(data))
90 data = json.get('data', {})
91 if isinstance(data, dict):
92 data = data.pop('values', {})
93 if data:
94 sources['data'] = ColumnDataSource(data=ds_as_cds(data))
95 elif isinstance(data, list):
96 for d in data:
97 sources[d['name']] = ColumnDataSource(data=ds_as_cds(d['values']))
98
99
100
101 @classmethod
102 def _get_dimensions(cls, json, props):
103 if json is None:
104 return
105
106 view = {}
107 if 'width' in json:
108 view['width'] = json['width']
109 if 'height' in json:
110 view['height'] = json['height']
111 if 'config' in json and 'view' in json['config']:
112 view = json['config']['view']
113 for p in ('width', 'height'):
114 if p not in view:
115 continue
116 if props.get(p) is None or p in view and props.get(p) < view[p]:
117 v = view[p]
118 props[p] = v+22 if isinstance(v, int) else v
119
120 def _get_model(self, doc, root=None, parent=None, comm=None):
121 if 'panel.models.vega' not in sys.modules:
122 if isinstance(comm, JupyterComm):
123 self.param.warning('VegaPlot was not imported on instantiation '
124 'and may not render in a notebook. Restart '
125 'the notebook kernel and ensure you load '
126 'it as part of the extension using:'
127 '\n\npn.extension(\'vega\')\n')
128 from ..models.vega import VegaPlot
129 else:
130 VegaPlot = getattr(sys.modules['panel.models.vega'], 'VegaPlot')
131
132 sources = {}
133 if self.object is None:
134 json = None
135 else:
136 json = self._to_json(self.object)
137 self._get_sources(json, sources)
138 props = self._process_param_change(self._init_properties())
139 self._get_dimensions(json, props)
140 model = VegaPlot(data=json, data_sources=sources, **props)
141 if root is None:
142 root = model
143 self._models[root.ref['id']] = (model, parent)
144 return model
145
146 def _update(self, model):
147 if self.object is None:
148 json = None
149 else:
150 json = self._to_json(self.object)
151 self._get_sources(json, model.data_sources)
152 props = {p : getattr(self, p) for p in list(Layoutable.param)
153 if getattr(self, p) is not None}
154 self._get_dimensions(json, props)
155 props['data'] = json
156 model.update(**props)
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/pane/vega.py b/panel/pane/vega.py
--- a/panel/pane/vega.py
+++ b/panel/pane/vega.py
@@ -94,10 +94,10 @@
sources['data'] = ColumnDataSource(data=ds_as_cds(data))
elif isinstance(data, list):
for d in data:
- sources[d['name']] = ColumnDataSource(data=ds_as_cds(d['values']))
+ if 'values' in d:
+ sources[d['name']] = ColumnDataSource(data=ds_as_cds(d.pop('values')))
-
@classmethod
def _get_dimensions(cls, json, props):
if json is None:
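The added guard reflects that a Vega `data` entry does not have to carry inline `values`; it can reference a URL or a named dataset instead, as the force-directed network example does. A hedged illustration of the fixed loop (the entries below are made up; the exact spec from the report is not shown here):

```python
# A Vega spec's "data" list can mix URL-backed and inline entries.
data_entries = [
    {"name": "node-data", "url": "data/miserables.json"},                 # no "values" key
    {"name": "inline", "values": [{"a": 1, "b": 2}, {"a": 3, "b": 4}]},   # inline rows
]

sources = {}
for d in data_entries:
    if "values" in d:  # the check introduced by the patch
        rows = d["values"]
        sources[d["name"]] = {k: [row[k] for row in rows] for k in rows[0]}

# Without the check, d["values"] fails for URL-backed entries, and specs whose
# data payload is not a list of dicts make ds_as_cds call .items() on a str,
# which is the AttributeError reported in the issue.
```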
| {"golden_diff": "diff --git a/panel/pane/vega.py b/panel/pane/vega.py\n--- a/panel/pane/vega.py\n+++ b/panel/pane/vega.py\n@@ -94,10 +94,10 @@\n sources['data'] = ColumnDataSource(data=ds_as_cds(data))\n elif isinstance(data, list):\n for d in data:\n- sources[d['name']] = ColumnDataSource(data=ds_as_cds(d['values']))\n+ if 'values' in d:\n+ sources[d['name']] = ColumnDataSource(data=ds_as_cds(d.pop('values')))\n \n \n-\n @classmethod\n def _get_dimensions(cls, json, props):\n if json is None:\n", "issue": "'str' object has no attribute 'items' when visualising Vega plot of network using Panel\nWhen running `pn.pane.Vega(spec)` on a network spec (from the vega example showing a force-directed network of the Miserables characters), I get an error in line 21 of the `pane.py` source file. The error:\r\n\r\n```\r\n~/anaconda3/envs/i0u19a_jupyter/lib/python3.7/site-packages/panel/pane/vega.py in ds_as_cds(dataset)\r\n 19 if len(dataset) == 0:\r\n 20 return {}\r\n---> 21 data = {k: [] for k, v in dataset[0].items()}\r\n 22 for item in dataset:\r\n 23 for k, v in item.items():\r\n\r\nAttributeError: 'str' object has no attribute 'items'\r\n```\r\n\r\nSee the image for a screenshot of the complete error message. The command used was `pn.pane.Vega(spec)`, the error what is shown above, and the output was `Vega(dict)`.\r\n\r\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport sys\n\nimport param\nimport numpy as np\n\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\n\nfrom ..viewable import Layoutable\nfrom .base import PaneBase\n\n\ndef ds_as_cds(dataset):\n \"\"\"\n Converts Vega dataset into Bokeh ColumnDataSource data\n \"\"\"\n if len(dataset) == 0:\n return {}\n data = {k: [] for k, v in dataset[0].items()}\n for item in dataset:\n for k, v in item.items():\n data[k].append(v)\n data = {k: np.asarray(v) for k, v in data.items()}\n return data\n\n\nclass Vega(PaneBase):\n \"\"\"\n Vega panes allow rendering Vega plots and traces.\n\n For efficiency any array objects found inside a Figure are added\n to a ColumnDataSource which allows using binary transport to sync\n the figure on bokeh server and via Comms.\n \"\"\"\n\n margin = param.Parameter(default=(5, 5, 30, 5), doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n priority = 0.8\n\n _updates = True\n\n @classmethod\n def is_altair(cls, obj):\n if 'altair' in sys.modules:\n import altair as alt\n return isinstance(obj, alt.api.TopLevelMixin)\n return False\n\n @classmethod\n def applies(cls, obj):\n if isinstance(obj, dict) and 'vega' in obj.get('$schema', '').lower():\n return True\n return cls.is_altair(obj)\n\n @classmethod\n def _to_json(cls, obj):\n if isinstance(obj, dict):\n json = dict(obj)\n if 'data' in json:\n data = json['data']\n if isinstance(data, dict):\n json['data'] = dict(data)\n elif isinstance(data, list):\n json['data'] = [dict(d) for d in data]\n return json\n return obj.to_dict()\n\n def _get_sources(self, json, sources):\n datasets = json.get('datasets', {})\n for name in list(datasets):\n if name in sources or isinstance(datasets[name], dict):\n continue\n data = datasets.pop(name)\n columns = set(data[0]) if data else []\n if self.is_altair(self.object):\n import altair as alt\n if (not isinstance(self.object.data, (alt.Data, alt.UrlData)) and\n columns == set(self.object.data)):\n data = ColumnDataSource.from_df(self.object.data)\n else:\n data = ds_as_cds(data)\n sources[name] = ColumnDataSource(data=data)\n else:\n sources[name] = ColumnDataSource(data=ds_as_cds(data))\n data = json.get('data', {})\n if isinstance(data, dict):\n data = data.pop('values', {})\n if data:\n sources['data'] = ColumnDataSource(data=ds_as_cds(data))\n elif isinstance(data, list):\n for d in data:\n sources[d['name']] = ColumnDataSource(data=ds_as_cds(d['values']))\n \n\n\n @classmethod\n def _get_dimensions(cls, json, props):\n if json is None:\n return\n\n view = {}\n if 'width' in json:\n view['width'] = json['width']\n if 'height' in json:\n view['height'] = json['height']\n if 'config' in json and 'view' in json['config']:\n view = json['config']['view']\n for p in ('width', 'height'):\n if p not in view:\n continue\n if props.get(p) is None or p in view and props.get(p) < view[p]:\n v = view[p]\n props[p] = v+22 if isinstance(v, int) else v\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n if 'panel.models.vega' not in sys.modules:\n if isinstance(comm, JupyterComm):\n self.param.warning('VegaPlot was not imported on instantiation '\n 'and may not render in a notebook. 
Restart '\n 'the notebook kernel and ensure you load '\n 'it as part of the extension using:'\n '\\n\\npn.extension(\\'vega\\')\\n')\n from ..models.vega import VegaPlot\n else:\n VegaPlot = getattr(sys.modules['panel.models.vega'], 'VegaPlot')\n\n sources = {}\n if self.object is None:\n json = None\n else:\n json = self._to_json(self.object)\n self._get_sources(json, sources)\n props = self._process_param_change(self._init_properties())\n self._get_dimensions(json, props)\n model = VegaPlot(data=json, data_sources=sources, **props)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _update(self, model):\n if self.object is None:\n json = None\n else:\n json = self._to_json(self.object)\n self._get_sources(json, model.data_sources)\n props = {p : getattr(self, p) for p in list(Layoutable.param)\n if getattr(self, p) is not None}\n self._get_dimensions(json, props)\n props['data'] = json\n model.update(**props)\n", "path": "panel/pane/vega.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport sys\n\nimport param\nimport numpy as np\n\nfrom bokeh.models import ColumnDataSource\nfrom pyviz_comms import JupyterComm\n\nfrom ..viewable import Layoutable\nfrom .base import PaneBase\n\n\ndef ds_as_cds(dataset):\n \"\"\"\n Converts Vega dataset into Bokeh ColumnDataSource data\n \"\"\"\n if len(dataset) == 0:\n return {}\n data = {k: [] for k, v in dataset[0].items()}\n for item in dataset:\n for k, v in item.items():\n data[k].append(v)\n data = {k: np.asarray(v) for k, v in data.items()}\n return data\n\n\nclass Vega(PaneBase):\n \"\"\"\n Vega panes allow rendering Vega plots and traces.\n\n For efficiency any array objects found inside a Figure are added\n to a ColumnDataSource which allows using binary transport to sync\n the figure on bokeh server and via Comms.\n \"\"\"\n\n margin = param.Parameter(default=(5, 5, 30, 5), doc=\"\"\"\n Allows to create additional space around the component. 
May\n be specified as a two-tuple of the form (vertical, horizontal)\n or a four-tuple (top, right, bottom, left).\"\"\")\n\n priority = 0.8\n\n _updates = True\n\n @classmethod\n def is_altair(cls, obj):\n if 'altair' in sys.modules:\n import altair as alt\n return isinstance(obj, alt.api.TopLevelMixin)\n return False\n\n @classmethod\n def applies(cls, obj):\n if isinstance(obj, dict) and 'vega' in obj.get('$schema', '').lower():\n return True\n return cls.is_altair(obj)\n\n @classmethod\n def _to_json(cls, obj):\n if isinstance(obj, dict):\n json = dict(obj)\n if 'data' in json:\n data = json['data']\n if isinstance(data, dict):\n json['data'] = dict(data)\n elif isinstance(data, list):\n json['data'] = [dict(d) for d in data]\n return json\n return obj.to_dict()\n\n def _get_sources(self, json, sources):\n datasets = json.get('datasets', {})\n for name in list(datasets):\n if name in sources or isinstance(datasets[name], dict):\n continue\n data = datasets.pop(name)\n columns = set(data[0]) if data else []\n if self.is_altair(self.object):\n import altair as alt\n if (not isinstance(self.object.data, (alt.Data, alt.UrlData)) and\n columns == set(self.object.data)):\n data = ColumnDataSource.from_df(self.object.data)\n else:\n data = ds_as_cds(data)\n sources[name] = ColumnDataSource(data=data)\n else:\n sources[name] = ColumnDataSource(data=ds_as_cds(data))\n data = json.get('data', {})\n if isinstance(data, dict):\n data = data.pop('values', {})\n if data:\n sources['data'] = ColumnDataSource(data=ds_as_cds(data))\n elif isinstance(data, list):\n for d in data:\n if 'values' in d:\n sources[d['name']] = ColumnDataSource(data=ds_as_cds(d.pop('values')))\n \n\n @classmethod\n def _get_dimensions(cls, json, props):\n if json is None:\n return\n\n view = {}\n if 'width' in json:\n view['width'] = json['width']\n if 'height' in json:\n view['height'] = json['height']\n if 'config' in json and 'view' in json['config']:\n view = json['config']['view']\n for p in ('width', 'height'):\n if p not in view:\n continue\n if props.get(p) is None or p in view and props.get(p) < view[p]:\n v = view[p]\n props[p] = v+22 if isinstance(v, int) else v\n\n def _get_model(self, doc, root=None, parent=None, comm=None):\n if 'panel.models.vega' not in sys.modules:\n if isinstance(comm, JupyterComm):\n self.param.warning('VegaPlot was not imported on instantiation '\n 'and may not render in a notebook. Restart '\n 'the notebook kernel and ensure you load '\n 'it as part of the extension using:'\n '\\n\\npn.extension(\\'vega\\')\\n')\n from ..models.vega import VegaPlot\n else:\n VegaPlot = getattr(sys.modules['panel.models.vega'], 'VegaPlot')\n\n sources = {}\n if self.object is None:\n json = None\n else:\n json = self._to_json(self.object)\n self._get_sources(json, sources)\n props = self._process_param_change(self._init_properties())\n self._get_dimensions(json, props)\n model = VegaPlot(data=json, data_sources=sources, **props)\n if root is None:\n root = model\n self._models[root.ref['id']] = (model, parent)\n return model\n\n def _update(self, model):\n if self.object is None:\n json = None\n else:\n json = self._to_json(self.object)\n self._get_sources(json, model.data_sources)\n props = {p : getattr(self, p) for p in list(Layoutable.param)\n if getattr(self, p) is not None}\n self._get_dimensions(json, props)\n props['data'] = json\n model.update(**props)\n", "path": "panel/pane/vega.py"}]} | 2,160 | 161 |
gh_patches_debug_11973 | rasdani/github-patches | git_diff | bids-standard__pybids-833 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pandas method FutureWarning: frame.append
With:
- Python 3.9.7
- pybids 0.15.0
- pandas 1.4.0
I'm getting the following `FutureWarning` when running `bids = BIDSLayout(self.dataset.path, derivatives=True)`:
```
...bids/variables/entities.py:245: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.
```
pointing to [this line](https://github.com/bids-standard/pybids/blob/e92d4e8d058fc0e1797c8882dd98024068a303bf/bids/variables/entities.py#L245).
Should I submit a PR to replace that, or are there reasons why this should not be done?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bids/variables/entities.py`
Content:
```
1 """ Data classes for internal BIDS data hierarchy. """
2
3 from itertools import chain
4 from collections import namedtuple
5
6 import pandas as pd
7
8 from . import collections as clc
9 from bids.utils import matches_entities
10
11
12 class Node(object):
13 """Base class for objects that represent a single object in the BIDS
14 hierarchy.
15
16 Parameters
17 ----------
18 id : int or str
19 A value uniquely identifying this node. Typically the
20 entity value extracted from the filename via layout.
21 """
22
23 def __init__(self, level, entities):
24 self.level = level.lower()
25 self.entities = entities
26 self.variables = {}
27
28 def add_variable(self, variable):
29 """Adds a BIDSVariable to the current Node's list.
30
31 Parameters
32 ----------
33 variable : BIDSVariable
34 The Variable to add to the list.
35 """
36 self.variables[variable.name] = variable
37
38
39 class RunNode(Node):
40 """Represents a single Run in a BIDS project.
41
42 Parameters
43 ----------
44 id : int
45 The index of the run.
46 entities : dict
47 Dictionary of entities for this Node.
48 image_file : str
49 The full path to the corresponding nifti image.
50 duration : float
51 Duration of the run, in seconds.
52 repetition_time : float
53 TR for the run.
54 task : str
55 The task name for this run.
56 """
57
58 def __init__(self, entities, image_file, duration, repetition_time, n_vols):
59 self.image_file = image_file
60 self.duration = duration
61 self.repetition_time = repetition_time
62 self.n_vols = n_vols
63 super(RunNode, self).__init__('run', entities)
64
65 def get_info(self):
66 # Note: do not remove the dict() call! self.entities is a SQLAlchemy
67 # association_proxy mapping, and without the conversion, the connection
68 # to the DB persists, causing problems on Python 3.5 if we try to clone
69 # a RunInfo or any containing object.
70 entities = dict(self.entities)
71 return RunInfo(entities, self.duration,
72 self.repetition_time, self.image_file, self.n_vols)
73
74
75 # Stores key information for each Run.
76 RunInfo_ = namedtuple('RunInfo', ['entities', 'duration', 'tr', 'image', 'n_vols'])
77
78
79 # Wrap with class to provide docstring
80 class RunInfo(RunInfo_):
81 """ A namedtuple storing run-related information.
82
83 Properties include 'entities', 'duration', 'tr', and 'image', 'n_vols'.
84 """
85 pass
86
87
88 class NodeIndex(object):
89 """Represents the top level in a BIDS hierarchy. """
90
91 def __init__(self):
92 super(NodeIndex, self).__init__()
93 self.index = pd.DataFrame()
94 self.nodes = []
95
96 def get_collections(self, unit, names=None, merge=False,
97 sampling_rate=None, **entities):
98 """Retrieve variable data for a specified level in the Dataset.
99
100 Parameters
101 ----------
102 unit : str
103 The unit of analysis to return variables for. Must be
104 one of 'run', 'session', 'subject', or 'dataset'.
105 names : list
106 Optional list of variables names to return. If
107 None, all available variables are returned.
108 merge : bool
109 If True, variables are merged across all observations
110 of the current unit. E.g., if unit='subject' and return_type=
111 'collection', variables from all subjects will be merged into a
112 single collection. If False, each observation is handled
113 separately, and the result is returned as a list.
114 sampling_rate : int or str
115 If unit='run', the sampling rate to
116 pass onto the returned BIDSRunVariableCollection.
117 entities : dict
118 Optional constraints used to limit what gets returned.
119
120 Returns
121 -------
122 A list of BIDSVariableCollections if merge=False; a single
123 BIDSVariableCollection if merge=True.
124 """
125
126 nodes = self.get_nodes(unit, entities)
127 var_sets = []
128
129 for n in nodes:
130 var_set = list(n.variables.values())
131 var_set = [v for v in var_set if matches_entities(v, entities)]
132 if names is not None:
133 var_set = [v for v in var_set if v.name in names]
134 # Additional filtering on Variables past run level, because their
135 # contents are extracted from TSV files containing rows from
136 # multiple observations
137 if unit != 'run':
138 var_set = [v.filter(entities) for v in var_set]
139 var_sets.append(var_set)
140
141 if merge:
142 var_sets = [list(chain(*var_sets))]
143
144 results = []
145 for vs in var_sets:
146 if not vs:
147 continue
148 if unit == 'run':
149 vs = clc.BIDSRunVariableCollection(vs, sampling_rate)
150 else:
151 vs = clc.BIDSVariableCollection(vs)
152 results.append(vs)
153
154 if merge:
155 return results[0] if results else None
156
157 return results
158
159 def get_nodes(self, level=None, entities=None, strict=False):
160 """Retrieves all nodes that match the specified criteria.
161
162 Parameters
163 ----------
164 level : str
165 The level of analysis of nodes to return.
166 entities : dict
167 Entities to filter on. All nodes must have
168 matching values on all defined keys to be included.
169 strict : bool
170 If True, an exception will be raised if the entities
171 dict contains any keys that aren't contained in the current
172 index.
173
174 Returns
175 -------
176 A list of Node instances.
177 """
178
179 entities = {} if entities is None else entities.copy()
180
181 if level is not None:
182 entities['level'] = level
183
184 if entities is None:
185 return self.nodes
186
187 match_ents = set(entities.keys())
188 common_cols = list(match_ents & set(self.index.columns))
189
190 if strict and match_ents - common_cols:
191 raise ValueError("Invalid entities: ", match_ents - common_cols)
192
193 if not common_cols:
194 return self.nodes
195
196 # Construct query string that handles both single values and iterables
197 query = []
198 for col in common_cols:
199 oper = 'in' if isinstance(entities[col], (list, tuple)) else '=='
200 q = '{name} {oper} {val}'.format(name=col, oper=oper,
201 val=repr(entities[col]))
202 query.append(q)
203 query = ' and '.join(query)
204
205 rows = self.index.query(query)
206 if rows.empty:
207 return []
208
209 # Sort and return
210 sort_cols = ['subject', 'session', 'task', 'run', 'node_index',
211 'suffix', 'level', 'datatype']
212 sort_cols = [sc for sc in sort_cols if sc in set(rows.columns)]
213 rows = rows.sort_values(sort_cols)
214 inds = rows['node_index'].astype(int)
215 return [self.nodes[i] for i in inds]
216
217 def create_node(self, level, entities, *args, **kwargs):
218 """Creates a new child Node.
219
220 Parameters
221 ----------
222 level : str
223 The level of analysis of the new Node.
224 entities : dict
225 Dictionary of entities belonging to Node.
226 args, kwargs : dict
227 Optional positional or named arguments to pass on to
228 class-specific initializers. These arguments are only used if
229 a Node that matches the passed entities doesn't already exist,
230 and a new one must be created.
231
232 Returns
233 -------
234 A Node instance.
235 """
236
237 if level == 'run':
238 node = RunNode(entities, *args, **kwargs)
239 else:
240 node = Node(level, entities)
241
242 entities = dict(entities, node_index=len(self.nodes), level=level)
243 self.nodes.append(node)
244 node_row = pd.Series(entities)
245 self.index = self.index.append(node_row, ignore_index=True)
246 return node
247
248 def get_or_create_node(self, level, entities, *args, **kwargs):
249 """Retrieves a child Node based on the specified criteria, creating a
250 new Node if necessary.
251
252 Parameters
253 ----------
254 level : str
255 The level of analysis of the Node.
256 entities : dict
257 Dictionary of entities to include in newly-created
258 Nodes or filter existing ones.
259 args, kwargs : dict
260 Optional positional or named arguments to pass on to
261 class-specific initializers. These arguments are only used if
262 a Node that matches the passed entities doesn't already exist,
263 and a new one must be created.
264
265 Returns
266 -------
267 A Node instance.
268 """
269
270 result = self.get_nodes(level, entities)
271
272 if result:
273 if len(result) > 1:
274 raise ValueError("More than one matching Node found! If you're"
275 " expecting more than one Node, use "
276 "get_nodes() instead of get_or_create_node()."
277 )
278 return result[0]
279
280 return self.create_node(level, entities, *args, **kwargs)
281
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bids/variables/entities.py b/bids/variables/entities.py
--- a/bids/variables/entities.py
+++ b/bids/variables/entities.py
@@ -241,8 +241,11 @@
entities = dict(entities, node_index=len(self.nodes), level=level)
self.nodes.append(node)
- node_row = pd.Series(entities)
- self.index = self.index.append(node_row, ignore_index=True)
+ # Because "entities" may have non-scalar values (such as `SliceTiming`)
+ # we need to first create a Series to avoid expansion
+ # From here we can concatenate
+ node_row = pd.DataFrame(pd.Series(entities)).T
+ self.index = pd.concat([self.index, node_row], ignore_index=True)
return node
def get_or_create_node(self, level, entities, *args, **kwargs):
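The patch keeps the behaviour of the old `frame.append` call while moving to `pd.concat`. A small self-contained sketch of the same pattern; the entity names and values are illustrative, mirroring what `create_node` stores:

```python
import pandas as pd

index = pd.DataFrame()
entities = {
    "subject": "01",
    "task": "rest",
    "SliceTiming": [0.0, 0.5, 1.0],  # non-scalar entity value
    "node_index": 0,
    "level": "run",
}

# Going through a Series first keeps list-valued entities such as SliceTiming
# in a single cell; pd.DataFrame(entities) would instead expand them into rows.
node_row = pd.DataFrame(pd.Series(entities)).T
index = pd.concat([index, node_row], ignore_index=True)

print(index.loc[0, "SliceTiming"])  # [0.0, 0.5, 1.0]
```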
| {"golden_diff": "diff --git a/bids/variables/entities.py b/bids/variables/entities.py\n--- a/bids/variables/entities.py\n+++ b/bids/variables/entities.py\n@@ -241,8 +241,11 @@\n \n entities = dict(entities, node_index=len(self.nodes), level=level)\n self.nodes.append(node)\n- node_row = pd.Series(entities)\n- self.index = self.index.append(node_row, ignore_index=True)\n+ # Because \"entities\" may have non-scalar values (such as `SliceTiming`)\n+ # we need to first create a Series to avoid expansion\n+ # From here we can concatenate\n+ node_row = pd.DataFrame(pd.Series(entities)).T\n+ self.index = pd.concat([self.index, node_row], ignore_index=True)\n return node\n \n def get_or_create_node(self, level, entities, *args, **kwargs):\n", "issue": "Pandas method FutureWarning: frame.append\nWith:\r\n- Python 3.9.7\r\n- pybids 0.15.0\r\n- pandas 0.14.0\r\n\r\nI'm getting the following `FutureWarning` when running `bids = BIDSLayout(self.dataset.path, derivatives=True)`:\r\n\r\n```\r\n...bids/variables/entities.py:245: FutureWarning: The frame.append method is deprecated and will be removed from pandas in a future version. Use pandas.concat instead.\r\n```\r\n\r\npointing to [this line](https://github.com/bids-standard/pybids/blob/e92d4e8d058fc0e1797c8882dd98024068a303bf/bids/variables/entities.py#L245).\r\n\r\nShould I submit a PR to replace that, or are there reasons why this should not be done?\n", "before_files": [{"content": "\"\"\" Data classes for internal BIDS data hierarchy. \"\"\"\n\nfrom itertools import chain\nfrom collections import namedtuple\n\nimport pandas as pd\n\nfrom . import collections as clc\nfrom bids.utils import matches_entities\n\n\nclass Node(object):\n \"\"\"Base class for objects that represent a single object in the BIDS\n hierarchy.\n\n Parameters\n ----------\n id : int or str\n A value uniquely identifying this node. Typically the\n entity value extracted from the filename via layout.\n \"\"\"\n\n def __init__(self, level, entities):\n self.level = level.lower()\n self.entities = entities\n self.variables = {}\n\n def add_variable(self, variable):\n \"\"\"Adds a BIDSVariable to the current Node's list.\n\n Parameters\n ----------\n variable : BIDSVariable\n The Variable to add to the list.\n \"\"\"\n self.variables[variable.name] = variable\n\n\nclass RunNode(Node):\n \"\"\"Represents a single Run in a BIDS project.\n\n Parameters\n ----------\n id : int\n The index of the run.\n entities : dict\n Dictionary of entities for this Node.\n image_file : str\n The full path to the corresponding nifti image.\n duration : float\n Duration of the run, in seconds.\n repetition_time : float\n TR for the run.\n task : str\n The task name for this run.\n \"\"\"\n\n def __init__(self, entities, image_file, duration, repetition_time, n_vols):\n self.image_file = image_file\n self.duration = duration\n self.repetition_time = repetition_time\n self.n_vols = n_vols\n super(RunNode, self).__init__('run', entities)\n\n def get_info(self):\n # Note: do not remove the dict() call! 
self.entities is a SQLAlchemy\n # association_proxy mapping, and without the conversion, the connection\n # to the DB persists, causing problems on Python 3.5 if we try to clone\n # a RunInfo or any containing object.\n entities = dict(self.entities)\n return RunInfo(entities, self.duration,\n self.repetition_time, self.image_file, self.n_vols)\n\n\n# Stores key information for each Run.\nRunInfo_ = namedtuple('RunInfo', ['entities', 'duration', 'tr', 'image', 'n_vols'])\n\n\n# Wrap with class to provide docstring\nclass RunInfo(RunInfo_):\n \"\"\" A namedtuple storing run-related information.\n\n Properties include 'entities', 'duration', 'tr', and 'image', 'n_vols'.\n \"\"\"\n pass\n\n\nclass NodeIndex(object):\n \"\"\"Represents the top level in a BIDS hierarchy. \"\"\"\n\n def __init__(self):\n super(NodeIndex, self).__init__()\n self.index = pd.DataFrame()\n self.nodes = []\n\n def get_collections(self, unit, names=None, merge=False,\n sampling_rate=None, **entities):\n \"\"\"Retrieve variable data for a specified level in the Dataset.\n\n Parameters\n ----------\n unit : str\n The unit of analysis to return variables for. Must be\n one of 'run', 'session', 'subject', or 'dataset'.\n names : list\n Optional list of variables names to return. If\n None, all available variables are returned.\n merge : bool\n If True, variables are merged across all observations\n of the current unit. E.g., if unit='subject' and return_type=\n 'collection', variables from all subjects will be merged into a\n single collection. If False, each observation is handled\n separately, and the result is returned as a list.\n sampling_rate : int or str\n If unit='run', the sampling rate to\n pass onto the returned BIDSRunVariableCollection.\n entities : dict\n Optional constraints used to limit what gets returned.\n\n Returns\n -------\n A list of BIDSVariableCollections if merge=False; a single\n BIDSVariableCollection if merge=True.\n \"\"\"\n\n nodes = self.get_nodes(unit, entities)\n var_sets = []\n\n for n in nodes:\n var_set = list(n.variables.values())\n var_set = [v for v in var_set if matches_entities(v, entities)]\n if names is not None:\n var_set = [v for v in var_set if v.name in names]\n # Additional filtering on Variables past run level, because their\n # contents are extracted from TSV files containing rows from\n # multiple observations\n if unit != 'run':\n var_set = [v.filter(entities) for v in var_set]\n var_sets.append(var_set)\n\n if merge:\n var_sets = [list(chain(*var_sets))]\n\n results = []\n for vs in var_sets:\n if not vs:\n continue\n if unit == 'run':\n vs = clc.BIDSRunVariableCollection(vs, sampling_rate)\n else:\n vs = clc.BIDSVariableCollection(vs)\n results.append(vs)\n\n if merge:\n return results[0] if results else None\n\n return results\n\n def get_nodes(self, level=None, entities=None, strict=False):\n \"\"\"Retrieves all nodes that match the specified criteria.\n\n Parameters\n ----------\n level : str\n The level of analysis of nodes to return.\n entities : dict\n Entities to filter on. 
All nodes must have\n matching values on all defined keys to be included.\n strict : bool\n If True, an exception will be raised if the entities\n dict contains any keys that aren't contained in the current\n index.\n\n Returns\n -------\n A list of Node instances.\n \"\"\"\n\n entities = {} if entities is None else entities.copy()\n\n if level is not None:\n entities['level'] = level\n\n if entities is None:\n return self.nodes\n\n match_ents = set(entities.keys())\n common_cols = list(match_ents & set(self.index.columns))\n\n if strict and match_ents - common_cols:\n raise ValueError(\"Invalid entities: \", match_ents - common_cols)\n\n if not common_cols:\n return self.nodes\n\n # Construct query string that handles both single values and iterables\n query = []\n for col in common_cols:\n oper = 'in' if isinstance(entities[col], (list, tuple)) else '=='\n q = '{name} {oper} {val}'.format(name=col, oper=oper,\n val=repr(entities[col]))\n query.append(q)\n query = ' and '.join(query)\n\n rows = self.index.query(query)\n if rows.empty:\n return []\n\n # Sort and return\n sort_cols = ['subject', 'session', 'task', 'run', 'node_index',\n 'suffix', 'level', 'datatype']\n sort_cols = [sc for sc in sort_cols if sc in set(rows.columns)]\n rows = rows.sort_values(sort_cols)\n inds = rows['node_index'].astype(int)\n return [self.nodes[i] for i in inds]\n\n def create_node(self, level, entities, *args, **kwargs):\n \"\"\"Creates a new child Node.\n\n Parameters\n ----------\n level : str\n The level of analysis of the new Node.\n entities : dict\n Dictionary of entities belonging to Node.\n args, kwargs : dict\n Optional positional or named arguments to pass on to\n class-specific initializers. These arguments are only used if\n a Node that matches the passed entities doesn't already exist,\n and a new one must be created.\n\n Returns\n -------\n A Node instance.\n \"\"\"\n\n if level == 'run':\n node = RunNode(entities, *args, **kwargs)\n else:\n node = Node(level, entities)\n\n entities = dict(entities, node_index=len(self.nodes), level=level)\n self.nodes.append(node)\n node_row = pd.Series(entities)\n self.index = self.index.append(node_row, ignore_index=True)\n return node\n\n def get_or_create_node(self, level, entities, *args, **kwargs):\n \"\"\"Retrieves a child Node based on the specified criteria, creating a\n new Node if necessary.\n\n Parameters\n ----------\n level : str\n The level of analysis of the Node.\n entities : dict\n Dictionary of entities to include in newly-created\n Nodes or filter existing ones.\n args, kwargs : dict\n Optional positional or named arguments to pass on to\n class-specific initializers. These arguments are only used if\n a Node that matches the passed entities doesn't already exist,\n and a new one must be created.\n\n Returns\n -------\n A Node instance.\n \"\"\"\n\n result = self.get_nodes(level, entities)\n\n if result:\n if len(result) > 1:\n raise ValueError(\"More than one matching Node found! If you're\"\n \" expecting more than one Node, use \"\n \"get_nodes() instead of get_or_create_node().\"\n )\n return result[0]\n\n return self.create_node(level, entities, *args, **kwargs)\n", "path": "bids/variables/entities.py"}], "after_files": [{"content": "\"\"\" Data classes for internal BIDS data hierarchy. \"\"\"\n\nfrom itertools import chain\nfrom collections import namedtuple\n\nimport pandas as pd\n\nfrom . 
import collections as clc\nfrom bids.utils import matches_entities\n\n\nclass Node(object):\n \"\"\"Base class for objects that represent a single object in the BIDS\n hierarchy.\n\n Parameters\n ----------\n id : int or str\n A value uniquely identifying this node. Typically the\n entity value extracted from the filename via layout.\n \"\"\"\n\n def __init__(self, level, entities):\n self.level = level.lower()\n self.entities = entities\n self.variables = {}\n\n def add_variable(self, variable):\n \"\"\"Adds a BIDSVariable to the current Node's list.\n\n Parameters\n ----------\n variable : BIDSVariable\n The Variable to add to the list.\n \"\"\"\n self.variables[variable.name] = variable\n\n\nclass RunNode(Node):\n \"\"\"Represents a single Run in a BIDS project.\n\n Parameters\n ----------\n id : int\n The index of the run.\n entities : dict\n Dictionary of entities for this Node.\n image_file : str\n The full path to the corresponding nifti image.\n duration : float\n Duration of the run, in seconds.\n repetition_time : float\n TR for the run.\n task : str\n The task name for this run.\n \"\"\"\n\n def __init__(self, entities, image_file, duration, repetition_time, n_vols):\n self.image_file = image_file\n self.duration = duration\n self.repetition_time = repetition_time\n self.n_vols = n_vols\n super(RunNode, self).__init__('run', entities)\n\n def get_info(self):\n # Note: do not remove the dict() call! self.entities is a SQLAlchemy\n # association_proxy mapping, and without the conversion, the connection\n # to the DB persists, causing problems on Python 3.5 if we try to clone\n # a RunInfo or any containing object.\n entities = dict(self.entities)\n return RunInfo(entities, self.duration,\n self.repetition_time, self.image_file, self.n_vols)\n\n\n# Stores key information for each Run.\nRunInfo_ = namedtuple('RunInfo', ['entities', 'duration', 'tr', 'image', 'n_vols'])\n\n\n# Wrap with class to provide docstring\nclass RunInfo(RunInfo_):\n \"\"\" A namedtuple storing run-related information.\n\n Properties include 'entities', 'duration', 'tr', and 'image', 'n_vols'.\n \"\"\"\n pass\n\n\nclass NodeIndex(object):\n \"\"\"Represents the top level in a BIDS hierarchy. \"\"\"\n\n def __init__(self):\n super(NodeIndex, self).__init__()\n self.index = pd.DataFrame()\n self.nodes = []\n\n def get_collections(self, unit, names=None, merge=False,\n sampling_rate=None, **entities):\n \"\"\"Retrieve variable data for a specified level in the Dataset.\n\n Parameters\n ----------\n unit : str\n The unit of analysis to return variables for. Must be\n one of 'run', 'session', 'subject', or 'dataset'.\n names : list\n Optional list of variables names to return. If\n None, all available variables are returned.\n merge : bool\n If True, variables are merged across all observations\n of the current unit. E.g., if unit='subject' and return_type=\n 'collection', variables from all subjects will be merged into a\n single collection. 
If False, each observation is handled\n separately, and the result is returned as a list.\n sampling_rate : int or str\n If unit='run', the sampling rate to\n pass onto the returned BIDSRunVariableCollection.\n entities : dict\n Optional constraints used to limit what gets returned.\n\n Returns\n -------\n A list of BIDSVariableCollections if merge=False; a single\n BIDSVariableCollection if merge=True.\n \"\"\"\n\n nodes = self.get_nodes(unit, entities)\n var_sets = []\n\n for n in nodes:\n var_set = list(n.variables.values())\n var_set = [v for v in var_set if matches_entities(v, entities)]\n if names is not None:\n var_set = [v for v in var_set if v.name in names]\n # Additional filtering on Variables past run level, because their\n # contents are extracted from TSV files containing rows from\n # multiple observations\n if unit != 'run':\n var_set = [v.filter(entities) for v in var_set]\n var_sets.append(var_set)\n\n if merge:\n var_sets = [list(chain(*var_sets))]\n\n results = []\n for vs in var_sets:\n if not vs:\n continue\n if unit == 'run':\n vs = clc.BIDSRunVariableCollection(vs, sampling_rate)\n else:\n vs = clc.BIDSVariableCollection(vs)\n results.append(vs)\n\n if merge:\n return results[0] if results else None\n\n return results\n\n def get_nodes(self, level=None, entities=None, strict=False):\n \"\"\"Retrieves all nodes that match the specified criteria.\n\n Parameters\n ----------\n level : str\n The level of analysis of nodes to return.\n entities : dict\n Entities to filter on. All nodes must have\n matching values on all defined keys to be included.\n strict : bool\n If True, an exception will be raised if the entities\n dict contains any keys that aren't contained in the current\n index.\n\n Returns\n -------\n A list of Node instances.\n \"\"\"\n\n entities = {} if entities is None else entities.copy()\n\n if level is not None:\n entities['level'] = level\n\n if entities is None:\n return self.nodes\n\n match_ents = set(entities.keys())\n common_cols = list(match_ents & set(self.index.columns))\n\n if strict and match_ents - common_cols:\n raise ValueError(\"Invalid entities: \", match_ents - common_cols)\n\n if not common_cols:\n return self.nodes\n\n # Construct query string that handles both single values and iterables\n query = []\n for col in common_cols:\n oper = 'in' if isinstance(entities[col], (list, tuple)) else '=='\n q = '{name} {oper} {val}'.format(name=col, oper=oper,\n val=repr(entities[col]))\n query.append(q)\n query = ' and '.join(query)\n\n rows = self.index.query(query)\n if rows.empty:\n return []\n\n # Sort and return\n sort_cols = ['subject', 'session', 'task', 'run', 'node_index',\n 'suffix', 'level', 'datatype']\n sort_cols = [sc for sc in sort_cols if sc in set(rows.columns)]\n rows = rows.sort_values(sort_cols)\n inds = rows['node_index'].astype(int)\n return [self.nodes[i] for i in inds]\n\n def create_node(self, level, entities, *args, **kwargs):\n \"\"\"Creates a new child Node.\n\n Parameters\n ----------\n level : str\n The level of analysis of the new Node.\n entities : dict\n Dictionary of entities belonging to Node.\n args, kwargs : dict\n Optional positional or named arguments to pass on to\n class-specific initializers. 
These arguments are only used if\n a Node that matches the passed entities doesn't already exist,\n and a new one must be created.\n\n Returns\n -------\n A Node instance.\n \"\"\"\n\n if level == 'run':\n node = RunNode(entities, *args, **kwargs)\n else:\n node = Node(level, entities)\n\n entities = dict(entities, node_index=len(self.nodes), level=level)\n self.nodes.append(node)\n # Because \"entities\" may have non-scalar values (such as `SliceTiming`)\n # we need to first create a Series to avoid expansion\n # From here we can concatenate\n node_row = pd.DataFrame(pd.Series(entities)).T\n self.index = pd.concat([self.index, node_row], ignore_index=True)\n return node\n\n def get_or_create_node(self, level, entities, *args, **kwargs):\n \"\"\"Retrieves a child Node based on the specified criteria, creating a\n new Node if necessary.\n\n Parameters\n ----------\n level : str\n The level of analysis of the Node.\n entities : dict\n Dictionary of entities to include in newly-created\n Nodes or filter existing ones.\n args, kwargs : dict\n Optional positional or named arguments to pass on to\n class-specific initializers. These arguments are only used if\n a Node that matches the passed entities doesn't already exist,\n and a new one must be created.\n\n Returns\n -------\n A Node instance.\n \"\"\"\n\n result = self.get_nodes(level, entities)\n\n if result:\n if len(result) > 1:\n raise ValueError(\"More than one matching Node found! If you're\"\n \" expecting more than one Node, use \"\n \"get_nodes() instead of get_or_create_node().\"\n )\n return result[0]\n\n return self.create_node(level, entities, *args, **kwargs)\n", "path": "bids/variables/entities.py"}]} | 3,152 | 194 |
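The `get_nodes` filtering in the `entities.py` record above works by translating the entities dict into a pandas `query()` string, using `in` for list/tuple values and `==` for scalars. A self-contained sketch of that pattern (the toy DataFrame and entity values are illustrative, not taken from the BIDS code):

```python
import pandas as pd

# Toy stand-in for NodeIndex.index; the real frame holds one row per Node.
index = pd.DataFrame([
    {'subject': '01', 'run': 1, 'node_index': 0},
    {'subject': '02', 'run': 1, 'node_index': 1},
    {'subject': '02', 'run': 2, 'node_index': 2},
])

entities = {'subject': ['01', '02'], 'run': 1}

# Same construction as get_nodes: 'in' for iterables, '==' for scalars.
clauses = []
for col, val in entities.items():
    oper = 'in' if isinstance(val, (list, tuple)) else '=='
    clauses.append('{name} {oper} {val}'.format(name=col, oper=oper, val=repr(val)))

rows = index.query(' and '.join(clauses))
print(rows['node_index'].tolist())  # [0, 1] -- only rows matching every constraint
```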
gh_patches_debug_19145 | rasdani/github-patches | git_diff | kivy__python-for-android-1480 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
libxml2 build broken on latest p4a master with python 3
With latest p4a master & python3crystax & host python 3, the libxml2 recipe gets built inside `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a__ndk_target_19/libxml2/ `.
This is a folder name change, previously it used to be in `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a/libxml2/` - and as a result, the path detection inside the libxml2 fails for some reason.
### Versions
* Python: host python 3.x, python3crystax target
* OS: ubuntu in docker
* Kivy: not used
* Cython: not sure, but I don't think it's relevant
Edit: corrected lxml -> libxml2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/libxslt/__init__.py`
Content:
```
1 from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
2 from os.path import exists, join, dirname
3 import sh
4
5
6 class LibxsltRecipe(Recipe):
7 version = "1.1.28"
8 url = "http://xmlsoft.org/sources/libxslt-{version}.tar.gz"
9 depends = ["libxml2"]
10 patches = ["fix-dlopen.patch"]
11
12 call_hostpython_via_targetpython = False
13
14 def should_build(self, arch):
15 super(LibxsltRecipe, self).should_build(arch)
16 return not exists(join(self.ctx.get_libs_dir(arch.arch), "libxslt.a"))
17
18 def build_arch(self, arch):
19 super(LibxsltRecipe, self).build_arch(arch)
20 env = self.get_recipe_env(arch)
21 with current_directory(self.get_build_dir(arch.arch)):
22 # If the build is done with /bin/sh things blow up,
23 # try really hard to use bash
24 env["CC"] += " -I%s" % self.get_build_dir(arch.arch)
25 libxml = (
26 dirname(dirname(self.get_build_container_dir(arch.arch)))
27 + "/libxml2/%s/libxml2" % arch.arch
28 )
29 shprint(
30 sh.Command("./configure"),
31 "--build=i686-pc-linux-gnu",
32 "--host=arm-linux-eabi",
33 "--without-plugins",
34 "--without-debug",
35 "--without-python",
36 "--without-crypto",
37 "--with-libxml-src=%s" % libxml,
38 _env=env,
39 )
40 shprint(sh.make, "V=1", _env=env)
41 shutil.copyfile(
42 "libxslt/.libs/libxslt.a",
43 join(self.ctx.get_libs_dir(arch.arch), "libxslt.a"),
44 )
45 shutil.copyfile(
46 "libexslt/.libs/libexslt.a",
47 join(self.ctx.get_libs_dir(arch.arch), "libexslt.a"),
48 )
49
50 def get_recipe_env(self, arch):
51 env = super(LibxsltRecipe, self).get_recipe_env(arch)
52 env["CONFIG_SHELL"] = "/bin/bash"
53 env["SHELL"] = "/bin/bash"
54 env[
55 "CC"
56 ] = "arm-linux-androideabi-gcc -DANDROID -mandroid -fomit-frame-pointer --sysroot={}".format(
57 self.ctx.ndk_platform
58 )
59
60 env["LDSHARED"] = "%s -nostartfiles -shared -fPIC" % env["CC"]
61 return env
62
63
64 recipe = LibxsltRecipe()
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/recipes/libxslt/__init__.py b/pythonforandroid/recipes/libxslt/__init__.py
--- a/pythonforandroid/recipes/libxslt/__init__.py
+++ b/pythonforandroid/recipes/libxslt/__init__.py
@@ -1,5 +1,5 @@
from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory
-from os.path import exists, join, dirname
+from os.path import exists, join
import sh
@@ -22,10 +22,8 @@
# If the build is done with /bin/sh things blow up,
# try really hard to use bash
env["CC"] += " -I%s" % self.get_build_dir(arch.arch)
- libxml = (
- dirname(dirname(self.get_build_container_dir(arch.arch)))
- + "/libxml2/%s/libxml2" % arch.arch
- )
+ libxml = Recipe.get_recipe(
+ 'libxml2', self.ctx).get_build_dir(arch.arch)
shprint(
sh.Command("./configure"),
"--build=i686-pc-linux-gnu",
| {"golden_diff": "diff --git a/pythonforandroid/recipes/libxslt/__init__.py b/pythonforandroid/recipes/libxslt/__init__.py\n--- a/pythonforandroid/recipes/libxslt/__init__.py\n+++ b/pythonforandroid/recipes/libxslt/__init__.py\n@@ -1,5 +1,5 @@\n from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory\n-from os.path import exists, join, dirname\n+from os.path import exists, join\n import sh\n \n \n@@ -22,10 +22,8 @@\n # If the build is done with /bin/sh things blow up,\n # try really hard to use bash\n env[\"CC\"] += \" -I%s\" % self.get_build_dir(arch.arch)\n- libxml = (\n- dirname(dirname(self.get_build_container_dir(arch.arch)))\n- + \"/libxml2/%s/libxml2\" % arch.arch\n- )\n+ libxml = Recipe.get_recipe(\n+ 'libxml2', self.ctx).get_build_dir(arch.arch)\n shprint(\n sh.Command(\"./configure\"),\n \"--build=i686-pc-linux-gnu\",\n", "issue": "libxml2 build broken on latest p4a master with python 3\nWith latest p4a master & python3crystax & host python 3, the libxml2 recipe gets built inside `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a__ndk_target_19/libxml2/ `.\r\n\r\nThis is a folder name change, previously it used to be in `/root/.local/share/python-for-android/build/other_builds/libxml2/armeabi-v7a/libxml2/` - and as a result, the path detection inside the libxml2 fails for some reason.\r\n\r\n### Versions\r\n\r\n* Python: host python 3.x, python3crystax target\r\n* OS: ubuntu in docker\r\n* Kivy: not used\r\n* Cython: not sure, but I don't think it's relevant\r\n\r\nEdit: corrected lxml -> libxml2\n", "before_files": [{"content": "from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory\nfrom os.path import exists, join, dirname\nimport sh\n\n\nclass LibxsltRecipe(Recipe):\n version = \"1.1.28\"\n url = \"http://xmlsoft.org/sources/libxslt-{version}.tar.gz\"\n depends = [\"libxml2\"]\n patches = [\"fix-dlopen.patch\"]\n\n call_hostpython_via_targetpython = False\n\n def should_build(self, arch):\n super(LibxsltRecipe, self).should_build(arch)\n return not exists(join(self.ctx.get_libs_dir(arch.arch), \"libxslt.a\"))\n\n def build_arch(self, arch):\n super(LibxsltRecipe, self).build_arch(arch)\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # If the build is done with /bin/sh things blow up,\n # try really hard to use bash\n env[\"CC\"] += \" -I%s\" % self.get_build_dir(arch.arch)\n libxml = (\n dirname(dirname(self.get_build_container_dir(arch.arch)))\n + \"/libxml2/%s/libxml2\" % arch.arch\n )\n shprint(\n sh.Command(\"./configure\"),\n \"--build=i686-pc-linux-gnu\",\n \"--host=arm-linux-eabi\",\n \"--without-plugins\",\n \"--without-debug\",\n \"--without-python\",\n \"--without-crypto\",\n \"--with-libxml-src=%s\" % libxml,\n _env=env,\n )\n shprint(sh.make, \"V=1\", _env=env)\n shutil.copyfile(\n \"libxslt/.libs/libxslt.a\",\n join(self.ctx.get_libs_dir(arch.arch), \"libxslt.a\"),\n )\n shutil.copyfile(\n \"libexslt/.libs/libexslt.a\",\n join(self.ctx.get_libs_dir(arch.arch), \"libexslt.a\"),\n )\n\n def get_recipe_env(self, arch):\n env = super(LibxsltRecipe, self).get_recipe_env(arch)\n env[\"CONFIG_SHELL\"] = \"/bin/bash\"\n env[\"SHELL\"] = \"/bin/bash\"\n env[\n \"CC\"\n ] = \"arm-linux-androideabi-gcc -DANDROID -mandroid -fomit-frame-pointer --sysroot={}\".format(\n self.ctx.ndk_platform\n )\n\n env[\"LDSHARED\"] = \"%s -nostartfiles -shared -fPIC\" % env[\"CC\"]\n return env\n\n\nrecipe = LibxsltRecipe()\n", "path": 
"pythonforandroid/recipes/libxslt/__init__.py"}], "after_files": [{"content": "from pythonforandroid.toolchain import Recipe, shprint, shutil, current_directory\nfrom os.path import exists, join\nimport sh\n\n\nclass LibxsltRecipe(Recipe):\n version = \"1.1.28\"\n url = \"http://xmlsoft.org/sources/libxslt-{version}.tar.gz\"\n depends = [\"libxml2\"]\n patches = [\"fix-dlopen.patch\"]\n\n call_hostpython_via_targetpython = False\n\n def should_build(self, arch):\n super(LibxsltRecipe, self).should_build(arch)\n return not exists(join(self.ctx.get_libs_dir(arch.arch), \"libxslt.a\"))\n\n def build_arch(self, arch):\n super(LibxsltRecipe, self).build_arch(arch)\n env = self.get_recipe_env(arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # If the build is done with /bin/sh things blow up,\n # try really hard to use bash\n env[\"CC\"] += \" -I%s\" % self.get_build_dir(arch.arch)\n libxml = Recipe.get_recipe(\n 'libxml2', self.ctx).get_build_dir(arch.arch)\n shprint(\n sh.Command(\"./configure\"),\n \"--build=i686-pc-linux-gnu\",\n \"--host=arm-linux-eabi\",\n \"--without-plugins\",\n \"--without-debug\",\n \"--without-python\",\n \"--without-crypto\",\n \"--with-libxml-src=%s\" % libxml,\n _env=env,\n )\n shprint(sh.make, \"V=1\", _env=env)\n shutil.copyfile(\n \"libxslt/.libs/libxslt.a\",\n join(self.ctx.get_libs_dir(arch.arch), \"libxslt.a\"),\n )\n shutil.copyfile(\n \"libexslt/.libs/libexslt.a\",\n join(self.ctx.get_libs_dir(arch.arch), \"libexslt.a\"),\n )\n\n def get_recipe_env(self, arch):\n env = super(LibxsltRecipe, self).get_recipe_env(arch)\n env[\"CONFIG_SHELL\"] = \"/bin/bash\"\n env[\"SHELL\"] = \"/bin/bash\"\n env[\n \"CC\"\n ] = \"arm-linux-androideabi-gcc -DANDROID -mandroid -fomit-frame-pointer --sysroot={}\".format(\n self.ctx.ndk_platform\n )\n\n env[\"LDSHARED\"] = \"%s -nostartfiles -shared -fPIC\" % env[\"CC\"]\n return env\n\n\nrecipe = LibxsltRecipe()\n", "path": "pythonforandroid/recipes/libxslt/__init__.py"}]} | 1,149 | 254 |
gh_patches_debug_12242 | rasdani/github-patches | git_diff | DataBiosphere__toil-562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need `toil --version`
Need `toil --version`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/toil/utils/toilMain.py`
Content:
```
1 from __future__ import absolute_import, print_function
2 import os
3 import sys
4
5 def main():
6 modules = loadModules()
7 try:
8 command = sys.argv[1]
9 except IndexError:
10 printHelp(modules)
11 else:
12 if command == '--help':
13 printHelp(modules)
14 else:
15 try:
16 module = modules[command]
17 except KeyError:
18 print("Unknown option '%s'. "
19 "Pass --help to display usage information.\n" % command, file=sys.stderr)
20 sys.exit(1)
21 else:
22 del sys.argv[1]
23 module.main()
24
25
26 def loadModules():
27 # noinspection PyUnresolvedReferences
28 from toil.utils import toilKill, toilRestart, toilStats, toilStatus, toilClean
29 return {name[4:].lower(): module for name, module in locals().iteritems()}
30
31
32 def printHelp(modules):
33 usage = ("\n"
34 "Usage: {name} COMMAND ...\n"
35 " {name} --help\n"
36 " {name} COMMAND --help\n\n"
37 "where COMMAND is one of the following:\n\n{descriptions}\n\n")
38 print(usage.format(
39 name=os.path.basename(sys.argv[0]),
40 commands='|'.join(modules.iterkeys()),
41 descriptions='\n'.join("%s - %s" % (n, m.__doc__.strip()) for n, m in modules.iteritems())))
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/toil/utils/toilMain.py b/src/toil/utils/toilMain.py
--- a/src/toil/utils/toilMain.py
+++ b/src/toil/utils/toilMain.py
@@ -1,4 +1,6 @@
from __future__ import absolute_import, print_function
+from toil.version import version
+import pkg_resources
import os
import sys
@@ -11,6 +13,11 @@
else:
if command == '--help':
printHelp(modules)
+ elif command == '--version':
+ try:
+ print(pkg_resources.get_distribution('toil').version)
+ except:
+ print("Version gathered from toil.version: "+version)
else:
try:
module = modules[command]
| {"golden_diff": "diff --git a/src/toil/utils/toilMain.py b/src/toil/utils/toilMain.py\n--- a/src/toil/utils/toilMain.py\n+++ b/src/toil/utils/toilMain.py\n@@ -1,4 +1,6 @@\n from __future__ import absolute_import, print_function\n+from toil.version import version\n+import pkg_resources\n import os\n import sys\n \n@@ -11,6 +13,11 @@\n else:\n if command == '--help':\n printHelp(modules)\n+ elif command == '--version':\n+ try:\n+ print(pkg_resources.get_distribution('toil').version)\n+ except:\n+ print(\"Version gathered from toil.version: \"+version)\n else:\n try:\n module = modules[command]\n", "issue": "Need `toil --version`\n\nNeed `toil --version`\n\n", "before_files": [{"content": "from __future__ import absolute_import, print_function\nimport os\nimport sys\n\ndef main():\n modules = loadModules()\n try:\n command = sys.argv[1]\n except IndexError:\n printHelp(modules)\n else:\n if command == '--help':\n printHelp(modules)\n else:\n try:\n module = modules[command]\n except KeyError:\n print(\"Unknown option '%s'. \"\n \"Pass --help to display usage information.\\n\" % command, file=sys.stderr)\n sys.exit(1)\n else:\n del sys.argv[1]\n module.main()\n\n\ndef loadModules():\n # noinspection PyUnresolvedReferences\n from toil.utils import toilKill, toilRestart, toilStats, toilStatus, toilClean\n return {name[4:].lower(): module for name, module in locals().iteritems()}\n\n\ndef printHelp(modules):\n usage = (\"\\n\"\n \"Usage: {name} COMMAND ...\\n\"\n \" {name} --help\\n\"\n \" {name} COMMAND --help\\n\\n\"\n \"where COMMAND is one of the following:\\n\\n{descriptions}\\n\\n\")\n print(usage.format(\n name=os.path.basename(sys.argv[0]),\n commands='|'.join(modules.iterkeys()),\n descriptions='\\n'.join(\"%s - %s\" % (n, m.__doc__.strip()) for n, m in modules.iteritems())))\n", "path": "src/toil/utils/toilMain.py"}], "after_files": [{"content": "from __future__ import absolute_import, print_function\nfrom toil.version import version\nimport pkg_resources\nimport os\nimport sys\n\ndef main():\n modules = loadModules()\n try:\n command = sys.argv[1]\n except IndexError:\n printHelp(modules)\n else:\n if command == '--help':\n printHelp(modules)\n elif command == '--version':\n try:\n print(pkg_resources.get_distribution('toil').version)\n except:\n print(\"Version gathered from toil.version: \"+version)\n else:\n try:\n module = modules[command]\n except KeyError:\n print(\"Unknown option '%s'. \"\n \"Pass --help to display usage information.\\n\" % command, file=sys.stderr)\n sys.exit(1)\n else:\n del sys.argv[1]\n module.main()\n\n\ndef loadModules():\n # noinspection PyUnresolvedReferences\n from toil.utils import toilKill, toilRestart, toilStats, toilStatus, toilClean\n return {name[4:].lower(): module for name, module in locals().iteritems()}\n\n\ndef printHelp(modules):\n usage = (\"\\n\"\n \"Usage: {name} COMMAND ...\\n\"\n \" {name} --help\\n\"\n \" {name} COMMAND --help\\n\\n\"\n \"where COMMAND is one of the following:\\n\\n{descriptions}\\n\\n\")\n print(usage.format(\n name=os.path.basename(sys.argv[0]),\n commands='|'.join(modules.iterkeys()),\n descriptions='\\n'.join(\"%s - %s\" % (n, m.__doc__.strip()) for n, m in modules.iteritems())))\n", "path": "src/toil/utils/toilMain.py"}]} | 665 | 169 |
gh_patches_debug_35008 | rasdani/github-patches | git_diff | quantumlib__Cirq-1170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
check.sh fails on clean master
Just merged master and ran check.sh and it is failing due to tensorflow
```
(cirq) [dabacon:cirq:~/git/cirq]$./continuous-integration/check.sh
run: ('virtualenv', '--quiet', '-p', '/usr/bin/python3.5', '/tmp/test-cirq-kp5mfrah/.test_virtualenv')
run: ('/tmp/test-cirq-kp5mfrah/.test_virtualenv/bin/pip', 'install', '--quiet', '-r', '/tmp/test-cirq-kp5mfrah/requirements.txt')
run: ('/tmp/test-cirq-kp5mfrah/.test_virtualenv/bin/pip', 'install', '--quiet', '-r', '/tmp/test-cirq-kp5mfrah/dev_tools/conf/pip-list-dev-tools.txt')
Running pylint
Finished pylint
Running typecheck
Finished typecheck
Running pytest
==================================== ERRORS ====================================
_______ ERROR collecting cirq/contrib/tpu/circuit_to_tensorflow_test.py ________
ImportError while importing test module '/tmp/test-cirq-kp5mfrah/cirq/contrib/tpu/circuit_to_tensorflow_test.py'.
Hint: make sure your test modules/packages have valid Python names.
Traceback:
cirq/contrib/tpu/__init__.py:75: in <module>
from cirq.contrib.tpu.circuit_to_tensorflow import (
cirq/contrib/tpu/circuit_to_tensorflow.py:20: in <module>
import tensorflow as tf
E ImportError: No module named 'tensorflow'
----------- coverage: platform linux, python 3.5.3-final-0 -----------
Coverage annotated source written next to source
!!!!!!!!!!!!!!!!!!! Interrupted: 1 errors during collection !!!!!!!!!!!!!!!!!!!!
1 error in 34.82 seconds
Finished pytest
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dev_tools/env_tools.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 import shutil
17 import sys
18 from typing import Optional, Iterable, Callable, cast
19
20 from dev_tools import shell_tools, git_env_tools
21 from dev_tools.github_repository import GithubRepository
22 from dev_tools.prepared_env import PreparedEnv
23
24
25 def get_unhidden_ungenerated_python_files(directory: str) -> Iterable[str]:
26 """Iterates through relevant python files within the given directory.
27
28 Args:
29 directory: The top-level directory to explore.
30
31 Yields:
32 File paths.
33 """
34 for dirpath, dirnames, filenames in os.walk(directory, topdown=True):
35 if os.path.split(dirpath)[-1].startswith('.'):
36 dirnames.clear()
37 continue
38
39 for filename in filenames:
40 if filename.endswith('.py') and not filename.endswith('_pb2.py'):
41 yield os.path.join(dirpath, filename)
42
43
44 def create_virtual_env(venv_path: str,
45 requirements_paths: Iterable[str],
46 python_path: str,
47 verbose: bool) -> None:
48 """Creates a new virtual environment and then installs dependencies.
49
50 Args:
51 venv_path: Where to put the virtual environment's state.
52 requirements_paths: Location of requirements files to -r install.
53 python_path: The python binary to use.
54 verbose: When set, more progress output is produced.
55 """
56 shell_tools.run_cmd('virtualenv',
57 None if verbose else '--quiet',
58 '-p',
59 python_path,
60 venv_path,
61 out=sys.stderr)
62 pip_path = os.path.join(venv_path, 'bin', 'pip')
63 for req_path in requirements_paths:
64 shell_tools.run_cmd(pip_path,
65 'install',
66 None if verbose else '--quiet',
67 '-r',
68 req_path,
69 out=sys.stderr)
70
71
72 def prepare_temporary_test_environment(
73 destination_directory: str,
74 repository: GithubRepository,
75 pull_request_number: Optional[int],
76 verbose: bool,
77 env_name: str = '.test_virtualenv',
78 python_path: str = '/usr/bin/python3.5',
79 commit_ids_known_callback: Callable[[PreparedEnv], None] = None
80 ) -> PreparedEnv:
81 """Prepares a temporary test environment at the (existing empty) directory.
82
83 Args:
84 destination_directory: The location to put files. The caller is
85 responsible for deleting the directory, whether or not this method
86 succeeds or fails.
87 repository: The github repository to download content from, if a pull
88 request number is given.
89 pull_request_number: If set, test content is fetched from github.
90 Otherwise copies of local files are used.
91 verbose: When set, more progress output is produced.
92 env_name: The name to use for the virtual environment.
93 python_path: Location of the python binary to use within the
94 virtual environment.
95 commit_ids_known_callback: A function to call when the actual commit id
96 being tested is known, before the virtual environment is ready.
97
98 Returns:
99 Commit ids corresponding to content to test/compare.
100 """
101 # Fetch content.
102 if pull_request_number is not None:
103 env = git_env_tools.fetch_github_pull_request(
104 destination_directory=destination_directory,
105 repository=repository,
106 pull_request_number=pull_request_number,
107 verbose=verbose)
108 else:
109 env = git_env_tools.fetch_local_files(
110 destination_directory=destination_directory,
111 verbose=verbose)
112
113 if commit_ids_known_callback is not None:
114 commit_ids_known_callback(env)
115
116 # Create virtual environment.
117 base_path = cast(str, env.destination_directory)
118 env_path = os.path.join(base_path, env_name)
119 req_path = os.path.join(base_path, 'requirements.txt')
120 req_path_2 = os.path.join(base_path,
121 'dev_tools',
122 'conf',
123 'pip-list-dev-tools.txt')
124 create_virtual_env(venv_path=env_path,
125 python_path=python_path,
126 requirements_paths=[req_path, req_path_2],
127 verbose=verbose)
128
129 return PreparedEnv(github_repo=env.repository,
130 actual_commit_id=env.actual_commit_id,
131 compare_commit_id=env.compare_commit_id,
132 destination_directory=env.destination_directory,
133 virtual_env_path=env_path)
134
135
136 def derive_temporary_python2_environment(
137 destination_directory: str,
138 python3_environment: PreparedEnv,
139 verbose: bool,
140 env_name: str = '.test_virtualenv_py2',
141 python_path: str = "/usr/bin/python2.7") -> PreparedEnv:
142 """Creates a python 2.7 environment starting from a prepared python 3 one.
143
144 Args:
145 destination_directory: Where to put the python 2 environment.
146 python3_environment: The prepared environment to start from.
147 verbose: When set, more progress output is produced.
148 env_name: The name to use for the virtualenv directory.
149 python_path: The python binary to use.
150
151 Returns:
152 A description of the environment that was prepared.
153 """
154
155 shutil.rmtree(destination_directory)
156 input_directory = cast(str, python3_environment.destination_directory)
157 os.chdir(input_directory)
158 conversion_script_path = os.path.join(
159 input_directory,
160 'dev_tools',
161 'python2.7-generate.sh')
162 shell_tools.run_cmd('bash',
163 conversion_script_path,
164 destination_directory,
165 input_directory,
166 python3_environment.virtual_env_path,
167 out=sys.stderr)
168 os.chdir(destination_directory)
169
170 # Create virtual environment.
171 env_path = os.path.join(destination_directory, env_name)
172 # (These files are output by dev_tools/python2.7-generate.sh.)
173 req_path = os.path.join(destination_directory, 'requirements.txt')
174 req_path_2 = os.path.join(destination_directory, 'pip-list-test-tools.txt')
175 create_virtual_env(venv_path=env_path,
176 python_path=python_path,
177 requirements_paths=[req_path, req_path_2],
178 verbose=verbose)
179
180 return PreparedEnv(github_repo=python3_environment.repository,
181 actual_commit_id=python3_environment.actual_commit_id,
182 compare_commit_id=python3_environment.compare_commit_id,
183 destination_directory=destination_directory,
184 virtual_env_path=env_path)
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dev_tools/env_tools.py b/dev_tools/env_tools.py
--- a/dev_tools/env_tools.py
+++ b/dev_tools/env_tools.py
@@ -117,13 +117,18 @@
base_path = cast(str, env.destination_directory)
env_path = os.path.join(base_path, env_name)
req_path = os.path.join(base_path, 'requirements.txt')
- req_path_2 = os.path.join(base_path,
- 'dev_tools',
- 'conf',
- 'pip-list-dev-tools.txt')
+ dev_req_path = os.path.join(base_path,
+ 'dev_tools',
+ 'conf',
+ 'pip-list-dev-tools.txt')
+ contrib_req_path = os.path.join(base_path,
+ 'cirq',
+ 'contrib',
+ 'contrib-requirements.txt')
+ rev_paths = [req_path, dev_req_path, contrib_req_path]
create_virtual_env(venv_path=env_path,
python_path=python_path,
- requirements_paths=[req_path, req_path_2],
+ requirements_paths=rev_paths,
verbose=verbose)
return PreparedEnv(github_repo=env.repository,
@@ -171,10 +176,16 @@
env_path = os.path.join(destination_directory, env_name)
# (These files are output by dev_tools/python2.7-generate.sh.)
req_path = os.path.join(destination_directory, 'requirements.txt')
- req_path_2 = os.path.join(destination_directory, 'pip-list-test-tools.txt')
+ dev_req_path = os.path.join(destination_directory,
+ 'pip-list-test-tools.txt')
+ contrib_req_path = os.path.join(destination_directory,
+ 'cirq',
+ 'contrib',
+ 'contrib-requirements.txt')
+ req_paths = [req_path, dev_req_path, contrib_req_path]
create_virtual_env(venv_path=env_path,
python_path=python_path,
- requirements_paths=[req_path, req_path_2],
+ requirements_paths=req_paths,
verbose=verbose)
return PreparedEnv(github_repo=python3_environment.repository,
| {"golden_diff": "diff --git a/dev_tools/env_tools.py b/dev_tools/env_tools.py\n--- a/dev_tools/env_tools.py\n+++ b/dev_tools/env_tools.py\n@@ -117,13 +117,18 @@\n base_path = cast(str, env.destination_directory)\n env_path = os.path.join(base_path, env_name)\n req_path = os.path.join(base_path, 'requirements.txt')\n- req_path_2 = os.path.join(base_path,\n- 'dev_tools',\n- 'conf',\n- 'pip-list-dev-tools.txt')\n+ dev_req_path = os.path.join(base_path,\n+ 'dev_tools',\n+ 'conf',\n+ 'pip-list-dev-tools.txt')\n+ contrib_req_path = os.path.join(base_path,\n+ 'cirq',\n+ 'contrib',\n+ 'contrib-requirements.txt')\n+ rev_paths = [req_path, dev_req_path, contrib_req_path]\n create_virtual_env(venv_path=env_path,\n python_path=python_path,\n- requirements_paths=[req_path, req_path_2],\n+ requirements_paths=rev_paths,\n verbose=verbose)\n \n return PreparedEnv(github_repo=env.repository,\n@@ -171,10 +176,16 @@\n env_path = os.path.join(destination_directory, env_name)\n # (These files are output by dev_tools/python2.7-generate.sh.)\n req_path = os.path.join(destination_directory, 'requirements.txt')\n- req_path_2 = os.path.join(destination_directory, 'pip-list-test-tools.txt')\n+ dev_req_path = os.path.join(destination_directory,\n+ 'pip-list-test-tools.txt')\n+ contrib_req_path = os.path.join(destination_directory,\n+ 'cirq',\n+ 'contrib',\n+ 'contrib-requirements.txt')\n+ req_paths = [req_path, dev_req_path, contrib_req_path]\n create_virtual_env(venv_path=env_path,\n python_path=python_path,\n- requirements_paths=[req_path, req_path_2],\n+ requirements_paths=req_paths,\n verbose=verbose)\n \n return PreparedEnv(github_repo=python3_environment.repository,\n", "issue": "check.sh fails on clean master\nJust merged master and ran check.sh and it is failing due to tensorflow\r\n\r\n```\r\n(cirq) [dabacon:cirq:~/git/cirq]$./continuous-integration/check.sh \r\nrun: ('virtualenv', '--quiet', '-p', '/usr/bin/python3.5', '/tmp/test-cirq-kp5mfrah/.test_virtualenv')\r\nrun: ('/tmp/test-cirq-kp5mfrah/.test_virtualenv/bin/pip', 'install', '--quiet', '-r', '/tmp/test-cirq-kp5mfrah/requirements.txt')\r\nrun: ('/tmp/test-cirq-kp5mfrah/.test_virtualenv/bin/pip', 'install', '--quiet', '-r', '/tmp/test-cirq-kp5mfrah/dev_tools/conf/pip-list-dev-tools.txt')\r\n\r\nRunning pylint\r\nFinished pylint\r\n\r\n\r\nRunning typecheck\r\nFinished typecheck\r\n\r\n\r\nRunning pytest\r\n\r\n==================================== ERRORS ====================================\r\n_______ ERROR collecting cirq/contrib/tpu/circuit_to_tensorflow_test.py ________\r\nImportError while importing test module '/tmp/test-cirq-kp5mfrah/cirq/contrib/tpu/circuit_to_tensorflow_test.py'.\r\nHint: make sure your test modules/packages have valid Python names.\r\nTraceback:\r\ncirq/contrib/tpu/__init__.py:75: in <module>\r\n from cirq.contrib.tpu.circuit_to_tensorflow import (\r\ncirq/contrib/tpu/circuit_to_tensorflow.py:20: in <module>\r\n import tensorflow as tf\r\nE ImportError: No module named 'tensorflow'\r\n\r\n----------- coverage: platform linux, python 3.5.3-final-0 -----------\r\nCoverage annotated source written next to source\r\n\r\n!!!!!!!!!!!!!!!!!!! 
Interrupted: 1 errors during collection !!!!!!!!!!!!!!!!!!!!\r\n1 error in 34.82 seconds\r\nFinished pytest\r\n```\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport sys\nfrom typing import Optional, Iterable, Callable, cast\n\nfrom dev_tools import shell_tools, git_env_tools\nfrom dev_tools.github_repository import GithubRepository\nfrom dev_tools.prepared_env import PreparedEnv\n\n\ndef get_unhidden_ungenerated_python_files(directory: str) -> Iterable[str]:\n \"\"\"Iterates through relevant python files within the given directory.\n\n Args:\n directory: The top-level directory to explore.\n\n Yields:\n File paths.\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(directory, topdown=True):\n if os.path.split(dirpath)[-1].startswith('.'):\n dirnames.clear()\n continue\n\n for filename in filenames:\n if filename.endswith('.py') and not filename.endswith('_pb2.py'):\n yield os.path.join(dirpath, filename)\n\n\ndef create_virtual_env(venv_path: str,\n requirements_paths: Iterable[str],\n python_path: str,\n verbose: bool) -> None:\n \"\"\"Creates a new virtual environment and then installs dependencies.\n\n Args:\n venv_path: Where to put the virtual environment's state.\n requirements_paths: Location of requirements files to -r install.\n python_path: The python binary to use.\n verbose: When set, more progress output is produced.\n \"\"\"\n shell_tools.run_cmd('virtualenv',\n None if verbose else '--quiet',\n '-p',\n python_path,\n venv_path,\n out=sys.stderr)\n pip_path = os.path.join(venv_path, 'bin', 'pip')\n for req_path in requirements_paths:\n shell_tools.run_cmd(pip_path,\n 'install',\n None if verbose else '--quiet',\n '-r',\n req_path,\n out=sys.stderr)\n\n\ndef prepare_temporary_test_environment(\n destination_directory: str,\n repository: GithubRepository,\n pull_request_number: Optional[int],\n verbose: bool,\n env_name: str = '.test_virtualenv',\n python_path: str = '/usr/bin/python3.5',\n commit_ids_known_callback: Callable[[PreparedEnv], None] = None\n) -> PreparedEnv:\n \"\"\"Prepares a temporary test environment at the (existing empty) directory.\n\n Args:\n destination_directory: The location to put files. 
The caller is\n responsible for deleting the directory, whether or not this method\n succeeds or fails.\n repository: The github repository to download content from, if a pull\n request number is given.\n pull_request_number: If set, test content is fetched from github.\n Otherwise copies of local files are used.\n verbose: When set, more progress output is produced.\n env_name: The name to use for the virtual environment.\n python_path: Location of the python binary to use within the\n virtual environment.\n commit_ids_known_callback: A function to call when the actual commit id\n being tested is known, before the virtual environment is ready.\n\n Returns:\n Commit ids corresponding to content to test/compare.\n \"\"\"\n # Fetch content.\n if pull_request_number is not None:\n env = git_env_tools.fetch_github_pull_request(\n destination_directory=destination_directory,\n repository=repository,\n pull_request_number=pull_request_number,\n verbose=verbose)\n else:\n env = git_env_tools.fetch_local_files(\n destination_directory=destination_directory,\n verbose=verbose)\n\n if commit_ids_known_callback is not None:\n commit_ids_known_callback(env)\n\n # Create virtual environment.\n base_path = cast(str, env.destination_directory)\n env_path = os.path.join(base_path, env_name)\n req_path = os.path.join(base_path, 'requirements.txt')\n req_path_2 = os.path.join(base_path,\n 'dev_tools',\n 'conf',\n 'pip-list-dev-tools.txt')\n create_virtual_env(venv_path=env_path,\n python_path=python_path,\n requirements_paths=[req_path, req_path_2],\n verbose=verbose)\n\n return PreparedEnv(github_repo=env.repository,\n actual_commit_id=env.actual_commit_id,\n compare_commit_id=env.compare_commit_id,\n destination_directory=env.destination_directory,\n virtual_env_path=env_path)\n\n\ndef derive_temporary_python2_environment(\n destination_directory: str,\n python3_environment: PreparedEnv,\n verbose: bool,\n env_name: str = '.test_virtualenv_py2',\n python_path: str = \"/usr/bin/python2.7\") -> PreparedEnv:\n \"\"\"Creates a python 2.7 environment starting from a prepared python 3 one.\n\n Args:\n destination_directory: Where to put the python 2 environment.\n python3_environment: The prepared environment to start from.\n verbose: When set, more progress output is produced.\n env_name: The name to use for the virtualenv directory.\n python_path: The python binary to use.\n\n Returns:\n A description of the environment that was prepared.\n \"\"\"\n\n shutil.rmtree(destination_directory)\n input_directory = cast(str, python3_environment.destination_directory)\n os.chdir(input_directory)\n conversion_script_path = os.path.join(\n input_directory,\n 'dev_tools',\n 'python2.7-generate.sh')\n shell_tools.run_cmd('bash',\n conversion_script_path,\n destination_directory,\n input_directory,\n python3_environment.virtual_env_path,\n out=sys.stderr)\n os.chdir(destination_directory)\n\n # Create virtual environment.\n env_path = os.path.join(destination_directory, env_name)\n # (These files are output by dev_tools/python2.7-generate.sh.)\n req_path = os.path.join(destination_directory, 'requirements.txt')\n req_path_2 = os.path.join(destination_directory, 'pip-list-test-tools.txt')\n create_virtual_env(venv_path=env_path,\n python_path=python_path,\n requirements_paths=[req_path, req_path_2],\n verbose=verbose)\n\n return PreparedEnv(github_repo=python3_environment.repository,\n actual_commit_id=python3_environment.actual_commit_id,\n compare_commit_id=python3_environment.compare_commit_id,\n 
destination_directory=destination_directory,\n virtual_env_path=env_path)\n", "path": "dev_tools/env_tools.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport shutil\nimport sys\nfrom typing import Optional, Iterable, Callable, cast\n\nfrom dev_tools import shell_tools, git_env_tools\nfrom dev_tools.github_repository import GithubRepository\nfrom dev_tools.prepared_env import PreparedEnv\n\n\ndef get_unhidden_ungenerated_python_files(directory: str) -> Iterable[str]:\n \"\"\"Iterates through relevant python files within the given directory.\n\n Args:\n directory: The top-level directory to explore.\n\n Yields:\n File paths.\n \"\"\"\n for dirpath, dirnames, filenames in os.walk(directory, topdown=True):\n if os.path.split(dirpath)[-1].startswith('.'):\n dirnames.clear()\n continue\n\n for filename in filenames:\n if filename.endswith('.py') and not filename.endswith('_pb2.py'):\n yield os.path.join(dirpath, filename)\n\n\ndef create_virtual_env(venv_path: str,\n requirements_paths: Iterable[str],\n python_path: str,\n verbose: bool) -> None:\n \"\"\"Creates a new virtual environment and then installs dependencies.\n\n Args:\n venv_path: Where to put the virtual environment's state.\n requirements_paths: Location of requirements files to -r install.\n python_path: The python binary to use.\n verbose: When set, more progress output is produced.\n \"\"\"\n shell_tools.run_cmd('virtualenv',\n None if verbose else '--quiet',\n '-p',\n python_path,\n venv_path,\n out=sys.stderr)\n pip_path = os.path.join(venv_path, 'bin', 'pip')\n for req_path in requirements_paths:\n shell_tools.run_cmd(pip_path,\n 'install',\n None if verbose else '--quiet',\n '-r',\n req_path,\n out=sys.stderr)\n\n\ndef prepare_temporary_test_environment(\n destination_directory: str,\n repository: GithubRepository,\n pull_request_number: Optional[int],\n verbose: bool,\n env_name: str = '.test_virtualenv',\n python_path: str = '/usr/bin/python3.5',\n commit_ids_known_callback: Callable[[PreparedEnv], None] = None\n) -> PreparedEnv:\n \"\"\"Prepares a temporary test environment at the (existing empty) directory.\n\n Args:\n destination_directory: The location to put files. 
The caller is\n responsible for deleting the directory, whether or not this method\n succeeds or fails.\n repository: The github repository to download content from, if a pull\n request number is given.\n pull_request_number: If set, test content is fetched from github.\n Otherwise copies of local files are used.\n verbose: When set, more progress output is produced.\n env_name: The name to use for the virtual environment.\n python_path: Location of the python binary to use within the\n virtual environment.\n commit_ids_known_callback: A function to call when the actual commit id\n being tested is known, before the virtual environment is ready.\n\n Returns:\n Commit ids corresponding to content to test/compare.\n \"\"\"\n # Fetch content.\n if pull_request_number is not None:\n env = git_env_tools.fetch_github_pull_request(\n destination_directory=destination_directory,\n repository=repository,\n pull_request_number=pull_request_number,\n verbose=verbose)\n else:\n env = git_env_tools.fetch_local_files(\n destination_directory=destination_directory,\n verbose=verbose)\n\n if commit_ids_known_callback is not None:\n commit_ids_known_callback(env)\n\n # Create virtual environment.\n base_path = cast(str, env.destination_directory)\n env_path = os.path.join(base_path, env_name)\n req_path = os.path.join(base_path, 'requirements.txt')\n dev_req_path = os.path.join(base_path,\n 'dev_tools',\n 'conf',\n 'pip-list-dev-tools.txt')\n contrib_req_path = os.path.join(base_path,\n 'cirq',\n 'contrib',\n 'contrib-requirements.txt')\n rev_paths = [req_path, dev_req_path, contrib_req_path]\n create_virtual_env(venv_path=env_path,\n python_path=python_path,\n requirements_paths=rev_paths,\n verbose=verbose)\n\n return PreparedEnv(github_repo=env.repository,\n actual_commit_id=env.actual_commit_id,\n compare_commit_id=env.compare_commit_id,\n destination_directory=env.destination_directory,\n virtual_env_path=env_path)\n\n\ndef derive_temporary_python2_environment(\n destination_directory: str,\n python3_environment: PreparedEnv,\n verbose: bool,\n env_name: str = '.test_virtualenv_py2',\n python_path: str = \"/usr/bin/python2.7\") -> PreparedEnv:\n \"\"\"Creates a python 2.7 environment starting from a prepared python 3 one.\n\n Args:\n destination_directory: Where to put the python 2 environment.\n python3_environment: The prepared environment to start from.\n verbose: When set, more progress output is produced.\n env_name: The name to use for the virtualenv directory.\n python_path: The python binary to use.\n\n Returns:\n A description of the environment that was prepared.\n \"\"\"\n\n shutil.rmtree(destination_directory)\n input_directory = cast(str, python3_environment.destination_directory)\n os.chdir(input_directory)\n conversion_script_path = os.path.join(\n input_directory,\n 'dev_tools',\n 'python2.7-generate.sh')\n shell_tools.run_cmd('bash',\n conversion_script_path,\n destination_directory,\n input_directory,\n python3_environment.virtual_env_path,\n out=sys.stderr)\n os.chdir(destination_directory)\n\n # Create virtual environment.\n env_path = os.path.join(destination_directory, env_name)\n # (These files are output by dev_tools/python2.7-generate.sh.)\n req_path = os.path.join(destination_directory, 'requirements.txt')\n dev_req_path = os.path.join(destination_directory,\n 'pip-list-test-tools.txt')\n contrib_req_path = os.path.join(destination_directory,\n 'cirq',\n 'contrib',\n 'contrib-requirements.txt')\n req_paths = [req_path, dev_req_path, contrib_req_path]\n 
create_virtual_env(venv_path=env_path,\n python_path=python_path,\n requirements_paths=req_paths,\n verbose=verbose)\n\n return PreparedEnv(github_repo=python3_environment.repository,\n actual_commit_id=python3_environment.actual_commit_id,\n compare_commit_id=python3_environment.compare_commit_id,\n destination_directory=destination_directory,\n virtual_env_path=env_path)\n", "path": "dev_tools/env_tools.py"}]} | 2,545 | 464 |
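The fix above is bookkeeping: `cirq/contrib/contrib-requirements.txt` (which supplies the tensorflow dependency that `cirq.contrib.tpu` imports) is appended to the list of `-r` files installed into both the Python 3 and the derived Python 2.7 test virtualenvs. A small sketch of the resulting Python 3 path list (`base_path` is whatever checkout the env tools are pointed at; layout as in the patch):

```python
import os.path

def test_env_requirement_files(base_path):
    """Requirement files pip-installed into the temporary test virtualenv."""
    return [
        os.path.join(base_path, 'requirements.txt'),
        os.path.join(base_path, 'dev_tools', 'conf', 'pip-list-dev-tools.txt'),
        # Added by the patch: without it tensorflow is absent from the venv and
        # collecting cirq/contrib/tpu tests fails with ImportError.
        os.path.join(base_path, 'cirq', 'contrib', 'contrib-requirements.txt'),
    ]

print(test_env_requirement_files('/tmp/test-cirq-kp5mfrah'))
```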
gh_patches_debug_22578 | rasdani/github-patches | git_diff | quantumlib__Cirq-4690 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Should an IonDevice accept any qubit type?
Some docstrings ([here](https://github.com/quantumlib/Cirq/blob/master/cirq/ion/ion_device.py#L26)) state qubits are one a line, while others ([here](https://github.com/quantumlib/Cirq/blob/master/cirq/ion/ion_device.py#L42)) state qubits are identified by their x, y location. I believe the latter is a typo due to the [annotation](https://github.com/quantumlib/Cirq/blob/master/cirq/ion/ion_device.py#L34).
If so, should `__init__` raise an error if the input qubits are not `LineQubit`s? (The `at` and `neighbors_of` methods assume `LineQubit`s.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/ion/ion_device.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Any, FrozenSet, Iterable, Optional, Set, TYPE_CHECKING
16
17 from cirq import circuits, value, devices, ops, protocols
18 from cirq.ion import convert_to_ion_gates
19
20 if TYPE_CHECKING:
21 import cirq
22
23
24 def get_ion_gateset() -> ops.Gateset:
25 return ops.Gateset(
26 ops.XXPowGate,
27 ops.MeasurementGate,
28 ops.XPowGate,
29 ops.YPowGate,
30 ops.ZPowGate,
31 ops.PhasedXPowGate,
32 unroll_circuit_op=False,
33 accept_global_phase_op=False,
34 )
35
36
37 @value.value_equality
38 class IonDevice(devices.Device):
39 """A device with qubits placed on a line.
40
41 Qubits have all-to-all connectivity.
42 """
43
44 def __init__(
45 self,
46 measurement_duration: 'cirq.DURATION_LIKE',
47 twoq_gates_duration: 'cirq.DURATION_LIKE',
48 oneq_gates_duration: 'cirq.DURATION_LIKE',
49 qubits: Iterable[devices.LineQubit],
50 ) -> None:
51 """Initializes the description of an ion trap device.
52
53 Args:
54 measurement_duration: The maximum duration of a measurement.
55 twoq_gates_duration: The maximum duration of a two qubit operation.
56 oneq_gates_duration: The maximum duration of a single qubit
57 operation.
58 qubits: Qubits on the device, identified by their x, y location.
59 """
60 self._measurement_duration = value.Duration(measurement_duration)
61 self._twoq_gates_duration = value.Duration(twoq_gates_duration)
62 self._oneq_gates_duration = value.Duration(oneq_gates_duration)
63 self.qubits = frozenset(qubits)
64 self.gateset = get_ion_gateset()
65
66 def qubit_set(self) -> FrozenSet['cirq.LineQubit']:
67 return self.qubits
68
69 def qid_pairs(self) -> FrozenSet['cirq.SymmetricalQidPair']:
70 """Qubits have all-to-all connectivity, so returns all pairs.
71
72 Returns:
73 All qubit pairs on the device.
74 """
75 qs = self.qubits
76 return frozenset([devices.SymmetricalQidPair(q, q2) for q in qs for q2 in qs if q < q2])
77
78 def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:
79 return convert_to_ion_gates.ConvertToIonGates().convert_one(operation)
80
81 def decompose_circuit(self, circuit: circuits.Circuit) -> circuits.Circuit:
82 return convert_to_ion_gates.ConvertToIonGates().convert_circuit(circuit)
83
84 def duration_of(self, operation):
85 if isinstance(operation.gate, ops.XXPowGate):
86 return self._twoq_gates_duration
87 if isinstance(
88 operation.gate, (ops.XPowGate, ops.YPowGate, ops.ZPowGate, ops.PhasedXPowGate)
89 ):
90 return self._oneq_gates_duration
91 if isinstance(operation.gate, ops.MeasurementGate):
92 return self._measurement_duration
93 raise ValueError(f'Unsupported gate type: {operation!r}')
94
95 def validate_gate(self, gate: ops.Gate):
96 if gate not in self.gateset:
97 raise ValueError(f'Unsupported gate type: {gate!r}')
98
99 def validate_operation(self, operation):
100 if not isinstance(operation, ops.GateOperation):
101 raise ValueError(f'Unsupported operation: {operation!r}')
102
103 self.validate_gate(operation.gate)
104
105 for q in operation.qubits:
106 if not isinstance(q, devices.LineQubit):
107 raise ValueError(f'Unsupported qubit type: {q!r}')
108 if q not in self.qubits:
109 raise ValueError(f'Qubit not on device: {q!r}')
110
111 def validate_circuit(self, circuit: circuits.AbstractCircuit):
112 super().validate_circuit(circuit)
113 _verify_unique_measurement_keys(circuit.all_operations())
114
115 def at(self, position: int) -> Optional[devices.LineQubit]:
116 """Returns the qubit at the given position, if there is one, else None."""
117 q = devices.LineQubit(position)
118 return q if q in self.qubits else None
119
120 def neighbors_of(self, qubit: devices.LineQubit) -> Iterable[devices.LineQubit]:
121 """Returns the qubits that the given qubit can interact with."""
122 possibles = [
123 devices.LineQubit(qubit.x + 1),
124 devices.LineQubit(qubit.x - 1),
125 ]
126 return [e for e in possibles if e in self.qubits]
127
128 def __repr__(self) -> str:
129 return (
130 f'IonDevice(measurement_duration={self._measurement_duration!r}, '
131 f'twoq_gates_duration={self._twoq_gates_duration!r}, '
132 f'oneq_gates_duration={self._oneq_gates_duration!r} '
133 f'qubits={sorted(self.qubits)!r})'
134 )
135
136 def __str__(self) -> str:
137 diagram = circuits.TextDiagramDrawer()
138
139 for q in self.qubits:
140 diagram.write(q.x, 0, str(q))
141 for q2 in self.neighbors_of(q):
142 diagram.grid_line(q.x, 0, q2.x, 0)
143
144 return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)
145
146 def _repr_pretty_(self, p: Any, cycle: bool):
147 """iPython (Jupyter) pretty print."""
148 p.text("IonDevice(...)" if cycle else self.__str__())
149
150 def _value_equality_values_(self) -> Any:
151 return (
152 self._measurement_duration,
153 self._twoq_gates_duration,
154 self._oneq_gates_duration,
155 self.qubits,
156 )
157
158
159 def _verify_unique_measurement_keys(operations: Iterable[ops.Operation]):
160 seen: Set[str] = set()
161 for op in operations:
162 if isinstance(op.gate, ops.MeasurementGate):
163 meas = op.gate
164 key = protocols.measurement_key_name(meas)
165 if key in seen:
166 raise ValueError(f'Measurement key {key} repeated')
167 seen.add(key)
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/ion/ion_device.py b/cirq-core/cirq/ion/ion_device.py
--- a/cirq-core/cirq/ion/ion_device.py
+++ b/cirq-core/cirq/ion/ion_device.py
@@ -55,11 +55,19 @@
twoq_gates_duration: The maximum duration of a two qubit operation.
oneq_gates_duration: The maximum duration of a single qubit
operation.
- qubits: Qubits on the device, identified by their x, y location.
+ qubits: Qubits on the device, identified by their x location.
+
+ Raises:
+ TypeError: If not all the qubits supplied are `cirq.LineQubit`s.
"""
self._measurement_duration = value.Duration(measurement_duration)
self._twoq_gates_duration = value.Duration(twoq_gates_duration)
self._oneq_gates_duration = value.Duration(oneq_gates_duration)
+ if not all(isinstance(qubit, devices.LineQubit) for qubit in qubits):
+ raise TypeError(
+ "All qubits were not of type cirq.LineQubit, instead were "
+ f"{set(type(qubit) for qubit in qubits)}"
+ )
self.qubits = frozenset(qubits)
self.gateset = get_ion_gateset()
| {"golden_diff": "diff --git a/cirq-core/cirq/ion/ion_device.py b/cirq-core/cirq/ion/ion_device.py\n--- a/cirq-core/cirq/ion/ion_device.py\n+++ b/cirq-core/cirq/ion/ion_device.py\n@@ -55,11 +55,19 @@\n twoq_gates_duration: The maximum duration of a two qubit operation.\n oneq_gates_duration: The maximum duration of a single qubit\n operation.\n- qubits: Qubits on the device, identified by their x, y location.\n+ qubits: Qubits on the device, identified by their x location.\n+\n+ Raises:\n+ TypeError: If not all the qubits supplied are `cirq.LineQubit`s.\n \"\"\"\n self._measurement_duration = value.Duration(measurement_duration)\n self._twoq_gates_duration = value.Duration(twoq_gates_duration)\n self._oneq_gates_duration = value.Duration(oneq_gates_duration)\n+ if not all(isinstance(qubit, devices.LineQubit) for qubit in qubits):\n+ raise TypeError(\n+ \"All qubits were not of type cirq.LineQubit, instead were \"\n+ f\"{set(type(qubit) for qubit in qubits)}\"\n+ )\n self.qubits = frozenset(qubits)\n self.gateset = get_ion_gateset()\n", "issue": "Should an IonDevice accept any qubit type?\nSome docstrings ([here](https://github.com/quantumlib/Cirq/blob/master/cirq/ion/ion_device.py#L26)) state qubits are one a line, while others ([here](https://github.com/quantumlib/Cirq/blob/master/cirq/ion/ion_device.py#L42)) state qubits are identified by their x, y location. I believe the latter is a typo due to the [annotation](https://github.com/quantumlib/Cirq/blob/master/cirq/ion/ion_device.py#L34).\r\n\r\nIf so, should `__init__` raise an error if the input qubits are not `LineQubit`s? (The `at` and `neighbors_of` methods assume `LineQubit`s.)\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, FrozenSet, Iterable, Optional, Set, TYPE_CHECKING\n\nfrom cirq import circuits, value, devices, ops, protocols\nfrom cirq.ion import convert_to_ion_gates\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef get_ion_gateset() -> ops.Gateset:\n return ops.Gateset(\n ops.XXPowGate,\n ops.MeasurementGate,\n ops.XPowGate,\n ops.YPowGate,\n ops.ZPowGate,\n ops.PhasedXPowGate,\n unroll_circuit_op=False,\n accept_global_phase_op=False,\n )\n\n\[email protected]_equality\nclass IonDevice(devices.Device):\n \"\"\"A device with qubits placed on a line.\n\n Qubits have all-to-all connectivity.\n \"\"\"\n\n def __init__(\n self,\n measurement_duration: 'cirq.DURATION_LIKE',\n twoq_gates_duration: 'cirq.DURATION_LIKE',\n oneq_gates_duration: 'cirq.DURATION_LIKE',\n qubits: Iterable[devices.LineQubit],\n ) -> None:\n \"\"\"Initializes the description of an ion trap device.\n\n Args:\n measurement_duration: The maximum duration of a measurement.\n twoq_gates_duration: The maximum duration of a two qubit operation.\n oneq_gates_duration: The maximum duration of a single qubit\n operation.\n qubits: Qubits on the device, identified by their x, y location.\n \"\"\"\n self._measurement_duration = value.Duration(measurement_duration)\n 
self._twoq_gates_duration = value.Duration(twoq_gates_duration)\n self._oneq_gates_duration = value.Duration(oneq_gates_duration)\n self.qubits = frozenset(qubits)\n self.gateset = get_ion_gateset()\n\n def qubit_set(self) -> FrozenSet['cirq.LineQubit']:\n return self.qubits\n\n def qid_pairs(self) -> FrozenSet['cirq.SymmetricalQidPair']:\n \"\"\"Qubits have all-to-all connectivity, so returns all pairs.\n\n Returns:\n All qubit pairs on the device.\n \"\"\"\n qs = self.qubits\n return frozenset([devices.SymmetricalQidPair(q, q2) for q in qs for q2 in qs if q < q2])\n\n def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:\n return convert_to_ion_gates.ConvertToIonGates().convert_one(operation)\n\n def decompose_circuit(self, circuit: circuits.Circuit) -> circuits.Circuit:\n return convert_to_ion_gates.ConvertToIonGates().convert_circuit(circuit)\n\n def duration_of(self, operation):\n if isinstance(operation.gate, ops.XXPowGate):\n return self._twoq_gates_duration\n if isinstance(\n operation.gate, (ops.XPowGate, ops.YPowGate, ops.ZPowGate, ops.PhasedXPowGate)\n ):\n return self._oneq_gates_duration\n if isinstance(operation.gate, ops.MeasurementGate):\n return self._measurement_duration\n raise ValueError(f'Unsupported gate type: {operation!r}')\n\n def validate_gate(self, gate: ops.Gate):\n if gate not in self.gateset:\n raise ValueError(f'Unsupported gate type: {gate!r}')\n\n def validate_operation(self, operation):\n if not isinstance(operation, ops.GateOperation):\n raise ValueError(f'Unsupported operation: {operation!r}')\n\n self.validate_gate(operation.gate)\n\n for q in operation.qubits:\n if not isinstance(q, devices.LineQubit):\n raise ValueError(f'Unsupported qubit type: {q!r}')\n if q not in self.qubits:\n raise ValueError(f'Qubit not on device: {q!r}')\n\n def validate_circuit(self, circuit: circuits.AbstractCircuit):\n super().validate_circuit(circuit)\n _verify_unique_measurement_keys(circuit.all_operations())\n\n def at(self, position: int) -> Optional[devices.LineQubit]:\n \"\"\"Returns the qubit at the given position, if there is one, else None.\"\"\"\n q = devices.LineQubit(position)\n return q if q in self.qubits else None\n\n def neighbors_of(self, qubit: devices.LineQubit) -> Iterable[devices.LineQubit]:\n \"\"\"Returns the qubits that the given qubit can interact with.\"\"\"\n possibles = [\n devices.LineQubit(qubit.x + 1),\n devices.LineQubit(qubit.x - 1),\n ]\n return [e for e in possibles if e in self.qubits]\n\n def __repr__(self) -> str:\n return (\n f'IonDevice(measurement_duration={self._measurement_duration!r}, '\n f'twoq_gates_duration={self._twoq_gates_duration!r}, '\n f'oneq_gates_duration={self._oneq_gates_duration!r} '\n f'qubits={sorted(self.qubits)!r})'\n )\n\n def __str__(self) -> str:\n diagram = circuits.TextDiagramDrawer()\n\n for q in self.qubits:\n diagram.write(q.x, 0, str(q))\n for q2 in self.neighbors_of(q):\n diagram.grid_line(q.x, 0, q2.x, 0)\n\n return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)\n\n def _repr_pretty_(self, p: Any, cycle: bool):\n \"\"\"iPython (Jupyter) pretty print.\"\"\"\n p.text(\"IonDevice(...)\" if cycle else self.__str__())\n\n def _value_equality_values_(self) -> Any:\n return (\n self._measurement_duration,\n self._twoq_gates_duration,\n self._oneq_gates_duration,\n self.qubits,\n )\n\n\ndef _verify_unique_measurement_keys(operations: Iterable[ops.Operation]):\n seen: Set[str] = set()\n for op in operations:\n if isinstance(op.gate, 
ops.MeasurementGate):\n meas = op.gate\n key = protocols.measurement_key_name(meas)\n if key in seen:\n raise ValueError(f'Measurement key {key} repeated')\n seen.add(key)\n", "path": "cirq-core/cirq/ion/ion_device.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, FrozenSet, Iterable, Optional, Set, TYPE_CHECKING\n\nfrom cirq import circuits, value, devices, ops, protocols\nfrom cirq.ion import convert_to_ion_gates\n\nif TYPE_CHECKING:\n import cirq\n\n\ndef get_ion_gateset() -> ops.Gateset:\n return ops.Gateset(\n ops.XXPowGate,\n ops.MeasurementGate,\n ops.XPowGate,\n ops.YPowGate,\n ops.ZPowGate,\n ops.PhasedXPowGate,\n unroll_circuit_op=False,\n accept_global_phase_op=False,\n )\n\n\[email protected]_equality\nclass IonDevice(devices.Device):\n \"\"\"A device with qubits placed on a line.\n\n Qubits have all-to-all connectivity.\n \"\"\"\n\n def __init__(\n self,\n measurement_duration: 'cirq.DURATION_LIKE',\n twoq_gates_duration: 'cirq.DURATION_LIKE',\n oneq_gates_duration: 'cirq.DURATION_LIKE',\n qubits: Iterable[devices.LineQubit],\n ) -> None:\n \"\"\"Initializes the description of an ion trap device.\n\n Args:\n measurement_duration: The maximum duration of a measurement.\n twoq_gates_duration: The maximum duration of a two qubit operation.\n oneq_gates_duration: The maximum duration of a single qubit\n operation.\n qubits: Qubits on the device, identified by their x location.\n\n Raises:\n TypeError: If not all the qubits supplied are `cirq.LineQubit`s.\n \"\"\"\n self._measurement_duration = value.Duration(measurement_duration)\n self._twoq_gates_duration = value.Duration(twoq_gates_duration)\n self._oneq_gates_duration = value.Duration(oneq_gates_duration)\n if not all(isinstance(qubit, devices.LineQubit) for qubit in qubits):\n raise TypeError(\n \"All qubits were not of type cirq.LineQubit, instead were \"\n f\"{set(type(qubit) for qubit in qubits)}\"\n )\n self.qubits = frozenset(qubits)\n self.gateset = get_ion_gateset()\n\n def qubit_set(self) -> FrozenSet['cirq.LineQubit']:\n return self.qubits\n\n def qid_pairs(self) -> FrozenSet['cirq.SymmetricalQidPair']:\n \"\"\"Qubits have all-to-all connectivity, so returns all pairs.\n\n Returns:\n All qubit pairs on the device.\n \"\"\"\n qs = self.qubits\n return frozenset([devices.SymmetricalQidPair(q, q2) for q in qs for q2 in qs if q < q2])\n\n def decompose_operation(self, operation: ops.Operation) -> ops.OP_TREE:\n return convert_to_ion_gates.ConvertToIonGates().convert_one(operation)\n\n def decompose_circuit(self, circuit: circuits.Circuit) -> circuits.Circuit:\n return convert_to_ion_gates.ConvertToIonGates().convert_circuit(circuit)\n\n def duration_of(self, operation):\n if isinstance(operation.gate, ops.XXPowGate):\n return self._twoq_gates_duration\n if isinstance(\n operation.gate, (ops.XPowGate, ops.YPowGate, ops.ZPowGate, ops.PhasedXPowGate)\n ):\n return self._oneq_gates_duration\n if isinstance(operation.gate, 
ops.MeasurementGate):\n return self._measurement_duration\n raise ValueError(f'Unsupported gate type: {operation!r}')\n\n def validate_gate(self, gate: ops.Gate):\n if gate not in self.gateset:\n raise ValueError(f'Unsupported gate type: {gate!r}')\n\n def validate_operation(self, operation):\n if not isinstance(operation, ops.GateOperation):\n raise ValueError(f'Unsupported operation: {operation!r}')\n\n self.validate_gate(operation.gate)\n\n for q in operation.qubits:\n if not isinstance(q, devices.LineQubit):\n raise ValueError(f'Unsupported qubit type: {q!r}')\n if q not in self.qubits:\n raise ValueError(f'Qubit not on device: {q!r}')\n\n def validate_circuit(self, circuit: circuits.AbstractCircuit):\n super().validate_circuit(circuit)\n _verify_unique_measurement_keys(circuit.all_operations())\n\n def at(self, position: int) -> Optional[devices.LineQubit]:\n \"\"\"Returns the qubit at the given position, if there is one, else None.\"\"\"\n q = devices.LineQubit(position)\n return q if q in self.qubits else None\n\n def neighbors_of(self, qubit: devices.LineQubit) -> Iterable[devices.LineQubit]:\n \"\"\"Returns the qubits that the given qubit can interact with.\"\"\"\n possibles = [\n devices.LineQubit(qubit.x + 1),\n devices.LineQubit(qubit.x - 1),\n ]\n return [e for e in possibles if e in self.qubits]\n\n def __repr__(self) -> str:\n return (\n f'IonDevice(measurement_duration={self._measurement_duration!r}, '\n f'twoq_gates_duration={self._twoq_gates_duration!r}, '\n f'oneq_gates_duration={self._oneq_gates_duration!r} '\n f'qubits={sorted(self.qubits)!r})'\n )\n\n def __str__(self) -> str:\n diagram = circuits.TextDiagramDrawer()\n\n for q in self.qubits:\n diagram.write(q.x, 0, str(q))\n for q2 in self.neighbors_of(q):\n diagram.grid_line(q.x, 0, q2.x, 0)\n\n return diagram.render(horizontal_spacing=3, vertical_spacing=2, use_unicode_characters=True)\n\n def _repr_pretty_(self, p: Any, cycle: bool):\n \"\"\"iPython (Jupyter) pretty print.\"\"\"\n p.text(\"IonDevice(...)\" if cycle else self.__str__())\n\n def _value_equality_values_(self) -> Any:\n return (\n self._measurement_duration,\n self._twoq_gates_duration,\n self._oneq_gates_duration,\n self.qubits,\n )\n\n\ndef _verify_unique_measurement_keys(operations: Iterable[ops.Operation]):\n seen: Set[str] = set()\n for op in operations:\n if isinstance(op.gate, ops.MeasurementGate):\n meas = op.gate\n key = protocols.measurement_key_name(meas)\n if key in seen:\n raise ValueError(f'Measurement key {key} repeated')\n seen.add(key)\n", "path": "cirq-core/cirq/ion/ion_device.py"}]} | 2,324 | 296 |
gh_patches_debug_35827 | rasdani/github-patches | git_diff | mesonbuild__meson-12085 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`dependency('vulkan', version: ['>=1.X.X'])` fails on Windows
**Describe the bug**
`dependency('vulkan', version: ['>=1.3.0'])` fails on Windows if a version is specified.
The Vulkan SDK is installed, and the environment variable (`VULKAN_SDK=C:\VulkanSDK\1.3.250.1`) is set.
Edit: The same does work on Linux.
**To Reproduce**
Install a recent Vulkan SDK (>= 1.3.0) and use this meson.build:
```python
project('test-project', 'cpp')
# this does not work
vulkan = dependency('vulkan', version: ['>=1.3.0'])
# this does work
# vulkan = dependency('vulkan')
executable('test-project', ['main.cpp'], dependencies : [vulkan])
```
**Expected behavior**
Vulkan is detected correctly.
**system parameters**
* native build
* Windows 11
* Python 3.9.13
* Meson 1.2.0
* Ninja 1.11.0
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesonbuild/dependencies/ui.py`
Content:
```
1 # Copyright 2013-2017 The Meson development team
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6
7 # http://www.apache.org/licenses/LICENSE-2.0
8
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # This file contains the detection logic for external dependencies that
16 # are UI-related.
17 from __future__ import annotations
18
19 import os
20 import subprocess
21 import typing as T
22
23 from .. import mlog
24 from .. import mesonlib
25 from ..mesonlib import (
26 Popen_safe, extract_as_list, version_compare_many
27 )
28 from ..environment import detect_cpu_family
29
30 from .base import DependencyException, DependencyMethods, DependencyTypeName, SystemDependency
31 from .configtool import ConfigToolDependency
32 from .detect import packages
33 from .factory import DependencyFactory
34
35 if T.TYPE_CHECKING:
36 from ..environment import Environment
37
38
39 class GLDependencySystem(SystemDependency):
40 def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
41 super().__init__(name, environment, kwargs)
42
43 if self.env.machines[self.for_machine].is_darwin():
44 self.is_found = True
45 # FIXME: Use AppleFrameworks dependency
46 self.link_args = ['-framework', 'OpenGL']
47 # FIXME: Detect version using self.clib_compiler
48 return
49 elif self.env.machines[self.for_machine].is_windows():
50 self.is_found = True
51 # FIXME: Use self.clib_compiler.find_library()
52 self.link_args = ['-lopengl32']
53 # FIXME: Detect version using self.clib_compiler
54 return
55 else:
56 links = self.clib_compiler.find_library('GL', environment, [])
57 has_header = self.clib_compiler.has_header('GL/gl.h', '', environment)[0]
58 if links and has_header:
59 self.is_found = True
60 self.link_args = links
61 elif links:
62 raise DependencyException('Found GL runtime library but no development header files')
63
64 class GnuStepDependency(ConfigToolDependency):
65
66 tools = ['gnustep-config']
67 tool_name = 'gnustep-config'
68
69 def __init__(self, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:
70 super().__init__('gnustep', environment, kwargs, language='objc')
71 if not self.is_found:
72 return
73 self.modules = kwargs.get('modules', [])
74 self.compile_args = self.filter_args(
75 self.get_config_value(['--objc-flags'], 'compile_args'))
76 self.link_args = self.weird_filter(self.get_config_value(
77 ['--gui-libs' if 'gui' in self.modules else '--base-libs'],
78 'link_args'))
79
80 def find_config(self, versions: T.Optional[T.List[str]] = None, returncode: int = 0) -> T.Tuple[T.Optional[T.List[str]], T.Optional[str]]:
81 tool = [self.tools[0]]
82 try:
83 p, out = Popen_safe(tool + ['--help'])[:2]
84 except (FileNotFoundError, PermissionError):
85 return (None, None)
86 if p.returncode != returncode:
87 return (None, None)
88 self.config = tool
89 found_version = self.detect_version()
90 if versions and not version_compare_many(found_version, versions)[0]:
91 return (None, found_version)
92
93 return (tool, found_version)
94
95 @staticmethod
96 def weird_filter(elems: T.List[str]) -> T.List[str]:
97 """When building packages, the output of the enclosing Make is
98 sometimes mixed among the subprocess output. I have no idea why. As a
99 hack filter out everything that is not a flag.
100 """
101 return [e for e in elems if e.startswith('-')]
102
103 @staticmethod
104 def filter_args(args: T.List[str]) -> T.List[str]:
105 """gnustep-config returns a bunch of garbage args such as -O2 and so
106 on. Drop everything that is not needed.
107 """
108 result = []
109 for f in args:
110 if f.startswith('-D') \
111 or f.startswith('-f') \
112 or f.startswith('-I') \
113 or f == '-pthread' \
114 or (f.startswith('-W') and not f == '-Wall'):
115 result.append(f)
116 return result
117
118 def detect_version(self) -> str:
119 gmake = self.get_config_value(['--variable=GNUMAKE'], 'variable')[0]
120 makefile_dir = self.get_config_value(['--variable=GNUSTEP_MAKEFILES'], 'variable')[0]
121 # This Makefile has the GNUStep version set
122 base_make = os.path.join(makefile_dir, 'Additional', 'base.make')
123 # Print the Makefile variable passed as the argument. For instance, if
124 # you run the make target `print-SOME_VARIABLE`, this will print the
125 # value of the variable `SOME_VARIABLE`.
126 printver = "print-%:\n\t@echo '$($*)'"
127 env = os.environ.copy()
128 # See base.make to understand why this is set
129 env['FOUNDATION_LIB'] = 'gnu'
130 p, o, e = Popen_safe([gmake, '-f', '-', '-f', base_make,
131 'print-GNUSTEP_BASE_VERSION'],
132 env=env, write=printver, stdin=subprocess.PIPE)
133 version = o.strip()
134 if not version:
135 mlog.debug("Couldn't detect GNUStep version, falling back to '1'")
136 # Fallback to setting some 1.x version
137 version = '1'
138 return version
139
140 packages['gnustep'] = GnuStepDependency
141
142
143 class SDL2DependencyConfigTool(ConfigToolDependency):
144
145 tools = ['sdl2-config']
146 tool_name = 'sdl2-config'
147
148 def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
149 super().__init__(name, environment, kwargs)
150 if not self.is_found:
151 return
152 self.compile_args = self.get_config_value(['--cflags'], 'compile_args')
153 self.link_args = self.get_config_value(['--libs'], 'link_args')
154
155
156 class WxDependency(ConfigToolDependency):
157
158 tools = ['wx-config-3.0', 'wx-config-3.1', 'wx-config', 'wx-config-gtk3']
159 tool_name = 'wx-config'
160
161 def __init__(self, environment: 'Environment', kwargs: T.Dict[str, T.Any]):
162 super().__init__('WxWidgets', environment, kwargs, language='cpp')
163 if not self.is_found:
164 return
165 self.requested_modules = self.get_requested(kwargs)
166
167 extra_args = []
168 if self.static:
169 extra_args.append('--static=yes')
170
171 # Check to make sure static is going to work
172 err = Popen_safe(self.config + extra_args)[2]
173 if 'No config found to match' in err:
174 mlog.debug('WxWidgets is missing static libraries.')
175 self.is_found = False
176 return
177
178 # wx-config seems to have a cflags as well but since it requires C++,
179 # this should be good, at least for now.
180 self.compile_args = self.get_config_value(['--cxxflags'] + extra_args + self.requested_modules, 'compile_args')
181 self.link_args = self.get_config_value(['--libs'] + extra_args + self.requested_modules, 'link_args')
182
183 @staticmethod
184 def get_requested(kwargs: T.Dict[str, T.Any]) -> T.List[str]:
185 if 'modules' not in kwargs:
186 return []
187 candidates = extract_as_list(kwargs, 'modules')
188 for c in candidates:
189 if not isinstance(c, str):
190 raise DependencyException('wxwidgets module argument is not a string')
191 return candidates
192
193 packages['wxwidgets'] = WxDependency
194
195 class VulkanDependencySystem(SystemDependency):
196
197 def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any], language: T.Optional[str] = None) -> None:
198 super().__init__(name, environment, kwargs, language=language)
199
200 try:
201 self.vulkan_sdk = os.environ['VULKAN_SDK']
202 if not os.path.isabs(self.vulkan_sdk):
203 raise DependencyException('VULKAN_SDK must be an absolute path.')
204 except KeyError:
205 self.vulkan_sdk = None
206
207 if self.vulkan_sdk:
208 # TODO: this config might not work on some platforms, fix bugs as reported
209 # we should at least detect other 64-bit platforms (e.g. armv8)
210 lib_name = 'vulkan'
211 lib_dir = 'lib'
212 inc_dir = 'include'
213 if mesonlib.is_windows():
214 lib_name = 'vulkan-1'
215 lib_dir = 'Lib32'
216 inc_dir = 'Include'
217 if detect_cpu_family(self.env.coredata.compilers.host) == 'x86_64':
218 lib_dir = 'Lib'
219
220 # make sure header and lib are valid
221 inc_path = os.path.join(self.vulkan_sdk, inc_dir)
222 header = os.path.join(inc_path, 'vulkan', 'vulkan.h')
223 lib_path = os.path.join(self.vulkan_sdk, lib_dir)
224 find_lib = self.clib_compiler.find_library(lib_name, environment, [lib_path])
225
226 if not find_lib:
227 raise DependencyException('VULKAN_SDK point to invalid directory (no lib)')
228
229 if not os.path.isfile(header):
230 raise DependencyException('VULKAN_SDK point to invalid directory (no include)')
231
232 # XXX: this is very odd, and may deserve being removed
233 self.type_name = DependencyTypeName('vulkan_sdk')
234 self.is_found = True
235 self.compile_args.append('-I' + inc_path)
236 self.link_args.append('-L' + lib_path)
237 self.link_args.append('-l' + lib_name)
238
239 # TODO: find a way to retrieve the version from the sdk?
240 # Usually it is a part of the path to it (but does not have to be)
241 return
242 else:
243 # simply try to guess it, usually works on linux
244 libs = self.clib_compiler.find_library('vulkan', environment, [])
245 if libs is not None and self.clib_compiler.has_header('vulkan/vulkan.h', '', environment, disable_cache=True)[0]:
246 self.is_found = True
247 for lib in libs:
248 self.link_args.append(lib)
249 return
250
251 packages['gl'] = gl_factory = DependencyFactory(
252 'gl',
253 [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
254 system_class=GLDependencySystem,
255 )
256
257 packages['sdl2'] = sdl2_factory = DependencyFactory(
258 'sdl2',
259 [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK, DependencyMethods.CMAKE],
260 configtool_class=SDL2DependencyConfigTool,
261 cmake_name='SDL2',
262 )
263
264 packages['vulkan'] = vulkan_factory = DependencyFactory(
265 'vulkan',
266 [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],
267 system_class=VulkanDependencySystem,
268 )
269
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mesonbuild/dependencies/ui.py b/mesonbuild/dependencies/ui.py
--- a/mesonbuild/dependencies/ui.py
+++ b/mesonbuild/dependencies/ui.py
@@ -17,11 +17,13 @@
from __future__ import annotations
import os
+import re
import subprocess
import typing as T
from .. import mlog
from .. import mesonlib
+from ..compilers.compilers import CrossNoRunException
from ..mesonlib import (
Popen_safe, extract_as_list, version_compare_many
)
@@ -235,10 +237,6 @@
self.compile_args.append('-I' + inc_path)
self.link_args.append('-L' + lib_path)
self.link_args.append('-l' + lib_name)
-
- # TODO: find a way to retrieve the version from the sdk?
- # Usually it is a part of the path to it (but does not have to be)
- return
else:
# simply try to guess it, usually works on linux
libs = self.clib_compiler.find_library('vulkan', environment, [])
@@ -246,7 +244,33 @@
self.is_found = True
for lib in libs:
self.link_args.append(lib)
- return
+
+ if self.is_found:
+ get_version = '''\
+#include <stdio.h>
+#include <vulkan/vulkan.h>
+
+int main() {
+ printf("%i.%i.%i", VK_VERSION_MAJOR(VK_HEADER_VERSION_COMPLETE),
+ VK_VERSION_MINOR(VK_HEADER_VERSION_COMPLETE),
+ VK_VERSION_PATCH(VK_HEADER_VERSION_COMPLETE));
+ return 0;
+}
+'''
+ try:
+ run = self.clib_compiler.run(get_version, environment, extra_args=self.compile_args)
+ except CrossNoRunException:
+ run = None
+ if run and run.compiled and run.returncode == 0:
+ self.version = run.stdout
+ elif self.vulkan_sdk:
+ # fall back to heuristics: detect version number in path
+ # matches the default install path on Windows
+ match = re.search(rf'VulkanSDK{re.escape(os.path.sep)}([0-9]+(?:\.[0-9]+)+)', self.vulkan_sdk)
+ if match:
+ self.version = match.group(1)
+ else:
+ mlog.warning(f'Environment variable VULKAN_SDK={self.vulkan_sdk} is present, but Vulkan version could not be extracted.')
packages['gl'] = gl_factory = DependencyFactory(
'gl',
| {"golden_diff": "diff --git a/mesonbuild/dependencies/ui.py b/mesonbuild/dependencies/ui.py\n--- a/mesonbuild/dependencies/ui.py\n+++ b/mesonbuild/dependencies/ui.py\n@@ -17,11 +17,13 @@\n from __future__ import annotations\n \n import os\n+import re\n import subprocess\n import typing as T\n \n from .. import mlog\n from .. import mesonlib\n+from ..compilers.compilers import CrossNoRunException\n from ..mesonlib import (\n Popen_safe, extract_as_list, version_compare_many\n )\n@@ -235,10 +237,6 @@\n self.compile_args.append('-I' + inc_path)\n self.link_args.append('-L' + lib_path)\n self.link_args.append('-l' + lib_name)\n-\n- # TODO: find a way to retrieve the version from the sdk?\n- # Usually it is a part of the path to it (but does not have to be)\n- return\n else:\n # simply try to guess it, usually works on linux\n libs = self.clib_compiler.find_library('vulkan', environment, [])\n@@ -246,7 +244,33 @@\n self.is_found = True\n for lib in libs:\n self.link_args.append(lib)\n- return\n+\n+ if self.is_found:\n+ get_version = '''\\\n+#include <stdio.h>\n+#include <vulkan/vulkan.h>\n+\n+int main() {\n+ printf(\"%i.%i.%i\", VK_VERSION_MAJOR(VK_HEADER_VERSION_COMPLETE),\n+ VK_VERSION_MINOR(VK_HEADER_VERSION_COMPLETE),\n+ VK_VERSION_PATCH(VK_HEADER_VERSION_COMPLETE));\n+ return 0;\n+}\n+'''\n+ try:\n+ run = self.clib_compiler.run(get_version, environment, extra_args=self.compile_args)\n+ except CrossNoRunException:\n+ run = None\n+ if run and run.compiled and run.returncode == 0:\n+ self.version = run.stdout\n+ elif self.vulkan_sdk:\n+ # fall back to heuristics: detect version number in path\n+ # matches the default install path on Windows\n+ match = re.search(rf'VulkanSDK{re.escape(os.path.sep)}([0-9]+(?:\\.[0-9]+)+)', self.vulkan_sdk)\n+ if match:\n+ self.version = match.group(1)\n+ else:\n+ mlog.warning(f'Environment variable VULKAN_SDK={self.vulkan_sdk} is present, but Vulkan version could not be extracted.')\n \n packages['gl'] = gl_factory = DependencyFactory(\n 'gl',\n", "issue": "`dependency('vulkan', version: ['>=1.X.X'])` fails on Windows\n**Describe the bug**\r\n`dependency('vulkan', version: ['>=1.3.0'])` fails on Windows if a version is specified.\r\nThe Vulkan SDK is installed, and the environment variable (`VULKAN_SDK=C:\\VulkanSDK\\1.3.250.1`) is set.\r\n\r\nEdit: The same does work on Linux.\r\n\r\n**To Reproduce**\r\nInstall a recent Vulkan SDK (>= 1.3.0) and use this meson.build:\r\n```python\r\nproject('test-project', 'cpp')\r\n\r\n# this does not work\r\nvulkan = dependency('vulkan', version: ['>=1.3.0'])\r\n# this does work\r\n# vulkan = dependency('vulkan')\r\n\r\nexecutable('test-project', ['main.cpp'], dependencies : [vulkan])\r\n```\r\n\r\n**Expected behavior**\r\nVulkan is detected correctly.\r\n\r\n**system parameters**\r\n* native build\r\n* Windows 11\r\n* Python 3.9.13\r\n* Meson 1.2.0\r\n* Ninja 1.11.0\r\n\n", "before_files": [{"content": "# Copyright 2013-2017 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This file contains 
the detection logic for external dependencies that\n# are UI-related.\nfrom __future__ import annotations\n\nimport os\nimport subprocess\nimport typing as T\n\nfrom .. import mlog\nfrom .. import mesonlib\nfrom ..mesonlib import (\n Popen_safe, extract_as_list, version_compare_many\n)\nfrom ..environment import detect_cpu_family\n\nfrom .base import DependencyException, DependencyMethods, DependencyTypeName, SystemDependency\nfrom .configtool import ConfigToolDependency\nfrom .detect import packages\nfrom .factory import DependencyFactory\n\nif T.TYPE_CHECKING:\n from ..environment import Environment\n\n\nclass GLDependencySystem(SystemDependency):\n def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:\n super().__init__(name, environment, kwargs)\n\n if self.env.machines[self.for_machine].is_darwin():\n self.is_found = True\n # FIXME: Use AppleFrameworks dependency\n self.link_args = ['-framework', 'OpenGL']\n # FIXME: Detect version using self.clib_compiler\n return\n elif self.env.machines[self.for_machine].is_windows():\n self.is_found = True\n # FIXME: Use self.clib_compiler.find_library()\n self.link_args = ['-lopengl32']\n # FIXME: Detect version using self.clib_compiler\n return\n else:\n links = self.clib_compiler.find_library('GL', environment, [])\n has_header = self.clib_compiler.has_header('GL/gl.h', '', environment)[0]\n if links and has_header:\n self.is_found = True\n self.link_args = links\n elif links:\n raise DependencyException('Found GL runtime library but no development header files')\n\nclass GnuStepDependency(ConfigToolDependency):\n\n tools = ['gnustep-config']\n tool_name = 'gnustep-config'\n\n def __init__(self, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:\n super().__init__('gnustep', environment, kwargs, language='objc')\n if not self.is_found:\n return\n self.modules = kwargs.get('modules', [])\n self.compile_args = self.filter_args(\n self.get_config_value(['--objc-flags'], 'compile_args'))\n self.link_args = self.weird_filter(self.get_config_value(\n ['--gui-libs' if 'gui' in self.modules else '--base-libs'],\n 'link_args'))\n\n def find_config(self, versions: T.Optional[T.List[str]] = None, returncode: int = 0) -> T.Tuple[T.Optional[T.List[str]], T.Optional[str]]:\n tool = [self.tools[0]]\n try:\n p, out = Popen_safe(tool + ['--help'])[:2]\n except (FileNotFoundError, PermissionError):\n return (None, None)\n if p.returncode != returncode:\n return (None, None)\n self.config = tool\n found_version = self.detect_version()\n if versions and not version_compare_many(found_version, versions)[0]:\n return (None, found_version)\n\n return (tool, found_version)\n\n @staticmethod\n def weird_filter(elems: T.List[str]) -> T.List[str]:\n \"\"\"When building packages, the output of the enclosing Make is\n sometimes mixed among the subprocess output. I have no idea why. As a\n hack filter out everything that is not a flag.\n \"\"\"\n return [e for e in elems if e.startswith('-')]\n\n @staticmethod\n def filter_args(args: T.List[str]) -> T.List[str]:\n \"\"\"gnustep-config returns a bunch of garbage args such as -O2 and so\n on. 
Drop everything that is not needed.\n \"\"\"\n result = []\n for f in args:\n if f.startswith('-D') \\\n or f.startswith('-f') \\\n or f.startswith('-I') \\\n or f == '-pthread' \\\n or (f.startswith('-W') and not f == '-Wall'):\n result.append(f)\n return result\n\n def detect_version(self) -> str:\n gmake = self.get_config_value(['--variable=GNUMAKE'], 'variable')[0]\n makefile_dir = self.get_config_value(['--variable=GNUSTEP_MAKEFILES'], 'variable')[0]\n # This Makefile has the GNUStep version set\n base_make = os.path.join(makefile_dir, 'Additional', 'base.make')\n # Print the Makefile variable passed as the argument. For instance, if\n # you run the make target `print-SOME_VARIABLE`, this will print the\n # value of the variable `SOME_VARIABLE`.\n printver = \"print-%:\\n\\t@echo '$($*)'\"\n env = os.environ.copy()\n # See base.make to understand why this is set\n env['FOUNDATION_LIB'] = 'gnu'\n p, o, e = Popen_safe([gmake, '-f', '-', '-f', base_make,\n 'print-GNUSTEP_BASE_VERSION'],\n env=env, write=printver, stdin=subprocess.PIPE)\n version = o.strip()\n if not version:\n mlog.debug(\"Couldn't detect GNUStep version, falling back to '1'\")\n # Fallback to setting some 1.x version\n version = '1'\n return version\n\npackages['gnustep'] = GnuStepDependency\n\n\nclass SDL2DependencyConfigTool(ConfigToolDependency):\n\n tools = ['sdl2-config']\n tool_name = 'sdl2-config'\n\n def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):\n super().__init__(name, environment, kwargs)\n if not self.is_found:\n return\n self.compile_args = self.get_config_value(['--cflags'], 'compile_args')\n self.link_args = self.get_config_value(['--libs'], 'link_args')\n\n\nclass WxDependency(ConfigToolDependency):\n\n tools = ['wx-config-3.0', 'wx-config-3.1', 'wx-config', 'wx-config-gtk3']\n tool_name = 'wx-config'\n\n def __init__(self, environment: 'Environment', kwargs: T.Dict[str, T.Any]):\n super().__init__('WxWidgets', environment, kwargs, language='cpp')\n if not self.is_found:\n return\n self.requested_modules = self.get_requested(kwargs)\n\n extra_args = []\n if self.static:\n extra_args.append('--static=yes')\n\n # Check to make sure static is going to work\n err = Popen_safe(self.config + extra_args)[2]\n if 'No config found to match' in err:\n mlog.debug('WxWidgets is missing static libraries.')\n self.is_found = False\n return\n\n # wx-config seems to have a cflags as well but since it requires C++,\n # this should be good, at least for now.\n self.compile_args = self.get_config_value(['--cxxflags'] + extra_args + self.requested_modules, 'compile_args')\n self.link_args = self.get_config_value(['--libs'] + extra_args + self.requested_modules, 'link_args')\n\n @staticmethod\n def get_requested(kwargs: T.Dict[str, T.Any]) -> T.List[str]:\n if 'modules' not in kwargs:\n return []\n candidates = extract_as_list(kwargs, 'modules')\n for c in candidates:\n if not isinstance(c, str):\n raise DependencyException('wxwidgets module argument is not a string')\n return candidates\n\npackages['wxwidgets'] = WxDependency\n\nclass VulkanDependencySystem(SystemDependency):\n\n def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any], language: T.Optional[str] = None) -> None:\n super().__init__(name, environment, kwargs, language=language)\n\n try:\n self.vulkan_sdk = os.environ['VULKAN_SDK']\n if not os.path.isabs(self.vulkan_sdk):\n raise DependencyException('VULKAN_SDK must be an absolute path.')\n except KeyError:\n self.vulkan_sdk = None\n\n if 
self.vulkan_sdk:\n # TODO: this config might not work on some platforms, fix bugs as reported\n # we should at least detect other 64-bit platforms (e.g. armv8)\n lib_name = 'vulkan'\n lib_dir = 'lib'\n inc_dir = 'include'\n if mesonlib.is_windows():\n lib_name = 'vulkan-1'\n lib_dir = 'Lib32'\n inc_dir = 'Include'\n if detect_cpu_family(self.env.coredata.compilers.host) == 'x86_64':\n lib_dir = 'Lib'\n\n # make sure header and lib are valid\n inc_path = os.path.join(self.vulkan_sdk, inc_dir)\n header = os.path.join(inc_path, 'vulkan', 'vulkan.h')\n lib_path = os.path.join(self.vulkan_sdk, lib_dir)\n find_lib = self.clib_compiler.find_library(lib_name, environment, [lib_path])\n\n if not find_lib:\n raise DependencyException('VULKAN_SDK point to invalid directory (no lib)')\n\n if not os.path.isfile(header):\n raise DependencyException('VULKAN_SDK point to invalid directory (no include)')\n\n # XXX: this is very odd, and may deserve being removed\n self.type_name = DependencyTypeName('vulkan_sdk')\n self.is_found = True\n self.compile_args.append('-I' + inc_path)\n self.link_args.append('-L' + lib_path)\n self.link_args.append('-l' + lib_name)\n\n # TODO: find a way to retrieve the version from the sdk?\n # Usually it is a part of the path to it (but does not have to be)\n return\n else:\n # simply try to guess it, usually works on linux\n libs = self.clib_compiler.find_library('vulkan', environment, [])\n if libs is not None and self.clib_compiler.has_header('vulkan/vulkan.h', '', environment, disable_cache=True)[0]:\n self.is_found = True\n for lib in libs:\n self.link_args.append(lib)\n return\n\npackages['gl'] = gl_factory = DependencyFactory(\n 'gl',\n [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],\n system_class=GLDependencySystem,\n)\n\npackages['sdl2'] = sdl2_factory = DependencyFactory(\n 'sdl2',\n [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK, DependencyMethods.CMAKE],\n configtool_class=SDL2DependencyConfigTool,\n cmake_name='SDL2',\n)\n\npackages['vulkan'] = vulkan_factory = DependencyFactory(\n 'vulkan',\n [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],\n system_class=VulkanDependencySystem,\n)\n", "path": "mesonbuild/dependencies/ui.py"}], "after_files": [{"content": "# Copyright 2013-2017 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# This file contains the detection logic for external dependencies that\n# are UI-related.\nfrom __future__ import annotations\n\nimport os\nimport re\nimport subprocess\nimport typing as T\n\nfrom .. import mlog\nfrom .. 
import mesonlib\nfrom ..compilers.compilers import CrossNoRunException\nfrom ..mesonlib import (\n Popen_safe, extract_as_list, version_compare_many\n)\nfrom ..environment import detect_cpu_family\n\nfrom .base import DependencyException, DependencyMethods, DependencyTypeName, SystemDependency\nfrom .configtool import ConfigToolDependency\nfrom .detect import packages\nfrom .factory import DependencyFactory\n\nif T.TYPE_CHECKING:\n from ..environment import Environment\n\n\nclass GLDependencySystem(SystemDependency):\n def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:\n super().__init__(name, environment, kwargs)\n\n if self.env.machines[self.for_machine].is_darwin():\n self.is_found = True\n # FIXME: Use AppleFrameworks dependency\n self.link_args = ['-framework', 'OpenGL']\n # FIXME: Detect version using self.clib_compiler\n return\n elif self.env.machines[self.for_machine].is_windows():\n self.is_found = True\n # FIXME: Use self.clib_compiler.find_library()\n self.link_args = ['-lopengl32']\n # FIXME: Detect version using self.clib_compiler\n return\n else:\n links = self.clib_compiler.find_library('GL', environment, [])\n has_header = self.clib_compiler.has_header('GL/gl.h', '', environment)[0]\n if links and has_header:\n self.is_found = True\n self.link_args = links\n elif links:\n raise DependencyException('Found GL runtime library but no development header files')\n\nclass GnuStepDependency(ConfigToolDependency):\n\n tools = ['gnustep-config']\n tool_name = 'gnustep-config'\n\n def __init__(self, environment: 'Environment', kwargs: T.Dict[str, T.Any]) -> None:\n super().__init__('gnustep', environment, kwargs, language='objc')\n if not self.is_found:\n return\n self.modules = kwargs.get('modules', [])\n self.compile_args = self.filter_args(\n self.get_config_value(['--objc-flags'], 'compile_args'))\n self.link_args = self.weird_filter(self.get_config_value(\n ['--gui-libs' if 'gui' in self.modules else '--base-libs'],\n 'link_args'))\n\n def find_config(self, versions: T.Optional[T.List[str]] = None, returncode: int = 0) -> T.Tuple[T.Optional[T.List[str]], T.Optional[str]]:\n tool = [self.tools[0]]\n try:\n p, out = Popen_safe(tool + ['--help'])[:2]\n except (FileNotFoundError, PermissionError):\n return (None, None)\n if p.returncode != returncode:\n return (None, None)\n self.config = tool\n found_version = self.detect_version()\n if versions and not version_compare_many(found_version, versions)[0]:\n return (None, found_version)\n\n return (tool, found_version)\n\n @staticmethod\n def weird_filter(elems: T.List[str]) -> T.List[str]:\n \"\"\"When building packages, the output of the enclosing Make is\n sometimes mixed among the subprocess output. I have no idea why. As a\n hack filter out everything that is not a flag.\n \"\"\"\n return [e for e in elems if e.startswith('-')]\n\n @staticmethod\n def filter_args(args: T.List[str]) -> T.List[str]:\n \"\"\"gnustep-config returns a bunch of garbage args such as -O2 and so\n on. 
Drop everything that is not needed.\n \"\"\"\n result = []\n for f in args:\n if f.startswith('-D') \\\n or f.startswith('-f') \\\n or f.startswith('-I') \\\n or f == '-pthread' \\\n or (f.startswith('-W') and not f == '-Wall'):\n result.append(f)\n return result\n\n def detect_version(self) -> str:\n gmake = self.get_config_value(['--variable=GNUMAKE'], 'variable')[0]\n makefile_dir = self.get_config_value(['--variable=GNUSTEP_MAKEFILES'], 'variable')[0]\n # This Makefile has the GNUStep version set\n base_make = os.path.join(makefile_dir, 'Additional', 'base.make')\n # Print the Makefile variable passed as the argument. For instance, if\n # you run the make target `print-SOME_VARIABLE`, this will print the\n # value of the variable `SOME_VARIABLE`.\n printver = \"print-%:\\n\\t@echo '$($*)'\"\n env = os.environ.copy()\n # See base.make to understand why this is set\n env['FOUNDATION_LIB'] = 'gnu'\n p, o, e = Popen_safe([gmake, '-f', '-', '-f', base_make,\n 'print-GNUSTEP_BASE_VERSION'],\n env=env, write=printver, stdin=subprocess.PIPE)\n version = o.strip()\n if not version:\n mlog.debug(\"Couldn't detect GNUStep version, falling back to '1'\")\n # Fallback to setting some 1.x version\n version = '1'\n return version\n\npackages['gnustep'] = GnuStepDependency\n\n\nclass SDL2DependencyConfigTool(ConfigToolDependency):\n\n tools = ['sdl2-config']\n tool_name = 'sdl2-config'\n\n def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any]):\n super().__init__(name, environment, kwargs)\n if not self.is_found:\n return\n self.compile_args = self.get_config_value(['--cflags'], 'compile_args')\n self.link_args = self.get_config_value(['--libs'], 'link_args')\n\n\nclass WxDependency(ConfigToolDependency):\n\n tools = ['wx-config-3.0', 'wx-config-3.1', 'wx-config', 'wx-config-gtk3']\n tool_name = 'wx-config'\n\n def __init__(self, environment: 'Environment', kwargs: T.Dict[str, T.Any]):\n super().__init__('WxWidgets', environment, kwargs, language='cpp')\n if not self.is_found:\n return\n self.requested_modules = self.get_requested(kwargs)\n\n extra_args = []\n if self.static:\n extra_args.append('--static=yes')\n\n # Check to make sure static is going to work\n err = Popen_safe(self.config + extra_args)[2]\n if 'No config found to match' in err:\n mlog.debug('WxWidgets is missing static libraries.')\n self.is_found = False\n return\n\n # wx-config seems to have a cflags as well but since it requires C++,\n # this should be good, at least for now.\n self.compile_args = self.get_config_value(['--cxxflags'] + extra_args + self.requested_modules, 'compile_args')\n self.link_args = self.get_config_value(['--libs'] + extra_args + self.requested_modules, 'link_args')\n\n @staticmethod\n def get_requested(kwargs: T.Dict[str, T.Any]) -> T.List[str]:\n if 'modules' not in kwargs:\n return []\n candidates = extract_as_list(kwargs, 'modules')\n for c in candidates:\n if not isinstance(c, str):\n raise DependencyException('wxwidgets module argument is not a string')\n return candidates\n\npackages['wxwidgets'] = WxDependency\n\nclass VulkanDependencySystem(SystemDependency):\n\n def __init__(self, name: str, environment: 'Environment', kwargs: T.Dict[str, T.Any], language: T.Optional[str] = None) -> None:\n super().__init__(name, environment, kwargs, language=language)\n\n try:\n self.vulkan_sdk = os.environ['VULKAN_SDK']\n if not os.path.isabs(self.vulkan_sdk):\n raise DependencyException('VULKAN_SDK must be an absolute path.')\n except KeyError:\n self.vulkan_sdk = None\n\n if 
self.vulkan_sdk:\n # TODO: this config might not work on some platforms, fix bugs as reported\n # we should at least detect other 64-bit platforms (e.g. armv8)\n lib_name = 'vulkan'\n lib_dir = 'lib'\n inc_dir = 'include'\n if mesonlib.is_windows():\n lib_name = 'vulkan-1'\n lib_dir = 'Lib32'\n inc_dir = 'Include'\n if detect_cpu_family(self.env.coredata.compilers.host) == 'x86_64':\n lib_dir = 'Lib'\n\n # make sure header and lib are valid\n inc_path = os.path.join(self.vulkan_sdk, inc_dir)\n header = os.path.join(inc_path, 'vulkan', 'vulkan.h')\n lib_path = os.path.join(self.vulkan_sdk, lib_dir)\n find_lib = self.clib_compiler.find_library(lib_name, environment, [lib_path])\n\n if not find_lib:\n raise DependencyException('VULKAN_SDK point to invalid directory (no lib)')\n\n if not os.path.isfile(header):\n raise DependencyException('VULKAN_SDK point to invalid directory (no include)')\n\n # XXX: this is very odd, and may deserve being removed\n self.type_name = DependencyTypeName('vulkan_sdk')\n self.is_found = True\n self.compile_args.append('-I' + inc_path)\n self.link_args.append('-L' + lib_path)\n self.link_args.append('-l' + lib_name)\n else:\n # simply try to guess it, usually works on linux\n libs = self.clib_compiler.find_library('vulkan', environment, [])\n if libs is not None and self.clib_compiler.has_header('vulkan/vulkan.h', '', environment, disable_cache=True)[0]:\n self.is_found = True\n for lib in libs:\n self.link_args.append(lib)\n\n if self.is_found:\n get_version = '''\\\n#include <stdio.h>\n#include <vulkan/vulkan.h>\n\nint main() {\n printf(\"%i.%i.%i\", VK_VERSION_MAJOR(VK_HEADER_VERSION_COMPLETE),\n VK_VERSION_MINOR(VK_HEADER_VERSION_COMPLETE),\n VK_VERSION_PATCH(VK_HEADER_VERSION_COMPLETE));\n return 0;\n}\n'''\n try:\n run = self.clib_compiler.run(get_version, environment, extra_args=self.compile_args)\n except CrossNoRunException:\n run = None\n if run and run.compiled and run.returncode == 0:\n self.version = run.stdout\n elif self.vulkan_sdk:\n # fall back to heuristics: detect version number in path\n # matches the default install path on Windows\n match = re.search(rf'VulkanSDK{re.escape(os.path.sep)}([0-9]+(?:\\.[0-9]+)+)', self.vulkan_sdk)\n if match:\n self.version = match.group(1)\n else:\n mlog.warning(f'Environment variable VULKAN_SDK={self.vulkan_sdk} is present, but Vulkan version could not be extracted.')\n\npackages['gl'] = gl_factory = DependencyFactory(\n 'gl',\n [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],\n system_class=GLDependencySystem,\n)\n\npackages['sdl2'] = sdl2_factory = DependencyFactory(\n 'sdl2',\n [DependencyMethods.PKGCONFIG, DependencyMethods.CONFIG_TOOL, DependencyMethods.EXTRAFRAMEWORK, DependencyMethods.CMAKE],\n configtool_class=SDL2DependencyConfigTool,\n cmake_name='SDL2',\n)\n\npackages['vulkan'] = vulkan_factory = DependencyFactory(\n 'vulkan',\n [DependencyMethods.PKGCONFIG, DependencyMethods.SYSTEM],\n system_class=VulkanDependencySystem,\n)\n", "path": "mesonbuild/dependencies/ui.py"}]} | 3,693 | 574 |
gh_patches_debug_7834 | rasdani/github-patches | git_diff | python-poetry__poetry-7671 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Poetry 1.4.0: There is no item named `<package>/WHEEL` in the archive
<!--
Hi there! Thank you for submitting a bug report!
Before you submit your issue, please review and follow the instructions at
https://python-poetry.org/docs/contributing/#reporting-bugs. Fully fill out the template below; insufficient
information or bad reproduction instructions will impair the ability of others to help you.
-->
<!-- All the below information must be provided for others to understand and help with your issue. -->
- **Poetry version**: 1.4.0<!-- Replace with version, e.g. from `poetry --version` -->
- **Python version**: 3.11<!-- Replace with the version of Python being used to run Poetry, as reported by `poetry debug info` -->
- **OS version and name**: macOS 13.2.1/MacPorts <!-- Replace with version + name, e.g. Ubuntu 22.04 or macOS 12.6 -->
- **pyproject.toml**: https://gist.github.com/elventear/2410f2f8a05926df27488767cfcab780<!-- Replace with a link to a Gist (https://gist.github.com/) containing your sanitized pyproject.toml -->
<!-- All the below steps should be completed before submitting your issue. Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) stable Poetry version, installed using a recommended method.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [x] I have consulted the [FAQ](https://python-poetry.org/docs/faq/) and [blog](https://python-poetry.org/blog/) for any relevant entries or release notes.
- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option) and have included the output below.
## Issue
Hypercorn can't be installed with poetry 1.4.0; it used to be possible to install it:
<!-- Now feel free to write your issue, and please be as descriptive as possible! Make sure to include detailed reproduction steps. -->
<!-- Thanks again 🙌 ❤ -->
```
poetry install
Creating virtualenv test in /path/to/.venv
Updating dependencies
Resolving dependencies... (0.4s)
Writing lock file
Package operations: 8 installs, 0 updates, 0 removals
• Installing h11 (0.14.0)
• Installing hpack (4.0.0)
• Installing hyperframe (6.0.1)
• Installing h2 (4.1.0)
• Installing priority (2.0.0)
• Installing toml (0.10.2)
• Installing wsproto (1.2.0)
• Installing hypercorn (0.14.3): Failed
Stack trace:
10 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:271 in _execute_operation
269│
270│ try:
→ 271│ result = self._do_execute_operation(operation)
272│ except EnvCommandError as e:
273│ if e.e.returncode == -2:
9 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:357 in _do_execute_operation
355│ return 0
356│
→ 357│ result: int = getattr(self, f"_execute_{method}")(operation)
358│
359│ if result != 0:
8 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:477 in _execute_install
475│
476│ def _execute_install(self, operation: Install | Update) -> int:
→ 477│ status_code = self._install(operation)
478│
479│ self._save_url_reference(operation)
7 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:535 in _install
533│ self._remove(operation.initial_package)
534│
→ 535│ self._wheel_installer.install(archive)
536│ finally:
537│ if cleanup_archive:
6 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/wheel_installer.py:101 in install
99│ def install(self, wheel: Path) -> None:
100│ with WheelFile.open(Path(wheel.as_posix())) as source:
→ 101│ install(
102│ source=source,
103│ destination=self._destination.for_source(source),
5 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/installer/_core.py:77 in install
75│
76│ """
→ 77│ root_scheme = _process_WHEEL_file(source)
78│
79│ # RECORD handling
4 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/installer/_core.py:21 in _process_WHEEL_file
19│ Returns the scheme that the archive root should go in.
20│ """
→ 21│ stream = source.read_dist_info("WHEEL")
22│ metadata = parse_metadata_file(stream)
23│
3 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/installer/sources.py:139 in read_dist_info
137│ """Get contents, from ``filename`` in the dist-info directory."""
138│ path = posixpath.join(self.dist_info_dir, filename)
→ 139│ return self._zipfile.read(path).decode("utf-8")
140│
141│ def get_contents(self) -> Iterator[WheelContentElement]:
2 /opt/local/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/zipfile.py:1507 in read
1505│ def read(self, name, pwd=None):
1506│ """Return file bytes for name."""
→ 1507│ with self.open(name, "r", pwd) as fp:
1508│ return fp.read()
1509│
1 /opt/local/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/zipfile.py:1544 in open
1542│ else:
1543│ # Get info object for name
→ 1544│ zinfo = self.getinfo(name)
1545│
1546│ if mode == 'w':
KeyError
"There is no item named 'Hypercorn-0.14.3.dist-info/WHEEL' in the archive"
at /opt/local/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/zipfile.py:1473 in getinfo
1469│ def getinfo(self, name):
1470│ """Return the instance of ZipInfo given 'name'."""
1471│ info = self.NameToInfo.get(name)
1472│ if info is None:
→ 1473│ raise KeyError(
1474│ 'There is no item named %r in the archive' % name)
1475│
1476│ return info
```
Wondering if there is a case sensitivity issue. Looking at the wheel, I do see there is a file in `hypercorn-0.14.3.dist-info/WHEEL`.
Latest 1.4.0 release causes unclear issue with aws_psycopg2 install
- **Poetry version**: `1.4.0`
- **Python version**: `python3.9`
<!-- All the below steps should be completed before submitting your issue. Checked checkbox should look like this: [x] -->
- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) stable Poetry version, installed using a recommended method.
- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.
- [ ] I have consulted the [FAQ](https://python-poetry.org/docs/faq/) and [blog](https://python-poetry.org/blog/) for any relevant entries or release notes.
- [ ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option) and have included the output below.
## Issue
<!-- Now feel free to write your issue, and please be as descriptive as possible! Make sure to include detailed reproduction steps. -->
<!-- Thanks again 🙌 ❤ -->
When using the latest version, `1.4.0`, we get the following runtime error:
```
Installing sqlalchemy (1.4.46)
AssertionError
In /builds/project-0/.cache/pypoetry/artifacts/39/96/f1/0f021ed26225e8d9035c9bac5a886b024840f841af84bd33f2be466dcb/aws_psycopg2-1.3.8-py3-none-any.whl, psycopg2/_psycopg.cpython-38-aarch64-linux-gnu.so is not mentioned in RECORD
at venv/lib/python3.9/site-packages/installer/sources.py:158 in get_contents
154│ if item.filename[-1:] == "/": # looks like a directory
155│ continue
156│
157│ record = record_mapping.pop(item.filename, None)
→ 158│ assert record is not None, "In {}, {} is not mentioned in RECORD".format(
159│ self._zipfile.filename,
160│ item.filename,
161│ ) # should not happen for valid wheels
162│
```
However, when we downgrade to `poetry==1.3.2`, this problem goes away...
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/poetry/installation/wheel_installer.py`
Content:
```
1 from __future__ import annotations
2
3 import os
4 import platform
5 import sys
6
7 from pathlib import Path
8 from typing import TYPE_CHECKING
9
10 from installer import install
11 from installer.destinations import SchemeDictionaryDestination
12 from installer.sources import WheelFile
13
14 from poetry.__version__ import __version__
15 from poetry.utils._compat import WINDOWS
16
17
18 if TYPE_CHECKING:
19 from typing import BinaryIO
20
21 from installer.records import RecordEntry
22 from installer.scripts import LauncherKind
23 from installer.utils import Scheme
24
25 from poetry.utils.env import Env
26
27
28 class WheelDestination(SchemeDictionaryDestination):
29 """ """
30
31 def write_to_fs(
32 self,
33 scheme: Scheme,
34 path: Path | str,
35 stream: BinaryIO,
36 is_executable: bool,
37 ) -> RecordEntry:
38 from installer.records import Hash
39 from installer.records import RecordEntry
40 from installer.utils import copyfileobj_with_hashing
41 from installer.utils import make_file_executable
42
43 target_path = Path(self.scheme_dict[scheme]) / path
44 if target_path.exists():
45 # Contrary to the base library we don't raise an error
46 # here since it can break namespace packages (like Poetry's)
47 pass
48
49 parent_folder = target_path.parent
50 if not parent_folder.exists():
51 # Due to the parallel installation it can happen
52 # that two threads try to create the directory.
53 os.makedirs(parent_folder, exist_ok=True)
54
55 with open(target_path, "wb") as f:
56 hash_, size = copyfileobj_with_hashing(stream, f, self.hash_algorithm)
57
58 if is_executable:
59 make_file_executable(target_path)
60
61 return RecordEntry(str(path), Hash(self.hash_algorithm, hash_), size)
62
63 def for_source(self, source: WheelFile) -> WheelDestination:
64 scheme_dict = self.scheme_dict.copy()
65
66 scheme_dict["headers"] = str(Path(scheme_dict["headers"]) / source.distribution)
67
68 return self.__class__(
69 scheme_dict,
70 interpreter=self.interpreter,
71 script_kind=self.script_kind,
72 bytecode_optimization_levels=self.bytecode_optimization_levels,
73 )
74
75
76 class WheelInstaller:
77 def __init__(self, env: Env) -> None:
78 self._env = env
79
80 script_kind: LauncherKind
81 if not WINDOWS:
82 script_kind = "posix"
83 else:
84 if platform.uname()[4].startswith("arm"):
85 script_kind = "win-arm64" if sys.maxsize > 2**32 else "win-arm"
86 else:
87 script_kind = "win-amd64" if sys.maxsize > 2**32 else "win-ia32"
88
89 schemes = self._env.paths
90 schemes["headers"] = schemes["include"]
91
92 self._destination = WheelDestination(
93 schemes, interpreter=self._env.python, script_kind=script_kind
94 )
95
96 def enable_bytecode_compilation(self, enable: bool = True) -> None:
97 self._destination.bytecode_optimization_levels = (1,) if enable else ()
98
99 def install(self, wheel: Path) -> None:
100 with WheelFile.open(Path(wheel.as_posix())) as source:
101 install(
102 source=source,
103 destination=self._destination.for_source(source),
104 # Additional metadata that is generated by the installation tool.
105 additional_metadata={
106 "INSTALLER": f"Poetry {__version__}".encode(),
107 },
108 )
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/poetry/installation/wheel_installer.py b/src/poetry/installation/wheel_installer.py
--- a/src/poetry/installation/wheel_installer.py
+++ b/src/poetry/installation/wheel_installer.py
@@ -97,7 +97,8 @@
self._destination.bytecode_optimization_levels = (1,) if enable else ()
def install(self, wheel: Path) -> None:
- with WheelFile.open(Path(wheel.as_posix())) as source:
+ with WheelFile.open(wheel) as source:
+ source.validate_record()
install(
source=source,
destination=self._destination.for_source(source),
| {"golden_diff": "diff --git a/src/poetry/installation/wheel_installer.py b/src/poetry/installation/wheel_installer.py\n--- a/src/poetry/installation/wheel_installer.py\n+++ b/src/poetry/installation/wheel_installer.py\n@@ -97,7 +97,8 @@\n self._destination.bytecode_optimization_levels = (1,) if enable else ()\n \n def install(self, wheel: Path) -> None:\n- with WheelFile.open(Path(wheel.as_posix())) as source:\n+ with WheelFile.open(wheel) as source:\n+ source.validate_record()\n install(\n source=source,\n destination=self._destination.for_source(source),\n", "issue": "Poetry 1.4.0: There is no item named `<package>/WHEEL` in the archive\n<!--\r\n Hi there! Thank you for submitting a bug report!\r\n\r\n Before you submit your issue, please review and follow the instructions at\r\n https://python-poetry.org/docs/contributing/#reporting-bugs. Fully fill out the template below; insufficient\r\n information or bad reproduction instructions will impair the ability of others to help you.\r\n-->\r\n\r\n<!-- All the below information must be provided for others to understand and help with your issue. -->\r\n- **Poetry version**: 1.4.0<!-- Replace with version, e.g. from `poetry --version` -->\r\n- **Python version**: 3.11<!-- Replace with the version of Python being used to run Poetry, as reported by `poetry debug info` -->\r\n- **OS version and name**: macOS 13.2.1/MacPorts <!-- Replace with version + name, e.g. Ubuntu 22.04 or macOS 12.6 -->\r\n- **pyproject.toml**: https://gist.github.com/elventear/2410f2f8a05926df27488767cfcab780<!-- Replace with a link to a Gist (https://gist.github.com/) containing your sanitized pyproject.toml -->\r\n\r\n<!-- All the below steps should be completed before submitting your issue. Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) stable Poetry version, installed using a recommended method.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [x] I have consulted the [FAQ](https://python-poetry.org/docs/faq/) and [blog](https://python-poetry.org/blog/) for any relevant entries or release notes.\r\n- [x] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option) and have included the output below.\r\n\r\n## Issue\r\n\r\nHypercorn can't be installed with poetry 1.4.0, it used to be possible to install it:\r\n\r\n<!-- Now feel free to write your issue, and please be as descriptive as possible! Make sure to include detailed reproduction steps. -->\r\n<!-- Thanks again \ud83d\ude4c \u2764 -->\r\n\r\n```\r\npoetry install \r\nCreating virtualenv test in /path/to/.venv\r\nUpdating dependencies\r\nResolving dependencies... 
(0.4s)\r\n\r\nWriting lock file\r\n\r\nPackage operations: 8 installs, 0 updates, 0 removals\r\n\r\n \u2022 Installing h11 (0.14.0)\r\n \u2022 Installing hpack (4.0.0)\r\n \u2022 Installing hyperframe (6.0.1)\r\n \u2022 Installing h2 (4.1.0)\r\n \u2022 Installing priority (2.0.0)\r\n \u2022 Installing toml (0.10.2)\r\n \u2022 Installing wsproto (1.2.0)\r\n \u2022 Installing hypercorn (0.14.3): Failed\r\n\r\n Stack trace:\r\n\r\n 10 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:271 in _execute_operation\r\n 269\u2502\r\n 270\u2502 try:\r\n \u2192 271\u2502 result = self._do_execute_operation(operation)\r\n 272\u2502 except EnvCommandError as e:\r\n 273\u2502 if e.e.returncode == -2:\r\n\r\n 9 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:357 in _do_execute_operation\r\n 355\u2502 return 0\r\n 356\u2502\r\n \u2192 357\u2502 result: int = getattr(self, f\"_execute_{method}\")(operation)\r\n 358\u2502\r\n 359\u2502 if result != 0:\r\n\r\n 8 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:477 in _execute_install\r\n 475\u2502\r\n 476\u2502 def _execute_install(self, operation: Install | Update) -> int:\r\n \u2192 477\u2502 status_code = self._install(operation)\r\n 478\u2502\r\n 479\u2502 self._save_url_reference(operation)\r\n\r\n 7 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/executor.py:535 in _install\r\n 533\u2502 self._remove(operation.initial_package)\r\n 534\u2502\r\n \u2192 535\u2502 self._wheel_installer.install(archive)\r\n 536\u2502 finally:\r\n 537\u2502 if cleanup_archive:\r\n\r\n 6 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/poetry/installation/wheel_installer.py:101 in install\r\n 99\u2502 def install(self, wheel: Path) -> None:\r\n 100\u2502 with WheelFile.open(Path(wheel.as_posix())) as source:\r\n \u2192 101\u2502 install(\r\n 102\u2502 source=source,\r\n 103\u2502 destination=self._destination.for_source(source),\r\n\r\n 5 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/installer/_core.py:77 in install\r\n 75\u2502\r\n 76\u2502 \"\"\"\r\n \u2192 77\u2502 root_scheme = _process_WHEEL_file(source)\r\n 78\u2502\r\n 79\u2502 # RECORD handling\r\n\r\n 4 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/installer/_core.py:21 in _process_WHEEL_file\r\n 19\u2502 Returns the scheme that the archive root should go in.\r\n 20\u2502 \"\"\"\r\n \u2192 21\u2502 stream = source.read_dist_info(\"WHEEL\")\r\n 22\u2502 metadata = parse_metadata_file(stream)\r\n 23\u2502\r\n\r\n 3 ~/.local/pipx/venvs/poetry/lib/python3.11/site-packages/installer/sources.py:139 in read_dist_info\r\n 137\u2502 \"\"\"Get contents, from ``filename`` in the dist-info directory.\"\"\"\r\n 138\u2502 path = posixpath.join(self.dist_info_dir, filename)\r\n \u2192 139\u2502 return self._zipfile.read(path).decode(\"utf-8\")\r\n 140\u2502\r\n 141\u2502 def get_contents(self) -> Iterator[WheelContentElement]:\r\n\r\n 2 /opt/local/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/zipfile.py:1507 in read\r\n 1505\u2502 def read(self, name, pwd=None):\r\n 1506\u2502 \"\"\"Return file bytes for name.\"\"\"\r\n \u2192 1507\u2502 with self.open(name, \"r\", pwd) as fp:\r\n 1508\u2502 return fp.read()\r\n 1509\u2502\r\n\r\n 1 /opt/local/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/zipfile.py:1544 in open\r\n 1542\u2502 else:\r\n 1543\u2502 # Get info object for name\r\n \u2192 1544\u2502 zinfo = self.getinfo(name)\r\n 
1545\u2502\r\n 1546\u2502 if mode == 'w':\r\n\r\n KeyError\r\n\r\n \"There is no item named 'Hypercorn-0.14.3.dist-info/WHEEL' in the archive\"\r\n\r\n at /opt/local/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/zipfile.py:1473 in getinfo\r\n 1469\u2502 def getinfo(self, name):\r\n 1470\u2502 \"\"\"Return the instance of ZipInfo given 'name'.\"\"\"\r\n 1471\u2502 info = self.NameToInfo.get(name)\r\n 1472\u2502 if info is None:\r\n \u2192 1473\u2502 raise KeyError(\r\n 1474\u2502 'There is no item named %r in the archive' % name)\r\n 1475\u2502\r\n 1476\u2502 return info\r\n```\r\n\r\nWondering if there is a case sensitivity issue. Looking at the wheel, I do see there is a file in `hypercorn-0.14.3.dist-info/WHEEL`. \r\n\nLatest 1.4.0 release causes unclear Issue with aws_psycopg2 install\n- **Poetry version**: `1.4.0`\r\n- **Python version**: `python3.9`\r\n\r\n<!-- All the below steps should be completed before submitting your issue. Checked checkbox should look like this: [x] -->\r\n- [x] I am on the [latest](https://github.com/python-poetry/poetry/releases/latest) stable Poetry version, installed using a recommended method.\r\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n- [ ] I have consulted the [FAQ](https://python-poetry.org/docs/faq/) and [blog](https://python-poetry.org/blog/) for any relevant entries or release notes.\r\n- [ ] If an exception occurs when executing a command, I executed it again in debug mode (`-vvv` option) and have included the output below.\r\n\r\n## Issue\r\n<!-- Now feel free to write your issue, and please be as descriptive as possible! Make sure to include detailed reproduction steps. -->\r\n<!-- Thanks again \ud83d\ude4c \u2764 -->\r\n\r\nWhen using the latest version, `1.4.0` we get the following runtime error:\r\n\r\n```\r\n Installing sqlalchemy (1.4.46)\r\n\r\n AssertionError\r\n\r\n In /builds/project-0/.cache/pypoetry/artifacts/39/96/f1/0f021ed26225e8d9035c9bac5a886b024840f841af84bd33f2be466dcb/aws_psycopg2-1.3.8-py3-none-any.whl, psycopg2/_psycopg.cpython-38-aarch64-linux-gnu.so is not mentioned in RECORD\r\n\r\n at venv/lib/python3.9/site-packages/installer/sources.py:158 in get_contents\r\n 154\u2502 if item.filename[-1:] == \"/\": # looks like a directory\r\n 155\u2502 continue\r\n 156\u2502 \r\n 157\u2502 record = record_mapping.pop(item.filename, None)\r\n \u2192 158\u2502 assert record is not None, \"In {}, {} is not mentioned in RECORD\".format(\r\n 159\u2502 self._zipfile.filename,\r\n 160\u2502 item.filename,\r\n 161\u2502 ) # should not happen for valid wheels\r\n 162\u2502 \r\n```\r\n\r\nHowever, when we downgrade to `poetry==1.3.2` this problem goes away...\n", "before_files": [{"content": "from __future__ import annotations\n\nimport os\nimport platform\nimport sys\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom installer import install\nfrom installer.destinations import SchemeDictionaryDestination\nfrom installer.sources import WheelFile\n\nfrom poetry.__version__ import __version__\nfrom poetry.utils._compat import WINDOWS\n\n\nif TYPE_CHECKING:\n from typing import BinaryIO\n\n from installer.records import RecordEntry\n from installer.scripts import LauncherKind\n from installer.utils import Scheme\n\n from poetry.utils.env import Env\n\n\nclass WheelDestination(SchemeDictionaryDestination):\n \"\"\" \"\"\"\n\n def write_to_fs(\n self,\n scheme: Scheme,\n path: Path | str,\n stream: BinaryIO,\n 
is_executable: bool,\n ) -> RecordEntry:\n from installer.records import Hash\n from installer.records import RecordEntry\n from installer.utils import copyfileobj_with_hashing\n from installer.utils import make_file_executable\n\n target_path = Path(self.scheme_dict[scheme]) / path\n if target_path.exists():\n # Contrary to the base library we don't raise an error\n # here since it can break namespace packages (like Poetry's)\n pass\n\n parent_folder = target_path.parent\n if not parent_folder.exists():\n # Due to the parallel installation it can happen\n # that two threads try to create the directory.\n os.makedirs(parent_folder, exist_ok=True)\n\n with open(target_path, \"wb\") as f:\n hash_, size = copyfileobj_with_hashing(stream, f, self.hash_algorithm)\n\n if is_executable:\n make_file_executable(target_path)\n\n return RecordEntry(str(path), Hash(self.hash_algorithm, hash_), size)\n\n def for_source(self, source: WheelFile) -> WheelDestination:\n scheme_dict = self.scheme_dict.copy()\n\n scheme_dict[\"headers\"] = str(Path(scheme_dict[\"headers\"]) / source.distribution)\n\n return self.__class__(\n scheme_dict,\n interpreter=self.interpreter,\n script_kind=self.script_kind,\n bytecode_optimization_levels=self.bytecode_optimization_levels,\n )\n\n\nclass WheelInstaller:\n def __init__(self, env: Env) -> None:\n self._env = env\n\n script_kind: LauncherKind\n if not WINDOWS:\n script_kind = \"posix\"\n else:\n if platform.uname()[4].startswith(\"arm\"):\n script_kind = \"win-arm64\" if sys.maxsize > 2**32 else \"win-arm\"\n else:\n script_kind = \"win-amd64\" if sys.maxsize > 2**32 else \"win-ia32\"\n\n schemes = self._env.paths\n schemes[\"headers\"] = schemes[\"include\"]\n\n self._destination = WheelDestination(\n schemes, interpreter=self._env.python, script_kind=script_kind\n )\n\n def enable_bytecode_compilation(self, enable: bool = True) -> None:\n self._destination.bytecode_optimization_levels = (1,) if enable else ()\n\n def install(self, wheel: Path) -> None:\n with WheelFile.open(Path(wheel.as_posix())) as source:\n install(\n source=source,\n destination=self._destination.for_source(source),\n # Additional metadata that is generated by the installation tool.\n additional_metadata={\n \"INSTALLER\": f\"Poetry {__version__}\".encode(),\n },\n )\n", "path": "src/poetry/installation/wheel_installer.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport os\nimport platform\nimport sys\n\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom installer import install\nfrom installer.destinations import SchemeDictionaryDestination\nfrom installer.sources import WheelFile\n\nfrom poetry.__version__ import __version__\nfrom poetry.utils._compat import WINDOWS\n\n\nif TYPE_CHECKING:\n from typing import BinaryIO\n\n from installer.records import RecordEntry\n from installer.scripts import LauncherKind\n from installer.utils import Scheme\n\n from poetry.utils.env import Env\n\n\nclass WheelDestination(SchemeDictionaryDestination):\n \"\"\" \"\"\"\n\n def write_to_fs(\n self,\n scheme: Scheme,\n path: Path | str,\n stream: BinaryIO,\n is_executable: bool,\n ) -> RecordEntry:\n from installer.records import Hash\n from installer.records import RecordEntry\n from installer.utils import copyfileobj_with_hashing\n from installer.utils import make_file_executable\n\n target_path = Path(self.scheme_dict[scheme]) / path\n if target_path.exists():\n # Contrary to the base library we don't raise an error\n # here since it can break namespace packages 
(like Poetry's)\n pass\n\n parent_folder = target_path.parent\n if not parent_folder.exists():\n # Due to the parallel installation it can happen\n # that two threads try to create the directory.\n os.makedirs(parent_folder, exist_ok=True)\n\n with open(target_path, \"wb\") as f:\n hash_, size = copyfileobj_with_hashing(stream, f, self.hash_algorithm)\n\n if is_executable:\n make_file_executable(target_path)\n\n return RecordEntry(str(path), Hash(self.hash_algorithm, hash_), size)\n\n def for_source(self, source: WheelFile) -> WheelDestination:\n scheme_dict = self.scheme_dict.copy()\n\n scheme_dict[\"headers\"] = str(Path(scheme_dict[\"headers\"]) / source.distribution)\n\n return self.__class__(\n scheme_dict,\n interpreter=self.interpreter,\n script_kind=self.script_kind,\n bytecode_optimization_levels=self.bytecode_optimization_levels,\n )\n\n\nclass WheelInstaller:\n def __init__(self, env: Env) -> None:\n self._env = env\n\n script_kind: LauncherKind\n if not WINDOWS:\n script_kind = \"posix\"\n else:\n if platform.uname()[4].startswith(\"arm\"):\n script_kind = \"win-arm64\" if sys.maxsize > 2**32 else \"win-arm\"\n else:\n script_kind = \"win-amd64\" if sys.maxsize > 2**32 else \"win-ia32\"\n\n schemes = self._env.paths\n schemes[\"headers\"] = schemes[\"include\"]\n\n self._destination = WheelDestination(\n schemes, interpreter=self._env.python, script_kind=script_kind\n )\n\n def enable_bytecode_compilation(self, enable: bool = True) -> None:\n self._destination.bytecode_optimization_levels = (1,) if enable else ()\n\n def install(self, wheel: Path) -> None:\n with WheelFile.open(wheel) as source:\n source.validate_record()\n install(\n source=source,\n destination=self._destination.for_source(source),\n # Additional metadata that is generated by the installation tool.\n additional_metadata={\n \"INSTALLER\": f\"Poetry {__version__}\".encode(),\n },\n )\n", "path": "src/poetry/installation/wheel_installer.py"}]} | 3,748 | 149 |
gh_patches_debug_30357 | rasdani/github-patches | git_diff | canonical__snapcraft-1490 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make script aware of 407 when downloading gradle
builds.snapcraft.io rejects downloading gradle.
```
Downloading https://services.gradle.org/distributions/gradle-4.0.2-bin.zip
Exception in thread "main" java.io.IOException: Unable to tunnel through proxy. Proxy returns "HTTP/1.1 407 Proxy Authentication Required"
```
Source: https://build.snapcraft.io/user/JabRef/jabref/67152
An issue has been submitted - see https://github.com/canonical-websites/build.snapcraft.io/issues/918. I don't know if this will be fixed, or if it is easier to quickly adapt `gradle.py`.
I assume `gradle.py` has to be adapted accordingly? https://github.com/snapcore/snapcraft/blob/master/snapcraft/plugins/gradle.py#L88; if gradlew exists, execute it. If that fails with a proxy 407 error (grepping stdout?), execute gradle.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `snapcraft/plugins/ant.py`
Content:
```
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright (C) 2015 Canonical Ltd
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 """The ant plugin is useful for ant based parts.
18
19 The ant build system is commonly used to build Java projects.
20 The plugin requires a build.xml in the root of the source tree.
21
22 This plugin uses the common plugin keywords as well as those for "sources".
23 For more information check the 'plugins' topic for the former and the
24 'sources' topic for the latter.
25
26 Additionally, this plugin uses the following plugin-specific keywords:
27
28 - ant-properties:
29 (object)
30 A dictionary of key-value pairs. Set the following properties when
31 running ant.
32
33 - ant-build-targets:
34 (list of strings)
35 Run the given ant targets.
36 """
37
38 import glob
39 import logging
40 import os
41 from urllib.parse import urlsplit
42
43 import snapcraft
44 import snapcraft.common
45 import snapcraft.plugins.jdk
46
47
48 logger = logging.getLogger(__name__)
49
50
51 class AntPlugin(snapcraft.plugins.jdk.JdkPlugin):
52
53 @classmethod
54 def schema(cls):
55 schema = super().schema()
56 schema['properties']['ant-properties'] = {
57 'type': 'object',
58 'default': {},
59 }
60 schema['properties']['ant-build-targets'] = {
61 'type': 'array',
62 'uniqueItems': True,
63 'items': {
64 'type': 'string',
65 },
66 'default': [],
67 }
68 return schema
69
70 def __init__(self, name, options, project):
71 super().__init__(name, options, project)
72 self.build_packages.append('ant')
73
74 @classmethod
75 def get_build_properties(cls):
76 # Inform Snapcraft of the properties associated with building. If these
77 # change in the YAML Snapcraft will consider the build step dirty.
78 return ['ant-build-targets', 'ant-properties']
79
80 def build(self):
81 super().build()
82
83 command = ['ant']
84
85 if self.options.ant_build_targets:
86 command.extend(self.options.ant_build_targets)
87
88 for prop, value in self.options.ant_properties.items():
89 command.extend(['-D{}={}'.format(prop, value)])
90
91 self.run(command)
92 files = glob.glob(os.path.join(self.builddir, 'target', '*.jar'))
93 if files:
94 jardir = os.path.join(self.installdir, 'jar')
95 os.makedirs(jardir)
96 for f in files:
97 base = os.path.basename(f)
98 os.link(f, os.path.join(jardir, base))
99
100 def get_proxy_options(self, scheme):
101 proxy = os.environ.get('{}_proxy'.format(scheme))
102 if proxy:
103 parsed = urlsplit(proxy)
104 if parsed.hostname is not None:
105 yield '-D{}.proxyHost={}'.format(scheme, parsed.hostname)
106 if parsed.port is not None:
107 yield '-D{}.proxyPort={}'.format(scheme, parsed.port)
108
109 def env(self, root):
110 env = super().env(root)
111 jars = glob.glob(os.path.join(self.installdir, 'jar', '*.jar'))
112 if jars:
113 jars = [os.path.join(root, 'jar',
114 os.path.basename(x)) for x in sorted(jars)]
115 env.extend(
116 ['CLASSPATH={}:$CLASSPATH'.format(':'.join(jars))])
117 # Getting ant to use a proxy requires a little work; the JRE doesn't
118 # help as much as it should. (java.net.useSystemProxies=true ought
119 # to do the trick, but it relies on desktop configuration rather
120 # than using the standard environment variables.)
121 ant_opts = []
122 ant_opts.extend(self.get_proxy_options('http'))
123 ant_opts.extend(self.get_proxy_options('https'))
124 if ant_opts:
125 env.append("ANT_OPTS='{}'".format(
126 ' '.join(opt.replace("'", "'\\''") for opt in ant_opts)))
127 return env
128
```
Path: `snapcraft/plugins/gradle.py`
Content:
```
1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
2 #
3 # Copyright (C) 2016 Canonical Ltd
4 #
5 # This program is free software: you can redistribute it and/or modify
6 # it under the terms of the GNU General Public License version 3 as
7 # published by the Free Software Foundation.
8 #
9 # This program is distributed in the hope that it will be useful,
10 # but WITHOUT ANY WARRANTY; without even the implied warranty of
11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 # GNU General Public License for more details.
13 #
14 # You should have received a copy of the GNU General Public License
15 # along with this program. If not, see <http://www.gnu.org/licenses/>.
16
17 """This plugin is useful for building parts that use gradle.
18
19 The gradle build system is commonly used to build Java projects.
20 The plugin requires a pom.xml in the root of the source tree.
21
22 This plugin uses the common plugin keywords as well as those for "sources".
23 For more information check the 'plugins' topic for the former and the
24 'sources' topic for the latter.
25
26 Additionally, this plugin uses the following plugin-specific keywords:
27
28 - gradle-options:
29 (list of strings)
30 Flags to pass to the build using the gradle semantics for parameters.
31 The 'jar' option is always passed in as the last parameter.
32
33 - gradle-output-dir:
34 (string; default: 'build/libs')
35 The output directory where the resulting jar or war files from gradle[w]
36 are generated.
37 """
38
39 import glob
40 import logging
41 import os
42 import urllib.parse
43 import snapcraft
44 import snapcraft.common
45 import snapcraft.plugins.jdk
46
47
48 logger = logging.getLogger(__name__)
49
50
51 class GradlePlugin(snapcraft.plugins.jdk.JdkPlugin):
52
53 @classmethod
54 def schema(cls):
55 schema = super().schema()
56 schema['properties']['gradle-options'] = {
57 'type': 'array',
58 'minitems': 1,
59 'uniqueItems': True,
60 'items': {
61 'type': 'string',
62 },
63 'default': [],
64 }
65 schema['properties']['gradle-output-dir'] = {
66 'type': 'string',
67 'default': 'build/libs',
68 }
69
70 return schema
71
72 def __init__(self, name, options, project):
73 super().__init__(name, options, project)
74 filename = os.path.join(os.getcwd(), 'gradlew')
75 if not os.path.isfile(filename):
76 self.build_packages.append('gradle')
77 self.build_packages.append('ca-certificates-java')
78
79 @classmethod
80 def get_build_properties(cls):
81 # Inform Snapcraft of the properties associated with building. If these
82 # change in the YAML Snapcraft will consider the build step dirty.
83 return super().get_build_properties() + ['gradle-options',
84 'gradle-output-dir']
85
86 def build(self):
87 super().build()
88 filename = os.path.join(os.getcwd(), 'gradlew')
89 if os.path.isfile(filename):
90 gradle_cmd = ['./gradlew']
91 else:
92 gradle_cmd = ['gradle']
93 self.run(gradle_cmd +
94 self._get_proxy_options() +
95 self.options.gradle_options + ['jar'])
96
97 src = os.path.join(self.builddir, self.options.gradle_output_dir)
98 jarfiles = glob.glob(os.path.join(src, '*.jar'))
99 warfiles = glob.glob(os.path.join(src, '*.war'))
100
101 if len(jarfiles) > 0:
102 basedir = 'jar'
103 elif len(warfiles) > 0:
104 basedir = 'war'
105 jarfiles = warfiles
106 else:
107 raise RuntimeError("Could not find any built jar files for part")
108
109 snapcraft.file_utils.link_or_copy_tree(
110 src, os.path.join(self.installdir, basedir),
111 copy_function=lambda src, dst:
112 snapcraft.file_utils.link_or_copy(src, dst, self.installdir))
113
114 def _get_proxy_options(self):
115 # XXX This doesn't yet support username and password.
116 # -- elopio - 2016-11-17
117 proxy_options = []
118 for var in ('http', 'https'):
119 proxy = os.environ.get('{}_proxy'.format(var), False)
120 if proxy:
121 parsed_url = urllib.parse.urlparse(proxy)
122 proxy_options.append('-D{}.proxyHost={}'.format(
123 var, parsed_url.hostname))
124 if parsed_url.port:
125 proxy_options.append(
126 '-D{}.proxyPort={}'.format(var, parsed_url.port))
127 return proxy_options
128
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/snapcraft/plugins/ant.py b/snapcraft/plugins/ant.py
--- a/snapcraft/plugins/ant.py
+++ b/snapcraft/plugins/ant.py
@@ -105,6 +105,10 @@
yield '-D{}.proxyHost={}'.format(scheme, parsed.hostname)
if parsed.port is not None:
yield '-D{}.proxyPort={}'.format(scheme, parsed.port)
+ if parsed.username is not None:
+ yield '-D{}.proxyUser={}'.format(scheme, parsed.username)
+ if parsed.password is not None:
+ yield '-D{}.proxyPassword={}'.format(scheme, parsed.password)
def env(self, root):
env = super().env(root)
diff --git a/snapcraft/plugins/gradle.py b/snapcraft/plugins/gradle.py
--- a/snapcraft/plugins/gradle.py
+++ b/snapcraft/plugins/gradle.py
@@ -112,8 +112,6 @@
snapcraft.file_utils.link_or_copy(src, dst, self.installdir))
def _get_proxy_options(self):
- # XXX This doesn't yet support username and password.
- # -- elopio - 2016-11-17
proxy_options = []
for var in ('http', 'https'):
proxy = os.environ.get('{}_proxy'.format(var), False)
@@ -124,4 +122,11 @@
if parsed_url.port:
proxy_options.append(
'-D{}.proxyPort={}'.format(var, parsed_url.port))
+ if parsed_url.username:
+ proxy_options.append(
+ '-D{}.proxyUser={}'.format(var, parsed_url.username))
+ if parsed_url.password:
+ proxy_options.append(
+ '-D{}.proxyPassword={}'.format(
+ var, parsed_url.password))
return proxy_options
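
As a rough illustration (not part of the patch itself), the options the updated `_get_proxy_options()` derives from an authenticated proxy URL look like this; the proxy address and credentials are made up:

```python
# Illustration only: proxy options built from an authenticated proxy URL,
# mirroring the patched _get_proxy_options() logic with the standard library.
import urllib.parse

proxy = "http://builder:[email protected]:3128"  # hypothetical proxy

parsed = urllib.parse.urlparse(proxy)
options = [
    '-Dhttp.proxyHost={}'.format(parsed.hostname),
    '-Dhttp.proxyPort={}'.format(parsed.port),
    '-Dhttp.proxyUser={}'.format(parsed.username),
    '-Dhttp.proxyPassword={}'.format(parsed.password),
]
print(options)
# Appended to the `gradle` / `./gradlew` invocation, these JVM system
# properties let the build authenticate against the proxy instead of failing
# with "HTTP/1.1 407 Proxy Authentication Required".
```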
| {"golden_diff": "diff --git a/snapcraft/plugins/ant.py b/snapcraft/plugins/ant.py\n--- a/snapcraft/plugins/ant.py\n+++ b/snapcraft/plugins/ant.py\n@@ -105,6 +105,10 @@\n yield '-D{}.proxyHost={}'.format(scheme, parsed.hostname)\n if parsed.port is not None:\n yield '-D{}.proxyPort={}'.format(scheme, parsed.port)\n+ if parsed.username is not None:\n+ yield '-D{}.proxyUser={}'.format(scheme, parsed.username)\n+ if parsed.password is not None:\n+ yield '-D{}.proxyPassword={}'.format(scheme, parsed.password)\n \n def env(self, root):\n env = super().env(root)\ndiff --git a/snapcraft/plugins/gradle.py b/snapcraft/plugins/gradle.py\n--- a/snapcraft/plugins/gradle.py\n+++ b/snapcraft/plugins/gradle.py\n@@ -112,8 +112,6 @@\n snapcraft.file_utils.link_or_copy(src, dst, self.installdir))\n \n def _get_proxy_options(self):\n- # XXX This doesn't yet support username and password.\n- # -- elopio - 2016-11-17\n proxy_options = []\n for var in ('http', 'https'):\n proxy = os.environ.get('{}_proxy'.format(var), False)\n@@ -124,4 +122,11 @@\n if parsed_url.port:\n proxy_options.append(\n '-D{}.proxyPort={}'.format(var, parsed_url.port))\n+ if parsed_url.username:\n+ proxy_options.append(\n+ '-D{}.proxyUser={}'.format(var, parsed_url.username))\n+ if parsed_url.password:\n+ proxy_options.append(\n+ '-D{}.proxyPassword={}'.format(\n+ var, parsed_url.password))\n return proxy_options\n", "issue": "Make script aware of 407 when downloading gradle\nbuilds.snapcraft.io rejects downloading gradle.\r\n\r\n```\r\nDownloading https://services.gradle.org/distributions/gradle-4.0.2-bin.zip\r\n\r\nException in thread \"main\" java.io.IOException: Unable to tunnel through proxy. Proxy returns \"HTTP/1.1 407 Proxy Authentication Required\"\r\n```\r\n\r\nSource: https://build.snapcraft.io/user/JabRef/jabref/67152\r\n\r\nAn issue has been submitted - see https://github.com/canonical-websites/build.snapcraft.io/issues/918. I don't know, if this will be fixed or if it is easier to quickyl adapt `gradle.py`.\r\n\r\nI assume, gradle.py has to be adapted accordingly? https://github.com/snapcore/snapcraft/blob/master/snapcraft/plugins/gradle.py#L88; if gradlew exists, execute it. If that fails with proxy 407 error (grepping stdout?), execute gradle\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2015 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The ant plugin is useful for ant based parts.\n\nThe ant build system is commonly used to build Java projects.\nThe plugin requires a build.xml in the root of the source tree.\n\nThis plugin uses the common plugin keywords as well as those for \"sources\".\nFor more information check the 'plugins' topic for the former and the\n'sources' topic for the latter.\n\nAdditionally, this plugin uses the following plugin-specific keywords:\n\n - ant-properties:\n (object)\n A dictionary of key-value pairs. 
Set the following properties when\n running ant.\n\n - ant-build-targets:\n (list of strings)\n Run the given ant targets.\n\"\"\"\n\nimport glob\nimport logging\nimport os\nfrom urllib.parse import urlsplit\n\nimport snapcraft\nimport snapcraft.common\nimport snapcraft.plugins.jdk\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AntPlugin(snapcraft.plugins.jdk.JdkPlugin):\n\n @classmethod\n def schema(cls):\n schema = super().schema()\n schema['properties']['ant-properties'] = {\n 'type': 'object',\n 'default': {},\n }\n schema['properties']['ant-build-targets'] = {\n 'type': 'array',\n 'uniqueItems': True,\n 'items': {\n 'type': 'string',\n },\n 'default': [],\n }\n return schema\n\n def __init__(self, name, options, project):\n super().__init__(name, options, project)\n self.build_packages.append('ant')\n\n @classmethod\n def get_build_properties(cls):\n # Inform Snapcraft of the properties associated with building. If these\n # change in the YAML Snapcraft will consider the build step dirty.\n return ['ant-build-targets', 'ant-properties']\n\n def build(self):\n super().build()\n\n command = ['ant']\n\n if self.options.ant_build_targets:\n command.extend(self.options.ant_build_targets)\n\n for prop, value in self.options.ant_properties.items():\n command.extend(['-D{}={}'.format(prop, value)])\n\n self.run(command)\n files = glob.glob(os.path.join(self.builddir, 'target', '*.jar'))\n if files:\n jardir = os.path.join(self.installdir, 'jar')\n os.makedirs(jardir)\n for f in files:\n base = os.path.basename(f)\n os.link(f, os.path.join(jardir, base))\n\n def get_proxy_options(self, scheme):\n proxy = os.environ.get('{}_proxy'.format(scheme))\n if proxy:\n parsed = urlsplit(proxy)\n if parsed.hostname is not None:\n yield '-D{}.proxyHost={}'.format(scheme, parsed.hostname)\n if parsed.port is not None:\n yield '-D{}.proxyPort={}'.format(scheme, parsed.port)\n\n def env(self, root):\n env = super().env(root)\n jars = glob.glob(os.path.join(self.installdir, 'jar', '*.jar'))\n if jars:\n jars = [os.path.join(root, 'jar',\n os.path.basename(x)) for x in sorted(jars)]\n env.extend(\n ['CLASSPATH={}:$CLASSPATH'.format(':'.join(jars))])\n # Getting ant to use a proxy requires a little work; the JRE doesn't\n # help as much as it should. (java.net.useSystemProxies=true ought\n # to do the trick, but it relies on desktop configuration rather\n # than using the standard environment variables.)\n ant_opts = []\n ant_opts.extend(self.get_proxy_options('http'))\n ant_opts.extend(self.get_proxy_options('https'))\n if ant_opts:\n env.append(\"ANT_OPTS='{}'\".format(\n ' '.join(opt.replace(\"'\", \"'\\\\''\") for opt in ant_opts)))\n return env\n", "path": "snapcraft/plugins/ant.py"}, {"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2016 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"This plugin is useful for building parts that use gradle.\n\nThe gradle build system is commonly used to build Java projects.\nThe plugin requires a pom.xml in the root of the source tree.\n\nThis plugin uses the common plugin keywords as well as those for \"sources\".\nFor more information check the 'plugins' topic for the former and the\n'sources' topic for the latter.\n\nAdditionally, this plugin uses the following plugin-specific keywords:\n\n - gradle-options:\n (list of strings)\n Flags to pass to the build using the gradle semantics for parameters.\n The 'jar' option is always passed in as the last parameter.\n\n - gradle-output-dir:\n (string; default: 'build/libs')\n The output directory where the resulting jar or war files from gradle[w]\n are generated.\n\"\"\"\n\nimport glob\nimport logging\nimport os\nimport urllib.parse\nimport snapcraft\nimport snapcraft.common\nimport snapcraft.plugins.jdk\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass GradlePlugin(snapcraft.plugins.jdk.JdkPlugin):\n\n @classmethod\n def schema(cls):\n schema = super().schema()\n schema['properties']['gradle-options'] = {\n 'type': 'array',\n 'minitems': 1,\n 'uniqueItems': True,\n 'items': {\n 'type': 'string',\n },\n 'default': [],\n }\n schema['properties']['gradle-output-dir'] = {\n 'type': 'string',\n 'default': 'build/libs',\n }\n\n return schema\n\n def __init__(self, name, options, project):\n super().__init__(name, options, project)\n filename = os.path.join(os.getcwd(), 'gradlew')\n if not os.path.isfile(filename):\n self.build_packages.append('gradle')\n self.build_packages.append('ca-certificates-java')\n\n @classmethod\n def get_build_properties(cls):\n # Inform Snapcraft of the properties associated with building. 
If these\n # change in the YAML Snapcraft will consider the build step dirty.\n return super().get_build_properties() + ['gradle-options',\n 'gradle-output-dir']\n\n def build(self):\n super().build()\n filename = os.path.join(os.getcwd(), 'gradlew')\n if os.path.isfile(filename):\n gradle_cmd = ['./gradlew']\n else:\n gradle_cmd = ['gradle']\n self.run(gradle_cmd +\n self._get_proxy_options() +\n self.options.gradle_options + ['jar'])\n\n src = os.path.join(self.builddir, self.options.gradle_output_dir)\n jarfiles = glob.glob(os.path.join(src, '*.jar'))\n warfiles = glob.glob(os.path.join(src, '*.war'))\n\n if len(jarfiles) > 0:\n basedir = 'jar'\n elif len(warfiles) > 0:\n basedir = 'war'\n jarfiles = warfiles\n else:\n raise RuntimeError(\"Could not find any built jar files for part\")\n\n snapcraft.file_utils.link_or_copy_tree(\n src, os.path.join(self.installdir, basedir),\n copy_function=lambda src, dst:\n snapcraft.file_utils.link_or_copy(src, dst, self.installdir))\n\n def _get_proxy_options(self):\n # XXX This doesn't yet support username and password.\n # -- elopio - 2016-11-17\n proxy_options = []\n for var in ('http', 'https'):\n proxy = os.environ.get('{}_proxy'.format(var), False)\n if proxy:\n parsed_url = urllib.parse.urlparse(proxy)\n proxy_options.append('-D{}.proxyHost={}'.format(\n var, parsed_url.hostname))\n if parsed_url.port:\n proxy_options.append(\n '-D{}.proxyPort={}'.format(var, parsed_url.port))\n return proxy_options\n", "path": "snapcraft/plugins/gradle.py"}], "after_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2015 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"The ant plugin is useful for ant based parts.\n\nThe ant build system is commonly used to build Java projects.\nThe plugin requires a build.xml in the root of the source tree.\n\nThis plugin uses the common plugin keywords as well as those for \"sources\".\nFor more information check the 'plugins' topic for the former and the\n'sources' topic for the latter.\n\nAdditionally, this plugin uses the following plugin-specific keywords:\n\n - ant-properties:\n (object)\n A dictionary of key-value pairs. 
Set the following properties when\n running ant.\n\n - ant-build-targets:\n (list of strings)\n Run the given ant targets.\n\"\"\"\n\nimport glob\nimport logging\nimport os\nfrom urllib.parse import urlsplit\n\nimport snapcraft\nimport snapcraft.common\nimport snapcraft.plugins.jdk\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass AntPlugin(snapcraft.plugins.jdk.JdkPlugin):\n\n @classmethod\n def schema(cls):\n schema = super().schema()\n schema['properties']['ant-properties'] = {\n 'type': 'object',\n 'default': {},\n }\n schema['properties']['ant-build-targets'] = {\n 'type': 'array',\n 'uniqueItems': True,\n 'items': {\n 'type': 'string',\n },\n 'default': [],\n }\n return schema\n\n def __init__(self, name, options, project):\n super().__init__(name, options, project)\n self.build_packages.append('ant')\n\n @classmethod\n def get_build_properties(cls):\n # Inform Snapcraft of the properties associated with building. If these\n # change in the YAML Snapcraft will consider the build step dirty.\n return ['ant-build-targets', 'ant-properties']\n\n def build(self):\n super().build()\n\n command = ['ant']\n\n if self.options.ant_build_targets:\n command.extend(self.options.ant_build_targets)\n\n for prop, value in self.options.ant_properties.items():\n command.extend(['-D{}={}'.format(prop, value)])\n\n self.run(command)\n files = glob.glob(os.path.join(self.builddir, 'target', '*.jar'))\n if files:\n jardir = os.path.join(self.installdir, 'jar')\n os.makedirs(jardir)\n for f in files:\n base = os.path.basename(f)\n os.link(f, os.path.join(jardir, base))\n\n def get_proxy_options(self, scheme):\n proxy = os.environ.get('{}_proxy'.format(scheme))\n if proxy:\n parsed = urlsplit(proxy)\n if parsed.hostname is not None:\n yield '-D{}.proxyHost={}'.format(scheme, parsed.hostname)\n if parsed.port is not None:\n yield '-D{}.proxyPort={}'.format(scheme, parsed.port)\n if parsed.username is not None:\n yield '-D{}.proxyUser={}'.format(scheme, parsed.username)\n if parsed.password is not None:\n yield '-D{}.proxyPassword={}'.format(scheme, parsed.password)\n\n def env(self, root):\n env = super().env(root)\n jars = glob.glob(os.path.join(self.installdir, 'jar', '*.jar'))\n if jars:\n jars = [os.path.join(root, 'jar',\n os.path.basename(x)) for x in sorted(jars)]\n env.extend(\n ['CLASSPATH={}:$CLASSPATH'.format(':'.join(jars))])\n # Getting ant to use a proxy requires a little work; the JRE doesn't\n # help as much as it should. (java.net.useSystemProxies=true ought\n # to do the trick, but it relies on desktop configuration rather\n # than using the standard environment variables.)\n ant_opts = []\n ant_opts.extend(self.get_proxy_options('http'))\n ant_opts.extend(self.get_proxy_options('https'))\n if ant_opts:\n env.append(\"ANT_OPTS='{}'\".format(\n ' '.join(opt.replace(\"'\", \"'\\\\''\") for opt in ant_opts)))\n return env\n", "path": "snapcraft/plugins/ant.py"}, {"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2016 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"This plugin is useful for building parts that use gradle.\n\nThe gradle build system is commonly used to build Java projects.\nThe plugin requires a pom.xml in the root of the source tree.\n\nThis plugin uses the common plugin keywords as well as those for \"sources\".\nFor more information check the 'plugins' topic for the former and the\n'sources' topic for the latter.\n\nAdditionally, this plugin uses the following plugin-specific keywords:\n\n - gradle-options:\n (list of strings)\n Flags to pass to the build using the gradle semantics for parameters.\n The 'jar' option is always passed in as the last parameter.\n\n - gradle-output-dir:\n (string; default: 'build/libs')\n The output directory where the resulting jar or war files from gradle[w]\n are generated.\n\"\"\"\n\nimport glob\nimport logging\nimport os\nimport urllib.parse\nimport snapcraft\nimport snapcraft.common\nimport snapcraft.plugins.jdk\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass GradlePlugin(snapcraft.plugins.jdk.JdkPlugin):\n\n @classmethod\n def schema(cls):\n schema = super().schema()\n schema['properties']['gradle-options'] = {\n 'type': 'array',\n 'minitems': 1,\n 'uniqueItems': True,\n 'items': {\n 'type': 'string',\n },\n 'default': [],\n }\n schema['properties']['gradle-output-dir'] = {\n 'type': 'string',\n 'default': 'build/libs',\n }\n\n return schema\n\n def __init__(self, name, options, project):\n super().__init__(name, options, project)\n filename = os.path.join(os.getcwd(), 'gradlew')\n if not os.path.isfile(filename):\n self.build_packages.append('gradle')\n self.build_packages.append('ca-certificates-java')\n\n @classmethod\n def get_build_properties(cls):\n # Inform Snapcraft of the properties associated with building. 
If these\n # change in the YAML Snapcraft will consider the build step dirty.\n return super().get_build_properties() + ['gradle-options',\n 'gradle-output-dir']\n\n def build(self):\n super().build()\n filename = os.path.join(os.getcwd(), 'gradlew')\n if os.path.isfile(filename):\n gradle_cmd = ['./gradlew']\n else:\n gradle_cmd = ['gradle']\n self.run(gradle_cmd +\n self._get_proxy_options() +\n self.options.gradle_options + ['jar'])\n\n src = os.path.join(self.builddir, self.options.gradle_output_dir)\n jarfiles = glob.glob(os.path.join(src, '*.jar'))\n warfiles = glob.glob(os.path.join(src, '*.war'))\n\n if len(jarfiles) > 0:\n basedir = 'jar'\n elif len(warfiles) > 0:\n basedir = 'war'\n jarfiles = warfiles\n else:\n raise RuntimeError(\"Could not find any built jar files for part\")\n\n snapcraft.file_utils.link_or_copy_tree(\n src, os.path.join(self.installdir, basedir),\n copy_function=lambda src, dst:\n snapcraft.file_utils.link_or_copy(src, dst, self.installdir))\n\n def _get_proxy_options(self):\n proxy_options = []\n for var in ('http', 'https'):\n proxy = os.environ.get('{}_proxy'.format(var), False)\n if proxy:\n parsed_url = urllib.parse.urlparse(proxy)\n proxy_options.append('-D{}.proxyHost={}'.format(\n var, parsed_url.hostname))\n if parsed_url.port:\n proxy_options.append(\n '-D{}.proxyPort={}'.format(var, parsed_url.port))\n if parsed_url.username:\n proxy_options.append(\n '-D{}.proxyUser={}'.format(var, parsed_url.username))\n if parsed_url.password:\n proxy_options.append(\n '-D{}.proxyPassword={}'.format(\n var, parsed_url.password))\n return proxy_options\n", "path": "snapcraft/plugins/gradle.py"}]} | 3,009 | 418 |
gh_patches_debug_10883 | rasdani/github-patches | git_diff | ansible-collections__community.general-4853 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sudoers module does not handle state: absent properly
### Summary
The `community.general.sudoers` module does not handle `state: absent` properly.
If the file specified by `name` does not exist, it gets created, with possibly invalid content. As a side effect, `commands` and `user`/`group` are required, although these options do not make a lot of sense with `state: absent`.
If the file specified by `name` exists, it is (correctly) deleted. Of course, the next invocation of the play creates it again.
### Issue Type
Bug Report
### Component Name
sudoers
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.12.5]
config file = ...
configured module search path = [...]
ansible python module location = /usr/lib/python3.9/site-packages/ansible
ansible collection location = /usr/share/ansible/collections
executable location = /usr/lib/python-exec/python3.9/ansible
python version = 3.9.12 (main, Jun 17 2022, 16:04:33) [GCC 11.3.0]
jinja version = 3.1.2
libyaml = True
```
### Community.general Version
```console (paste below)
$ ansible-galaxy collection list community.general
# /usr/lib/python3.9/site-packages/ansible_collections
Collection Version
----------------- -------
community.general 4.8.0
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
Target OS is Debian 11.
### Steps to Reproduce
```yaml (paste below)
---
- hosts: all
gather_facts: false
tasks:
- community.general.sudoers:
name: test
commands: []
user: root
state: absent
- ansible.builtin.stat:
path: /etc/sudoers.d/test
register: _stat_sudoers
- ansible.builtin.assert:
that:
- "not _stat_sudoers.stat.exists"
```
Note: `/etc/sudoers.d/test` is assumed not to exist prior to running this play.
### Expected Results
The play never fails, i.e. `/etc/sudoers.d/test` gets deleted/not created.
### Actual Results
The assertion fails on odd-numbered invocations of the play (the first, third, and so on): the file `/etc/sudoers.d/test` gets created by the `sudoers` module.
The assertion passes on even-numbered invocations (the second, fourth, and so on), because the file left behind by the previous run is then deleted.
```console (paste below)
PLAY [all] **********************************************************************************************
TASK [community.general.sudoers] ************************************************************************
changed: [test]
TASK [ansible.builtin.stat] *****************************************************************************
ok: [test]
TASK [ansible.builtin.assert] ***************************************************************************
fatal: [test]: FAILED! => {
"assertion": "not _stat_sudoers.stat.exists",
"changed": false,
"evaluated_to": false,
"msg": "Assertion failed"
}
PLAY RECAP **********************************************************************************************
test : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/system/sudoers.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4
5 # Copyright: (c) 2019, Jon Ellis (@JonEllis) <[email protected]>
6 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
7
8 from __future__ import absolute_import, division, print_function
9 __metaclass__ = type
10
11
12 DOCUMENTATION = '''
13 ---
14 module: sudoers
15 short_description: Manage sudoers files
16 version_added: "4.3.0"
17 description:
18 - This module allows for the manipulation of sudoers files.
19 author:
20 - "Jon Ellis (@JonEllis) <[email protected]>"
21 options:
22 commands:
23 description:
24 - The commands allowed by the sudoers rule.
25 - Multiple can be added by passing a list of commands.
26 - Use C(ALL) for all commands.
27 type: list
28 elements: str
29 group:
30 description:
31 - The name of the group for the sudoers rule.
32 - This option cannot be used in conjunction with I(user).
33 type: str
34 name:
35 required: true
36 description:
37 - The name of the sudoers rule.
38 - This will be used for the filename for the sudoers file managed by this rule.
39 type: str
40 nopassword:
41 description:
42 - Whether a password will be required to run the sudo'd command.
43 default: true
44 type: bool
45 runas:
46 description:
47 - Specify the target user the command(s) will run as.
48 type: str
49 version_added: 4.7.0
50 sudoers_path:
51 description:
52 - The path which sudoers config files will be managed in.
53 default: /etc/sudoers.d
54 type: str
55 state:
56 default: "present"
57 choices:
58 - present
59 - absent
60 description:
61 - Whether the rule should exist or not.
62 type: str
63 user:
64 description:
65 - The name of the user for the sudoers rule.
66 - This option cannot be used in conjunction with I(group).
67 type: str
68 '''
69
70 EXAMPLES = '''
71 - name: Allow the backup user to sudo /usr/local/bin/backup
72 community.general.sudoers:
73 name: allow-backup
74 state: present
75 user: backup
76 commands: /usr/local/bin/backup
77
78 - name: Allow the bob user to run any commands as alice with sudo -u alice
79 community.general.sudoers:
80 name: bob-do-as-alice
81 state: present
82 user: bob
83 runas: alice
84 commands: ALL
85
86 - name: >-
87 Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics
88 without requiring a password
89 community.general.sudoers:
90 name: monitor-app
91 group: monitoring
92 commands: /usr/local/bin/gather-app-metrics
93
94 - name: >-
95 Allow the alice user to run sudo /bin/systemctl restart my-service or
96 sudo /bin/systemctl reload my-service, but a password is required
97 community.general.sudoers:
98 name: alice-service
99 user: alice
100 commands:
101 - /bin/systemctl restart my-service
102 - /bin/systemctl reload my-service
103 nopassword: false
104
105 - name: Revoke the previous sudo grants given to the alice user
106 community.general.sudoers:
107 name: alice-service
108 state: absent
109 '''
110
111 import os
112 from ansible.module_utils.basic import AnsibleModule
113 from ansible.module_utils.common.text.converters import to_native
114
115
116 class Sudoers(object):
117
118 FILE_MODE = 0o440
119
120 def __init__(self, module):
121 self.check_mode = module.check_mode
122 self.name = module.params['name']
123 self.user = module.params['user']
124 self.group = module.params['group']
125 self.state = module.params['state']
126 self.nopassword = module.params['nopassword']
127 self.runas = module.params['runas']
128 self.sudoers_path = module.params['sudoers_path']
129 self.file = os.path.join(self.sudoers_path, self.name)
130 self.commands = module.params['commands']
131
132 def write(self):
133 if self.check_mode:
134 return
135
136 with open(self.file, 'w') as f:
137 f.write(self.content())
138
139 os.chmod(self.file, self.FILE_MODE)
140
141 def delete(self):
142 if self.check_mode:
143 return
144
145 os.remove(self.file)
146
147 def exists(self):
148 return os.path.exists(self.file)
149
150 def matches(self):
151 with open(self.file, 'r') as f:
152 content_matches = f.read() == self.content()
153
154 current_mode = os.stat(self.file).st_mode & 0o777
155 mode_matches = current_mode == self.FILE_MODE
156
157 return content_matches and mode_matches
158
159 def content(self):
160 if self.user:
161 owner = self.user
162 elif self.group:
163 owner = '%{group}'.format(group=self.group)
164
165 commands_str = ', '.join(self.commands)
166 nopasswd_str = 'NOPASSWD:' if self.nopassword else ''
167 runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''
168 return "{owner} ALL={runas}{nopasswd} {commands}\n".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)
169
170 def run(self):
171 if self.state == 'absent' and self.exists():
172 self.delete()
173 return True
174
175 if self.exists() and self.matches():
176 return False
177
178 self.write()
179 return True
180
181
182 def main():
183 argument_spec = {
184 'commands': {
185 'type': 'list',
186 'elements': 'str',
187 },
188 'group': {},
189 'name': {
190 'required': True,
191 },
192 'nopassword': {
193 'type': 'bool',
194 'default': True,
195 },
196 'runas': {
197 'type': 'str',
198 'default': None,
199 },
200 'sudoers_path': {
201 'type': 'str',
202 'default': '/etc/sudoers.d',
203 },
204 'state': {
205 'default': 'present',
206 'choices': ['present', 'absent'],
207 },
208 'user': {},
209 }
210
211 module = AnsibleModule(
212 argument_spec=argument_spec,
213 mutually_exclusive=[['user', 'group']],
214 supports_check_mode=True,
215 required_if=[('state', 'present', ['commands'])],
216 )
217
218 sudoers = Sudoers(module)
219
220 try:
221 changed = sudoers.run()
222 module.exit_json(changed=changed)
223 except Exception as e:
224 module.fail_json(msg=to_native(e))
225
226
227 if __name__ == '__main__':
228 main()
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/system/sudoers.py b/plugins/modules/system/sudoers.py
--- a/plugins/modules/system/sudoers.py
+++ b/plugins/modules/system/sudoers.py
@@ -168,9 +168,12 @@
return "{owner} ALL={runas}{nopasswd} {commands}\n".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)
def run(self):
- if self.state == 'absent' and self.exists():
- self.delete()
- return True
+ if self.state == 'absent':
+ if self.exists():
+ self.delete()
+ return True
+ else:
+ return False
if self.exists() and self.matches():
return False
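
A small sketch (hypothetical path) of the behaviour the patch enforces: with `state: absent` the module now returns early in both branches, so a file that does not exist is reported as unchanged rather than being created by falling through to the write:

```python
# Sketch only: the absent-state handling after the patch, reduced to a function.
import os

def handle_absent(path):
    if os.path.exists(path):
        os.remove(path)
        return True   # changed: existing sudoers rule removed
    return False      # unchanged: nothing to remove, and nothing gets created

print(handle_absent("/etc/sudoers.d/test"))  # False when the file is absent
```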
| {"golden_diff": "diff --git a/plugins/modules/system/sudoers.py b/plugins/modules/system/sudoers.py\n--- a/plugins/modules/system/sudoers.py\n+++ b/plugins/modules/system/sudoers.py\n@@ -168,9 +168,12 @@\n return \"{owner} ALL={runas}{nopasswd} {commands}\\n\".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)\n \n def run(self):\n- if self.state == 'absent' and self.exists():\n- self.delete()\n- return True\n+ if self.state == 'absent':\n+ if self.exists():\n+ self.delete()\n+ return True\n+ else:\n+ return False\n \n if self.exists() and self.matches():\n return False\n", "issue": "sudoers module does not handle state: absent properly\n### Summary\n\nThe `community.general.sudoers` module does not handle `state: absent` properly.\r\nIf the file specified by `name` does not exist, it gets created, with possibly invalid content. As a side effect, `commands` and `user`/`group` is required, although these options do not make a lot of sense with `state: absent`.\r\nIf the file specified by `name` exists, it is (correctly) deleted. Of course, the next invocation of the play creates it again.\n\n### Issue Type\n\nBug Report\n\n### Component Name\n\nsudoers\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.5]\r\n config file = ...\r\n configured module search path = [...]\r\n ansible python module location = /usr/lib/python3.9/site-packages/ansible\r\n ansible collection location = /usr/share/ansible/collections\r\n executable location = /usr/lib/python-exec/python3.9/ansible\r\n python version = 3.9.12 (main, Jun 17 2022, 16:04:33) [GCC 11.3.0]\r\n jinja version = 3.1.2\r\n libyaml = True\r\n```\n\n### Community.general Version\n\n```console (paste below)\r\n$ ansible-galaxy collection list community.general\r\n# /usr/lib/python3.9/site-packages/ansible_collections\r\nCollection Version\r\n----------------- -------\r\ncommunity.general 4.8.0 \r\n```\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\nTarget OS is Debian 11.\n\n### Steps to Reproduce\n\n```yaml (paste below)\r\n---\r\n- hosts: all\r\n gather_facts: false\r\n tasks:\r\n - community.general.sudoers:\r\n name: test\r\n commands: []\r\n user: root\r\n state: absent\r\n\r\n - ansible.builtin.stat:\r\n path: /etc/sudoers.d/test\r\n register: _stat_sudoers\r\n\r\n - ansible.builtin.assert:\r\n that:\r\n - \"not _stat_sudoers.stat.exists\"\r\n```\r\nNote: `/etc/sudoers.d/test` is assumed not to exist prior to running this play.\n\n### Expected Results\n\nThe play never fails, i.e. `/etc/sudoers.d/test` gets deleted/not created.\n\n### Actual Results\n\nThe assertion fails on every second invocation of the play (starting with the first). The file `/etc/sudoers.d/test` gets created by the `sudoers` module.\r\nThe assertion passes on every other invocation of the play (starting with the second).\r\n\r\n```console (paste below)\r\nPLAY [all] **********************************************************************************************\r\n\r\nTASK [community.general.sudoers] ************************************************************************\r\nchanged: [test]\r\n\r\nTASK [ansible.builtin.stat] *****************************************************************************\r\nok: [test]\r\n\r\nTASK [ansible.builtin.assert] ***************************************************************************\r\nfatal: [test]: FAILED! 
=> {\r\n \"assertion\": \"not _stat_sudoers.stat.exists\",\r\n \"changed\": false,\r\n \"evaluated_to\": false,\r\n \"msg\": \"Assertion failed\"\r\n}\r\n\r\nPLAY RECAP **********************************************************************************************\r\ntest : ok=2 changed=1 unreachable=0 failed=1 skipped=0 rescued=0 ignored=0\r\n```\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n# Copyright: (c) 2019, Jon Ellis (@JonEllis) <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: sudoers\nshort_description: Manage sudoers files\nversion_added: \"4.3.0\"\ndescription:\n - This module allows for the manipulation of sudoers files.\nauthor:\n - \"Jon Ellis (@JonEllis) <[email protected]>\"\noptions:\n commands:\n description:\n - The commands allowed by the sudoers rule.\n - Multiple can be added by passing a list of commands.\n - Use C(ALL) for all commands.\n type: list\n elements: str\n group:\n description:\n - The name of the group for the sudoers rule.\n - This option cannot be used in conjunction with I(user).\n type: str\n name:\n required: true\n description:\n - The name of the sudoers rule.\n - This will be used for the filename for the sudoers file managed by this rule.\n type: str\n nopassword:\n description:\n - Whether a password will be required to run the sudo'd command.\n default: true\n type: bool\n runas:\n description:\n - Specify the target user the command(s) will run as.\n type: str\n version_added: 4.7.0\n sudoers_path:\n description:\n - The path which sudoers config files will be managed in.\n default: /etc/sudoers.d\n type: str\n state:\n default: \"present\"\n choices:\n - present\n - absent\n description:\n - Whether the rule should exist or not.\n type: str\n user:\n description:\n - The name of the user for the sudoers rule.\n - This option cannot be used in conjunction with I(group).\n type: str\n'''\n\nEXAMPLES = '''\n- name: Allow the backup user to sudo /usr/local/bin/backup\n community.general.sudoers:\n name: allow-backup\n state: present\n user: backup\n commands: /usr/local/bin/backup\n\n- name: Allow the bob user to run any commands as alice with sudo -u alice\n community.general.sudoers:\n name: bob-do-as-alice\n state: present\n user: bob\n runas: alice\n commands: ALL\n\n- name: >-\n Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics\n without requiring a password\n community.general.sudoers:\n name: monitor-app\n group: monitoring\n commands: /usr/local/bin/gather-app-metrics\n\n- name: >-\n Allow the alice user to run sudo /bin/systemctl restart my-service or\n sudo /bin/systemctl reload my-service, but a password is required\n community.general.sudoers:\n name: alice-service\n user: alice\n commands:\n - /bin/systemctl restart my-service\n - /bin/systemctl reload my-service\n nopassword: false\n\n- name: Revoke the previous sudo grants given to the alice user\n community.general.sudoers:\n name: alice-service\n state: absent\n'''\n\nimport os\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.common.text.converters import to_native\n\n\nclass Sudoers(object):\n\n FILE_MODE = 0o440\n\n def __init__(self, module):\n self.check_mode = module.check_mode\n self.name = module.params['name']\n 
self.user = module.params['user']\n self.group = module.params['group']\n self.state = module.params['state']\n self.nopassword = module.params['nopassword']\n self.runas = module.params['runas']\n self.sudoers_path = module.params['sudoers_path']\n self.file = os.path.join(self.sudoers_path, self.name)\n self.commands = module.params['commands']\n\n def write(self):\n if self.check_mode:\n return\n\n with open(self.file, 'w') as f:\n f.write(self.content())\n\n os.chmod(self.file, self.FILE_MODE)\n\n def delete(self):\n if self.check_mode:\n return\n\n os.remove(self.file)\n\n def exists(self):\n return os.path.exists(self.file)\n\n def matches(self):\n with open(self.file, 'r') as f:\n content_matches = f.read() == self.content()\n\n current_mode = os.stat(self.file).st_mode & 0o777\n mode_matches = current_mode == self.FILE_MODE\n\n return content_matches and mode_matches\n\n def content(self):\n if self.user:\n owner = self.user\n elif self.group:\n owner = '%{group}'.format(group=self.group)\n\n commands_str = ', '.join(self.commands)\n nopasswd_str = 'NOPASSWD:' if self.nopassword else ''\n runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''\n return \"{owner} ALL={runas}{nopasswd} {commands}\\n\".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)\n\n def run(self):\n if self.state == 'absent' and self.exists():\n self.delete()\n return True\n\n if self.exists() and self.matches():\n return False\n\n self.write()\n return True\n\n\ndef main():\n argument_spec = {\n 'commands': {\n 'type': 'list',\n 'elements': 'str',\n },\n 'group': {},\n 'name': {\n 'required': True,\n },\n 'nopassword': {\n 'type': 'bool',\n 'default': True,\n },\n 'runas': {\n 'type': 'str',\n 'default': None,\n },\n 'sudoers_path': {\n 'type': 'str',\n 'default': '/etc/sudoers.d',\n },\n 'state': {\n 'default': 'present',\n 'choices': ['present', 'absent'],\n },\n 'user': {},\n }\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n mutually_exclusive=[['user', 'group']],\n supports_check_mode=True,\n required_if=[('state', 'present', ['commands'])],\n )\n\n sudoers = Sudoers(module)\n\n try:\n changed = sudoers.run()\n module.exit_json(changed=changed)\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/sudoers.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\n# Copyright: (c) 2019, Jon Ellis (@JonEllis) <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: sudoers\nshort_description: Manage sudoers files\nversion_added: \"4.3.0\"\ndescription:\n - This module allows for the manipulation of sudoers files.\nauthor:\n - \"Jon Ellis (@JonEllis) <[email protected]>\"\noptions:\n commands:\n description:\n - The commands allowed by the sudoers rule.\n - Multiple can be added by passing a list of commands.\n - Use C(ALL) for all commands.\n type: list\n elements: str\n group:\n description:\n - The name of the group for the sudoers rule.\n - This option cannot be used in conjunction with I(user).\n type: str\n name:\n required: true\n description:\n - The name of the sudoers rule.\n - This will be used for the filename for the sudoers file managed by this rule.\n type: str\n nopassword:\n description:\n - Whether a password will be required to 
run the sudo'd command.\n default: true\n type: bool\n runas:\n description:\n - Specify the target user the command(s) will run as.\n type: str\n version_added: 4.7.0\n sudoers_path:\n description:\n - The path which sudoers config files will be managed in.\n default: /etc/sudoers.d\n type: str\n state:\n default: \"present\"\n choices:\n - present\n - absent\n description:\n - Whether the rule should exist or not.\n type: str\n user:\n description:\n - The name of the user for the sudoers rule.\n - This option cannot be used in conjunction with I(group).\n type: str\n'''\n\nEXAMPLES = '''\n- name: Allow the backup user to sudo /usr/local/bin/backup\n community.general.sudoers:\n name: allow-backup\n state: present\n user: backup\n commands: /usr/local/bin/backup\n\n- name: Allow the bob user to run any commands as alice with sudo -u alice\n community.general.sudoers:\n name: bob-do-as-alice\n state: present\n user: bob\n runas: alice\n commands: ALL\n\n- name: >-\n Allow the monitoring group to run sudo /usr/local/bin/gather-app-metrics\n without requiring a password\n community.general.sudoers:\n name: monitor-app\n group: monitoring\n commands: /usr/local/bin/gather-app-metrics\n\n- name: >-\n Allow the alice user to run sudo /bin/systemctl restart my-service or\n sudo /bin/systemctl reload my-service, but a password is required\n community.general.sudoers:\n name: alice-service\n user: alice\n commands:\n - /bin/systemctl restart my-service\n - /bin/systemctl reload my-service\n nopassword: false\n\n- name: Revoke the previous sudo grants given to the alice user\n community.general.sudoers:\n name: alice-service\n state: absent\n'''\n\nimport os\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.common.text.converters import to_native\n\n\nclass Sudoers(object):\n\n FILE_MODE = 0o440\n\n def __init__(self, module):\n self.check_mode = module.check_mode\n self.name = module.params['name']\n self.user = module.params['user']\n self.group = module.params['group']\n self.state = module.params['state']\n self.nopassword = module.params['nopassword']\n self.runas = module.params['runas']\n self.sudoers_path = module.params['sudoers_path']\n self.file = os.path.join(self.sudoers_path, self.name)\n self.commands = module.params['commands']\n\n def write(self):\n if self.check_mode:\n return\n\n with open(self.file, 'w') as f:\n f.write(self.content())\n\n os.chmod(self.file, self.FILE_MODE)\n\n def delete(self):\n if self.check_mode:\n return\n\n os.remove(self.file)\n\n def exists(self):\n return os.path.exists(self.file)\n\n def matches(self):\n with open(self.file, 'r') as f:\n content_matches = f.read() == self.content()\n\n current_mode = os.stat(self.file).st_mode & 0o777\n mode_matches = current_mode == self.FILE_MODE\n\n return content_matches and mode_matches\n\n def content(self):\n if self.user:\n owner = self.user\n elif self.group:\n owner = '%{group}'.format(group=self.group)\n\n commands_str = ', '.join(self.commands)\n nopasswd_str = 'NOPASSWD:' if self.nopassword else ''\n runas_str = '({runas})'.format(runas=self.runas) if self.runas is not None else ''\n return \"{owner} ALL={runas}{nopasswd} {commands}\\n\".format(owner=owner, runas=runas_str, nopasswd=nopasswd_str, commands=commands_str)\n\n def run(self):\n if self.state == 'absent':\n if self.exists():\n self.delete()\n return True\n else:\n return False\n\n if self.exists() and self.matches():\n return False\n\n self.write()\n return True\n\n\ndef main():\n argument_spec = {\n 'commands': 
{\n 'type': 'list',\n 'elements': 'str',\n },\n 'group': {},\n 'name': {\n 'required': True,\n },\n 'nopassword': {\n 'type': 'bool',\n 'default': True,\n },\n 'runas': {\n 'type': 'str',\n 'default': None,\n },\n 'sudoers_path': {\n 'type': 'str',\n 'default': '/etc/sudoers.d',\n },\n 'state': {\n 'default': 'present',\n 'choices': ['present', 'absent'],\n },\n 'user': {},\n }\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n mutually_exclusive=[['user', 'group']],\n supports_check_mode=True,\n required_if=[('state', 'present', ['commands'])],\n )\n\n sudoers = Sudoers(module)\n\n try:\n changed = sudoers.run()\n module.exit_json(changed=changed)\n except Exception as e:\n module.fail_json(msg=to_native(e))\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/sudoers.py"}]} | 3,103 | 178 |
gh_patches_debug_14555 | rasdani/github-patches | git_diff | inventree__InvenTree-1446 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error exporting BOM in YAML format
The error is:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `InvenTree/part/admin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals
3
4 from django.contrib import admin
5
6 from import_export.admin import ImportExportModelAdmin
7 from import_export.resources import ModelResource
8 from import_export.fields import Field
9 import import_export.widgets as widgets
10
11 from .models import PartCategory, Part
12 from .models import PartAttachment, PartStar, PartRelated
13 from .models import BomItem
14 from .models import PartParameterTemplate, PartParameter
15 from .models import PartCategoryParameterTemplate
16 from .models import PartTestTemplate
17 from .models import PartSellPriceBreak
18
19 from InvenTree.helpers import normalize
20
21 from stock.models import StockLocation
22 from company.models import SupplierPart
23
24
25 class PartResource(ModelResource):
26 """ Class for managing Part data import/export """
27
28 # ForeignKey fields
29 category = Field(attribute='category', widget=widgets.ForeignKeyWidget(PartCategory))
30
31 default_location = Field(attribute='default_location', widget=widgets.ForeignKeyWidget(StockLocation))
32
33 default_supplier = Field(attribute='default_supplier', widget=widgets.ForeignKeyWidget(SupplierPart))
34
35 category_name = Field(attribute='category__name', readonly=True)
36
37 variant_of = Field(attribute='variant_of', widget=widgets.ForeignKeyWidget(Part))
38
39 suppliers = Field(attribute='supplier_count', readonly=True)
40
41 # Extra calculated meta-data (readonly)
42 in_stock = Field(attribute='total_stock', readonly=True, widget=widgets.IntegerWidget())
43
44 on_order = Field(attribute='on_order', readonly=True, widget=widgets.IntegerWidget())
45
46 used_in = Field(attribute='used_in_count', readonly=True, widget=widgets.IntegerWidget())
47
48 allocated = Field(attribute='allocation_count', readonly=True, widget=widgets.IntegerWidget())
49
50 building = Field(attribute='quantity_being_built', readonly=True, widget=widgets.IntegerWidget())
51
52 class Meta:
53 model = Part
54 skip_unchanged = True
55 report_skipped = False
56 clean_model_instances = True
57 exclude = [
58 'bom_checksum', 'bom_checked_by', 'bom_checked_date',
59 'lft', 'rght', 'tree_id', 'level',
60 ]
61
62 def get_queryset(self):
63 """ Prefetch related data for quicker access """
64
65 query = super().get_queryset()
66 query = query.prefetch_related(
67 'category',
68 'used_in',
69 'builds',
70 'supplier_parts__purchase_order_line_items',
71 'stock_items__allocations'
72 )
73
74 return query
75
76
77 class PartAdmin(ImportExportModelAdmin):
78
79 resource_class = PartResource
80
81 list_display = ('full_name', 'description', 'total_stock', 'category')
82
83 list_filter = ('active', 'assembly', 'is_template', 'virtual')
84
85 search_fields = ('name', 'description', 'category__name', 'category__description', 'IPN')
86
87
88 class PartCategoryResource(ModelResource):
89 """ Class for managing PartCategory data import/export """
90
91 parent = Field(attribute='parent', widget=widgets.ForeignKeyWidget(PartCategory))
92
93 parent_name = Field(attribute='parent__name', readonly=True)
94
95 default_location = Field(attribute='default_location', widget=widgets.ForeignKeyWidget(StockLocation))
96
97 class Meta:
98 model = PartCategory
99 skip_unchanged = True
100 report_skipped = False
101 clean_model_instances = True
102
103 exclude = [
104 # Exclude MPTT internal model fields
105 'lft', 'rght', 'tree_id', 'level',
106 ]
107
108 def after_import(self, dataset, result, using_transactions, dry_run, **kwargs):
109
110 super().after_import(dataset, result, using_transactions, dry_run, **kwargs)
111
112 # Rebuild the PartCategory tree(s)
113 PartCategory.objects.rebuild()
114
115
116 class PartCategoryAdmin(ImportExportModelAdmin):
117
118 resource_class = PartCategoryResource
119
120 list_display = ('name', 'pathstring', 'description')
121
122 search_fields = ('name', 'description')
123
124
125 class PartRelatedAdmin(admin.ModelAdmin):
126 ''' Class to manage PartRelated objects '''
127 pass
128
129
130 class PartAttachmentAdmin(admin.ModelAdmin):
131
132 list_display = ('part', 'attachment', 'comment')
133
134
135 class PartStarAdmin(admin.ModelAdmin):
136
137 list_display = ('part', 'user')
138
139
140 class PartTestTemplateAdmin(admin.ModelAdmin):
141
142 list_display = ('part', 'test_name', 'required')
143
144
145 class BomItemResource(ModelResource):
146 """ Class for managing BomItem data import/export """
147
148 level = Field(attribute='level', readonly=True)
149
150 bom_id = Field(attribute='pk')
151
152 # ID of the parent part
153 parent_part_id = Field(attribute='part', widget=widgets.ForeignKeyWidget(Part))
154
155 # IPN of the parent part
156 parent_part_ipn = Field(attribute='part__IPN', readonly=True)
157
158 # Name of the parent part
159 parent_part_name = Field(attribute='part__name', readonly=True)
160
161 # ID of the sub-part
162 part_id = Field(attribute='sub_part', widget=widgets.ForeignKeyWidget(Part))
163
164 # IPN of the sub-part
165 part_ipn = Field(attribute='sub_part__IPN', readonly=True)
166
167 # Name of the sub-part
168 part_name = Field(attribute='sub_part__name', readonly=True)
169
170 # Description of the sub-part
171 part_description = Field(attribute='sub_part__description', readonly=True)
172
173 # Is the sub-part itself an assembly?
174 sub_assembly = Field(attribute='sub_part__assembly', readonly=True)
175
176 def dehydrate_quantity(self, item):
177 """
178 Special consideration for the 'quantity' field on data export.
179 We do not want a spreadsheet full of "1.0000" (we'd rather "1")
180
181 Ref: https://django-import-export.readthedocs.io/en/latest/getting_started.html#advanced-data-manipulation-on-export
182 """
183 return normalize(item.quantity)
184
185 def before_export(self, queryset, *args, **kwargs):
186
187 self.is_importing = kwargs.get('importing', False)
188
189 def get_fields(self, **kwargs):
190 """
191 If we are exporting for the purposes of generating
192 a 'bom-import' template, there are some fields which
193 we are not interested in.
194 """
195
196 fields = super().get_fields(**kwargs)
197
198 # If we are not generating an "import" template,
199 # just return the complete list of fields
200 if not self.is_importing:
201 return fields
202
203 # Otherwise, remove some fields we are not interested in
204
205 idx = 0
206
207 to_remove = [
208 'level',
209 'bom_id',
210 'parent_part_id',
211 'parent_part_ipn',
212 'parent_part_name',
213 'part_description',
214 'sub_assembly'
215 ]
216
217 while idx < len(fields):
218
219 if fields[idx].column_name.lower() in to_remove:
220 del fields[idx]
221 else:
222 idx += 1
223
224 return fields
225
226 class Meta:
227 model = BomItem
228 skip_unchanged = True
229 report_skipped = False
230 clean_model_instances = True
231
232 exclude = [
233 'checksum',
234 'id',
235 'part',
236 'sub_part',
237 ]
238
239
240 class BomItemAdmin(ImportExportModelAdmin):
241
242 resource_class = BomItemResource
243
244 list_display = ('part', 'sub_part', 'quantity')
245
246 search_fields = ('part__name', 'part__description', 'sub_part__name', 'sub_part__description')
247
248
249 class ParameterTemplateAdmin(ImportExportModelAdmin):
250 list_display = ('name', 'units')
251
252
253 class ParameterResource(ModelResource):
254 """ Class for managing PartParameter data import/export """
255
256 part = Field(attribute='part', widget=widgets.ForeignKeyWidget(Part))
257
258 part_name = Field(attribute='part__name', readonly=True)
259
260 template = Field(attribute='template', widget=widgets.ForeignKeyWidget(PartParameterTemplate))
261
262 template_name = Field(attribute='template__name', readonly=True)
263
264 class Meta:
265 model = PartParameter
266 skip_unchanged = True
267 report_skipped = False
268 clean_model_instance = True
269
270
271 class ParameterAdmin(ImportExportModelAdmin):
272
273 resource_class = ParameterResource
274
275 list_display = ('part', 'template', 'data')
276
277
278 class PartCategoryParameterAdmin(admin.ModelAdmin):
279
280 pass
281
282
283 class PartSellPriceBreakAdmin(admin.ModelAdmin):
284
285 class Meta:
286 model = PartSellPriceBreak
287
288 list_display = ('part', 'quantity', 'price',)
289
290
291 admin.site.register(Part, PartAdmin)
292 admin.site.register(PartCategory, PartCategoryAdmin)
293 admin.site.register(PartRelated, PartRelatedAdmin)
294 admin.site.register(PartAttachment, PartAttachmentAdmin)
295 admin.site.register(PartStar, PartStarAdmin)
296 admin.site.register(BomItem, BomItemAdmin)
297 admin.site.register(PartParameterTemplate, ParameterTemplateAdmin)
298 admin.site.register(PartParameter, ParameterAdmin)
299 admin.site.register(PartCategoryParameterTemplate, PartCategoryParameterAdmin)
300 admin.site.register(PartTestTemplate, PartTestTemplateAdmin)
301 admin.site.register(PartSellPriceBreak, PartSellPriceBreakAdmin)
302
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/InvenTree/part/admin.py b/InvenTree/part/admin.py
--- a/InvenTree/part/admin.py
+++ b/InvenTree/part/admin.py
@@ -16,8 +16,6 @@
from .models import PartTestTemplate
from .models import PartSellPriceBreak
-from InvenTree.helpers import normalize
-
from stock.models import StockLocation
from company.models import SupplierPart
@@ -180,7 +178,7 @@
Ref: https://django-import-export.readthedocs.io/en/latest/getting_started.html#advanced-data-manipulation-on-export
"""
- return normalize(item.quantity)
+ return float(item.quantity)
def before_export(self, queryset, *args, **kwargs):
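This change swaps `normalize(item.quantity)` — which presumably returns a `decimal.Decimal` via `InvenTree.helpers` — for a plain `float`. A hedged reading of the fix (the error in the issue is only a screenshot) is that the YAML exporter cannot represent `Decimal` objects, while native floats serialise fine. A minimal sketch of that failure, assuming the export ultimately reaches PyYAML's safe dumper:
```python
import decimal
import yaml

yaml.safe_dump({"quantity": 1.0})                     # fine -> 'quantity: 1.0\n'
yaml.safe_dump({"quantity": decimal.Decimal("1.0")})  # yaml.representer.RepresenterError
```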
| {"golden_diff": "diff --git a/InvenTree/part/admin.py b/InvenTree/part/admin.py\n--- a/InvenTree/part/admin.py\n+++ b/InvenTree/part/admin.py\n@@ -16,8 +16,6 @@\n from .models import PartTestTemplate\n from .models import PartSellPriceBreak\n \n-from InvenTree.helpers import normalize\n-\n from stock.models import StockLocation\n from company.models import SupplierPart\n \n@@ -180,7 +178,7 @@\n \n Ref: https://django-import-export.readthedocs.io/en/latest/getting_started.html#advanced-data-manipulation-on-export\n \"\"\"\n- return normalize(item.quantity)\n+ return float(item.quantity)\n \n def before_export(self, queryset, *args, **kwargs):\n", "issue": "Error exporting BOM in YAML format\nThe error is:\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\n\nfrom import_export.admin import ImportExportModelAdmin\nfrom import_export.resources import ModelResource\nfrom import_export.fields import Field\nimport import_export.widgets as widgets\n\nfrom .models import PartCategory, Part\nfrom .models import PartAttachment, PartStar, PartRelated\nfrom .models import BomItem\nfrom .models import PartParameterTemplate, PartParameter\nfrom .models import PartCategoryParameterTemplate\nfrom .models import PartTestTemplate\nfrom .models import PartSellPriceBreak\n\nfrom InvenTree.helpers import normalize\n\nfrom stock.models import StockLocation\nfrom company.models import SupplierPart\n\n\nclass PartResource(ModelResource):\n \"\"\" Class for managing Part data import/export \"\"\"\n\n # ForeignKey fields\n category = Field(attribute='category', widget=widgets.ForeignKeyWidget(PartCategory))\n \n default_location = Field(attribute='default_location', widget=widgets.ForeignKeyWidget(StockLocation))\n\n default_supplier = Field(attribute='default_supplier', widget=widgets.ForeignKeyWidget(SupplierPart))\n\n category_name = Field(attribute='category__name', readonly=True)\n \n variant_of = Field(attribute='variant_of', widget=widgets.ForeignKeyWidget(Part))\n\n suppliers = Field(attribute='supplier_count', readonly=True)\n\n # Extra calculated meta-data (readonly)\n in_stock = Field(attribute='total_stock', readonly=True, widget=widgets.IntegerWidget())\n\n on_order = Field(attribute='on_order', readonly=True, widget=widgets.IntegerWidget())\n\n used_in = Field(attribute='used_in_count', readonly=True, widget=widgets.IntegerWidget())\n\n allocated = Field(attribute='allocation_count', readonly=True, widget=widgets.IntegerWidget())\n\n building = Field(attribute='quantity_being_built', readonly=True, widget=widgets.IntegerWidget())\n\n class Meta:\n model = Part\n skip_unchanged = True\n report_skipped = False\n clean_model_instances = True\n exclude = [\n 'bom_checksum', 'bom_checked_by', 'bom_checked_date',\n 'lft', 'rght', 'tree_id', 'level',\n ]\n\n def get_queryset(self):\n \"\"\" Prefetch related data for quicker access \"\"\"\n\n query = super().get_queryset()\n query = query.prefetch_related(\n 'category',\n 'used_in',\n 'builds',\n 'supplier_parts__purchase_order_line_items',\n 'stock_items__allocations'\n )\n\n return query\n\n\nclass PartAdmin(ImportExportModelAdmin):\n \n resource_class = PartResource\n\n list_display = ('full_name', 'description', 'total_stock', 'category')\n\n list_filter = ('active', 'assembly', 'is_template', 'virtual')\n\n search_fields = ('name', 'description', 'category__name', 'category__description', 'IPN')\n\n\nclass PartCategoryResource(ModelResource):\n \"\"\" Class for 
managing PartCategory data import/export \"\"\"\n\n parent = Field(attribute='parent', widget=widgets.ForeignKeyWidget(PartCategory))\n\n parent_name = Field(attribute='parent__name', readonly=True)\n\n default_location = Field(attribute='default_location', widget=widgets.ForeignKeyWidget(StockLocation))\n\n class Meta:\n model = PartCategory\n skip_unchanged = True\n report_skipped = False\n clean_model_instances = True\n\n exclude = [\n # Exclude MPTT internal model fields\n 'lft', 'rght', 'tree_id', 'level',\n ]\n\n def after_import(self, dataset, result, using_transactions, dry_run, **kwargs):\n\n super().after_import(dataset, result, using_transactions, dry_run, **kwargs)\n\n # Rebuild the PartCategory tree(s)\n PartCategory.objects.rebuild()\n\n\nclass PartCategoryAdmin(ImportExportModelAdmin):\n\n resource_class = PartCategoryResource\n\n list_display = ('name', 'pathstring', 'description')\n\n search_fields = ('name', 'description')\n\n\nclass PartRelatedAdmin(admin.ModelAdmin):\n ''' Class to manage PartRelated objects '''\n pass\n\n\nclass PartAttachmentAdmin(admin.ModelAdmin):\n\n list_display = ('part', 'attachment', 'comment')\n\n\nclass PartStarAdmin(admin.ModelAdmin):\n\n list_display = ('part', 'user')\n\n\nclass PartTestTemplateAdmin(admin.ModelAdmin):\n\n list_display = ('part', 'test_name', 'required')\n\n\nclass BomItemResource(ModelResource):\n \"\"\" Class for managing BomItem data import/export \"\"\"\n\n level = Field(attribute='level', readonly=True)\n\n bom_id = Field(attribute='pk')\n\n # ID of the parent part\n parent_part_id = Field(attribute='part', widget=widgets.ForeignKeyWidget(Part))\n\n # IPN of the parent part\n parent_part_ipn = Field(attribute='part__IPN', readonly=True)\n\n # Name of the parent part\n parent_part_name = Field(attribute='part__name', readonly=True)\n\n # ID of the sub-part\n part_id = Field(attribute='sub_part', widget=widgets.ForeignKeyWidget(Part))\n\n # IPN of the sub-part\n part_ipn = Field(attribute='sub_part__IPN', readonly=True)\n\n # Name of the sub-part\n part_name = Field(attribute='sub_part__name', readonly=True)\n\n # Description of the sub-part\n part_description = Field(attribute='sub_part__description', readonly=True)\n\n # Is the sub-part itself an assembly?\n sub_assembly = Field(attribute='sub_part__assembly', readonly=True)\n\n def dehydrate_quantity(self, item):\n \"\"\"\n Special consideration for the 'quantity' field on data export.\n We do not want a spreadsheet full of \"1.0000\" (we'd rather \"1\")\n\n Ref: https://django-import-export.readthedocs.io/en/latest/getting_started.html#advanced-data-manipulation-on-export\n \"\"\"\n return normalize(item.quantity)\n\n def before_export(self, queryset, *args, **kwargs):\n\n self.is_importing = kwargs.get('importing', False)\n\n def get_fields(self, **kwargs):\n \"\"\"\n If we are exporting for the purposes of generating\n a 'bom-import' template, there are some fields which\n we are not interested in.\n \"\"\"\n\n fields = super().get_fields(**kwargs)\n\n # If we are not generating an \"import\" template,\n # just return the complete list of fields\n if not self.is_importing:\n return fields\n\n # Otherwise, remove some fields we are not interested in\n\n idx = 0\n\n to_remove = [\n 'level',\n 'bom_id',\n 'parent_part_id',\n 'parent_part_ipn',\n 'parent_part_name',\n 'part_description',\n 'sub_assembly'\n ]\n\n while idx < len(fields):\n\n if fields[idx].column_name.lower() in to_remove:\n del fields[idx]\n else:\n idx += 1\n\n return fields\n\n class Meta:\n model = 
BomItem\n skip_unchanged = True\n report_skipped = False\n clean_model_instances = True\n\n exclude = [\n 'checksum',\n 'id',\n 'part',\n 'sub_part',\n ]\n\n\nclass BomItemAdmin(ImportExportModelAdmin):\n\n resource_class = BomItemResource\n\n list_display = ('part', 'sub_part', 'quantity')\n\n search_fields = ('part__name', 'part__description', 'sub_part__name', 'sub_part__description')\n\n\nclass ParameterTemplateAdmin(ImportExportModelAdmin):\n list_display = ('name', 'units')\n\n\nclass ParameterResource(ModelResource):\n \"\"\" Class for managing PartParameter data import/export \"\"\"\n\n part = Field(attribute='part', widget=widgets.ForeignKeyWidget(Part))\n\n part_name = Field(attribute='part__name', readonly=True)\n\n template = Field(attribute='template', widget=widgets.ForeignKeyWidget(PartParameterTemplate))\n\n template_name = Field(attribute='template__name', readonly=True)\n\n class Meta:\n model = PartParameter\n skip_unchanged = True\n report_skipped = False\n clean_model_instance = True\n\n\nclass ParameterAdmin(ImportExportModelAdmin):\n\n resource_class = ParameterResource\n\n list_display = ('part', 'template', 'data')\n\n\nclass PartCategoryParameterAdmin(admin.ModelAdmin):\n\n pass\n\n\nclass PartSellPriceBreakAdmin(admin.ModelAdmin):\n\n class Meta:\n model = PartSellPriceBreak\n\n list_display = ('part', 'quantity', 'price',)\n\n\nadmin.site.register(Part, PartAdmin)\nadmin.site.register(PartCategory, PartCategoryAdmin)\nadmin.site.register(PartRelated, PartRelatedAdmin)\nadmin.site.register(PartAttachment, PartAttachmentAdmin)\nadmin.site.register(PartStar, PartStarAdmin)\nadmin.site.register(BomItem, BomItemAdmin)\nadmin.site.register(PartParameterTemplate, ParameterTemplateAdmin)\nadmin.site.register(PartParameter, ParameterAdmin)\nadmin.site.register(PartCategoryParameterTemplate, PartCategoryParameterAdmin)\nadmin.site.register(PartTestTemplate, PartTestTemplateAdmin)\nadmin.site.register(PartSellPriceBreak, PartSellPriceBreakAdmin)\n", "path": "InvenTree/part/admin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.contrib import admin\n\nfrom import_export.admin import ImportExportModelAdmin\nfrom import_export.resources import ModelResource\nfrom import_export.fields import Field\nimport import_export.widgets as widgets\n\nfrom .models import PartCategory, Part\nfrom .models import PartAttachment, PartStar, PartRelated\nfrom .models import BomItem\nfrom .models import PartParameterTemplate, PartParameter\nfrom .models import PartCategoryParameterTemplate\nfrom .models import PartTestTemplate\nfrom .models import PartSellPriceBreak\n\nfrom stock.models import StockLocation\nfrom company.models import SupplierPart\n\n\nclass PartResource(ModelResource):\n \"\"\" Class for managing Part data import/export \"\"\"\n\n # ForeignKey fields\n category = Field(attribute='category', widget=widgets.ForeignKeyWidget(PartCategory))\n \n default_location = Field(attribute='default_location', widget=widgets.ForeignKeyWidget(StockLocation))\n\n default_supplier = Field(attribute='default_supplier', widget=widgets.ForeignKeyWidget(SupplierPart))\n\n category_name = Field(attribute='category__name', readonly=True)\n \n variant_of = Field(attribute='variant_of', widget=widgets.ForeignKeyWidget(Part))\n\n suppliers = Field(attribute='supplier_count', readonly=True)\n\n # Extra calculated meta-data (readonly)\n in_stock = Field(attribute='total_stock', readonly=True, widget=widgets.IntegerWidget())\n\n on_order = 
Field(attribute='on_order', readonly=True, widget=widgets.IntegerWidget())\n\n used_in = Field(attribute='used_in_count', readonly=True, widget=widgets.IntegerWidget())\n\n allocated = Field(attribute='allocation_count', readonly=True, widget=widgets.IntegerWidget())\n\n building = Field(attribute='quantity_being_built', readonly=True, widget=widgets.IntegerWidget())\n\n class Meta:\n model = Part\n skip_unchanged = True\n report_skipped = False\n clean_model_instances = True\n exclude = [\n 'bom_checksum', 'bom_checked_by', 'bom_checked_date',\n 'lft', 'rght', 'tree_id', 'level',\n ]\n\n def get_queryset(self):\n \"\"\" Prefetch related data for quicker access \"\"\"\n\n query = super().get_queryset()\n query = query.prefetch_related(\n 'category',\n 'used_in',\n 'builds',\n 'supplier_parts__purchase_order_line_items',\n 'stock_items__allocations'\n )\n\n return query\n\n\nclass PartAdmin(ImportExportModelAdmin):\n \n resource_class = PartResource\n\n list_display = ('full_name', 'description', 'total_stock', 'category')\n\n list_filter = ('active', 'assembly', 'is_template', 'virtual')\n\n search_fields = ('name', 'description', 'category__name', 'category__description', 'IPN')\n\n\nclass PartCategoryResource(ModelResource):\n \"\"\" Class for managing PartCategory data import/export \"\"\"\n\n parent = Field(attribute='parent', widget=widgets.ForeignKeyWidget(PartCategory))\n\n parent_name = Field(attribute='parent__name', readonly=True)\n\n default_location = Field(attribute='default_location', widget=widgets.ForeignKeyWidget(StockLocation))\n\n class Meta:\n model = PartCategory\n skip_unchanged = True\n report_skipped = False\n clean_model_instances = True\n\n exclude = [\n # Exclude MPTT internal model fields\n 'lft', 'rght', 'tree_id', 'level',\n ]\n\n def after_import(self, dataset, result, using_transactions, dry_run, **kwargs):\n\n super().after_import(dataset, result, using_transactions, dry_run, **kwargs)\n\n # Rebuild the PartCategory tree(s)\n PartCategory.objects.rebuild()\n\n\nclass PartCategoryAdmin(ImportExportModelAdmin):\n\n resource_class = PartCategoryResource\n\n list_display = ('name', 'pathstring', 'description')\n\n search_fields = ('name', 'description')\n\n\nclass PartRelatedAdmin(admin.ModelAdmin):\n ''' Class to manage PartRelated objects '''\n pass\n\n\nclass PartAttachmentAdmin(admin.ModelAdmin):\n\n list_display = ('part', 'attachment', 'comment')\n\n\nclass PartStarAdmin(admin.ModelAdmin):\n\n list_display = ('part', 'user')\n\n\nclass PartTestTemplateAdmin(admin.ModelAdmin):\n\n list_display = ('part', 'test_name', 'required')\n\n\nclass BomItemResource(ModelResource):\n \"\"\" Class for managing BomItem data import/export \"\"\"\n\n level = Field(attribute='level', readonly=True)\n\n bom_id = Field(attribute='pk')\n\n # ID of the parent part\n parent_part_id = Field(attribute='part', widget=widgets.ForeignKeyWidget(Part))\n\n # IPN of the parent part\n parent_part_ipn = Field(attribute='part__IPN', readonly=True)\n\n # Name of the parent part\n parent_part_name = Field(attribute='part__name', readonly=True)\n\n # ID of the sub-part\n part_id = Field(attribute='sub_part', widget=widgets.ForeignKeyWidget(Part))\n\n # IPN of the sub-part\n part_ipn = Field(attribute='sub_part__IPN', readonly=True)\n\n # Name of the sub-part\n part_name = Field(attribute='sub_part__name', readonly=True)\n\n # Description of the sub-part\n part_description = Field(attribute='sub_part__description', readonly=True)\n\n # Is the sub-part itself an assembly?\n sub_assembly = 
Field(attribute='sub_part__assembly', readonly=True)\n\n def dehydrate_quantity(self, item):\n \"\"\"\n Special consideration for the 'quantity' field on data export.\n We do not want a spreadsheet full of \"1.0000\" (we'd rather \"1\")\n\n Ref: https://django-import-export.readthedocs.io/en/latest/getting_started.html#advanced-data-manipulation-on-export\n \"\"\"\n return float(item.quantity)\n\n def before_export(self, queryset, *args, **kwargs):\n\n self.is_importing = kwargs.get('importing', False)\n\n def get_fields(self, **kwargs):\n \"\"\"\n If we are exporting for the purposes of generating\n a 'bom-import' template, there are some fields which\n we are not interested in.\n \"\"\"\n\n fields = super().get_fields(**kwargs)\n\n # If we are not generating an \"import\" template,\n # just return the complete list of fields\n if not self.is_importing:\n return fields\n\n # Otherwise, remove some fields we are not interested in\n\n idx = 0\n\n to_remove = [\n 'level',\n 'bom_id',\n 'parent_part_id',\n 'parent_part_ipn',\n 'parent_part_name',\n 'part_description',\n 'sub_assembly'\n ]\n\n while idx < len(fields):\n\n if fields[idx].column_name.lower() in to_remove:\n del fields[idx]\n else:\n idx += 1\n\n return fields\n\n class Meta:\n model = BomItem\n skip_unchanged = True\n report_skipped = False\n clean_model_instances = True\n\n exclude = [\n 'checksum',\n 'id',\n 'part',\n 'sub_part',\n ]\n\n\nclass BomItemAdmin(ImportExportModelAdmin):\n\n resource_class = BomItemResource\n\n list_display = ('part', 'sub_part', 'quantity')\n\n search_fields = ('part__name', 'part__description', 'sub_part__name', 'sub_part__description')\n\n\nclass ParameterTemplateAdmin(ImportExportModelAdmin):\n list_display = ('name', 'units')\n\n\nclass ParameterResource(ModelResource):\n \"\"\" Class for managing PartParameter data import/export \"\"\"\n\n part = Field(attribute='part', widget=widgets.ForeignKeyWidget(Part))\n\n part_name = Field(attribute='part__name', readonly=True)\n\n template = Field(attribute='template', widget=widgets.ForeignKeyWidget(PartParameterTemplate))\n\n template_name = Field(attribute='template__name', readonly=True)\n\n class Meta:\n model = PartParameter\n skip_unchanged = True\n report_skipped = False\n clean_model_instance = True\n\n\nclass ParameterAdmin(ImportExportModelAdmin):\n\n resource_class = ParameterResource\n\n list_display = ('part', 'template', 'data')\n\n\nclass PartCategoryParameterAdmin(admin.ModelAdmin):\n\n pass\n\n\nclass PartSellPriceBreakAdmin(admin.ModelAdmin):\n\n class Meta:\n model = PartSellPriceBreak\n\n list_display = ('part', 'quantity', 'price',)\n\n\nadmin.site.register(Part, PartAdmin)\nadmin.site.register(PartCategory, PartCategoryAdmin)\nadmin.site.register(PartRelated, PartRelatedAdmin)\nadmin.site.register(PartAttachment, PartAttachmentAdmin)\nadmin.site.register(PartStar, PartStarAdmin)\nadmin.site.register(BomItem, BomItemAdmin)\nadmin.site.register(PartParameterTemplate, ParameterTemplateAdmin)\nadmin.site.register(PartParameter, ParameterAdmin)\nadmin.site.register(PartCategoryParameterTemplate, PartCategoryParameterAdmin)\nadmin.site.register(PartTestTemplate, PartTestTemplateAdmin)\nadmin.site.register(PartSellPriceBreak, PartSellPriceBreakAdmin)\n", "path": "InvenTree/part/admin.py"}]} | 3,119 | 165 |
gh_patches_debug_8190 | rasdani/github-patches | git_diff | microsoft__presidio-782 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
typo in append_analysis_explenation_text
It seems that there is a typo in the function name.
append_analysis_expl**e**nation_text -> append_analysis_expl**a**nation_text
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `presidio-analyzer/presidio_analyzer/recognizer_result.py`
Content:
```
1 import logging
2 from typing import Dict
3
4 from presidio_analyzer import AnalysisExplanation
5
6
7 class RecognizerResult:
8 """
9 Recognizer Result represents the findings of the detected entity.
10
11 Result of a recognizer analyzing the text.
12
13 :param entity_type: the type of the entity
14 :param start: the start location of the detected entity
15 :param end: the end location of the detected entity
16 :param score: the score of the detection
17 :param analysis_explanation: contains the explanation of why this
18 entity was identified
19 """
20
21 logger = logging.getLogger("presidio-analyzer")
22
23 def __init__(
24 self,
25 entity_type: str,
26 start: int,
27 end: int,
28 score: float,
29 analysis_explanation: AnalysisExplanation = None,
30 ):
31
32 self.entity_type = entity_type
33 self.start = start
34 self.end = end
35 self.score = score
36 self.analysis_explanation = analysis_explanation
37
38 def append_analysis_explenation_text(self, text: str) -> None:
39 """Add text to the analysis explanation."""
40 if self.analysis_explanation:
41 self.analysis_explanation.append_textual_explanation_line(text)
42
43 def to_dict(self) -> Dict:
44 """
45 Serialize self to dictionary.
46
47 :return: a dictionary
48 """
49 return self.__dict__
50
51 @classmethod
52 def from_json(cls, data: Dict) -> "RecognizerResult":
53 """
54 Create RecognizerResult from json.
55
56 :param data: e.g. {
57 "start": 24,
58 "end": 32,
59 "score": 0.8,
60 "entity_type": "NAME"
61 }
62 :return: RecognizerResult
63 """
64 score = data.get("score")
65 entity_type = data.get("entity_type")
66 start = data.get("start")
67 end = data.get("end")
68 return cls(entity_type, start, end, score)
69
70 def __repr__(self) -> str:
71 """Return a string representation of the instance."""
72 return self.__str__()
73
74 def intersects(self, other: "RecognizerResult") -> int:
75 """
76 Check if self intersects with a different RecognizerResult.
77
78 :return: If intersecting, returns the number of
79 intersecting characters.
80 If not, returns 0
81 """
82 # if they do not overlap the intersection is 0
83 if self.end < other.start or other.end < self.start:
84 return 0
85
86 # otherwise the intersection is min(end) - max(start)
87 return min(self.end, other.end) - max(self.start, other.start)
88
89 def contained_in(self, other: "RecognizerResult") -> bool:
90 """
91 Check if self is contained in a different RecognizerResult.
92
93 :return: true if contained
94 """
95 return self.start >= other.start and self.end <= other.end
96
97 def contains(self, other: "RecognizerResult") -> bool:
98 """
99 Check if one result is contained or equal to another result.
100
101 :param other: another RecognizerResult
102 :return: bool
103 """
104 return self.start <= other.start and self.end >= other.end
105
106 def equal_indices(self, other: "RecognizerResult") -> bool:
107 """
108 Check if the indices are equal between two results.
109
110 :param other: another RecognizerResult
111 :return:
112 """
113 return self.start == other.start and self.end == other.end
114
115 def __gt__(self, other: "RecognizerResult") -> bool:
116 """
117 Check if one result is greater by using the results indices in the text.
118
119 :param other: another RecognizerResult
120 :return: bool
121 """
122 if self.start == other.start:
123 return self.end > other.end
124 return self.start > other.start
125
126 def __eq__(self, other: "RecognizerResult") -> bool:
127 """
128 Check two results are equal by using all class fields.
129
130 :param other: another RecognizerResult
131 :return: bool
132 """
133 equal_type = self.entity_type == other.entity_type
134 equal_score = self.score == other.score
135 return self.equal_indices(other) and equal_type and equal_score
136
137 def __hash__(self):
138 """
139 Hash the result data by using all class fields.
140
141 :return: int
142 """
143 return hash(
144 f"{str(self.start)} {str(self.end)} {str(self.score)} {self.entity_type}"
145 )
146
147 def __str__(self) -> str:
148 """Return a string representation of the instance."""
149 return (
150 f"type: {self.entity_type}, "
151 f"start: {self.start}, "
152 f"end: {self.end}, "
153 f"score: {self.score}"
154 )
155
156 def has_conflict(self, other: "RecognizerResult") -> bool:
157 """
158 Check if two recognizer results are conflicted or not.
159
160 I have a conflict if:
161 1. My indices are the same as the other and my score is lower.
162 2. If my indices are contained in another.
163
164 :param other: RecognizerResult
165 :return:
166 """
167 if self.equal_indices(other):
168 return self.score <= other.score
169 return other.contains(self)
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/presidio-analyzer/presidio_analyzer/recognizer_result.py b/presidio-analyzer/presidio_analyzer/recognizer_result.py
--- a/presidio-analyzer/presidio_analyzer/recognizer_result.py
+++ b/presidio-analyzer/presidio_analyzer/recognizer_result.py
@@ -35,7 +35,7 @@
self.score = score
self.analysis_explanation = analysis_explanation
- def append_analysis_explenation_text(self, text: str) -> None:
+ def append_analysis_explanation_text(self, text: str) -> None:
"""Add text to the analysis explanation."""
if self.analysis_explanation:
self.analysis_explanation.append_textual_explanation_line(text)
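An editorial aside, not part of the project's change: renaming a public method silently breaks any caller still using the misspelled name. If backward compatibility mattered, a deprecated alias could have been kept alongside the rename — a hedged sketch, not something the diff above does:
```python
import warnings

# hypothetical: would live on RecognizerResult next to the renamed method
def append_analysis_explenation_text(self, text: str) -> None:
    """Deprecated misspelled alias; use append_analysis_explanation_text instead."""
    warnings.warn(
        "append_analysis_explenation_text is deprecated; "
        "use append_analysis_explanation_text",
        DeprecationWarning,
        stacklevel=2,
    )
    self.append_analysis_explanation_text(text)
```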
| {"golden_diff": "diff --git a/presidio-analyzer/presidio_analyzer/recognizer_result.py b/presidio-analyzer/presidio_analyzer/recognizer_result.py\n--- a/presidio-analyzer/presidio_analyzer/recognizer_result.py\n+++ b/presidio-analyzer/presidio_analyzer/recognizer_result.py\n@@ -35,7 +35,7 @@\n self.score = score\n self.analysis_explanation = analysis_explanation\n \n- def append_analysis_explenation_text(self, text: str) -> None:\n+ def append_analysis_explanation_text(self, text: str) -> None:\n \"\"\"Add text to the analysis explanation.\"\"\"\n if self.analysis_explanation:\n self.analysis_explanation.append_textual_explanation_line(text)\n", "issue": "typo in append_analysis_explenation_text\nIt seems that there is a typo in the function name.\r\n\r\nappend_analysis_expl**e**nation_text - > append_analysis_expl**a**nation_text\n", "before_files": [{"content": "import logging\nfrom typing import Dict\n\nfrom presidio_analyzer import AnalysisExplanation\n\n\nclass RecognizerResult:\n \"\"\"\n Recognizer Result represents the findings of the detected entity.\n\n Result of a recognizer analyzing the text.\n\n :param entity_type: the type of the entity\n :param start: the start location of the detected entity\n :param end: the end location of the detected entity\n :param score: the score of the detection\n :param analysis_explanation: contains the explanation of why this\n entity was identified\n \"\"\"\n\n logger = logging.getLogger(\"presidio-analyzer\")\n\n def __init__(\n self,\n entity_type: str,\n start: int,\n end: int,\n score: float,\n analysis_explanation: AnalysisExplanation = None,\n ):\n\n self.entity_type = entity_type\n self.start = start\n self.end = end\n self.score = score\n self.analysis_explanation = analysis_explanation\n\n def append_analysis_explenation_text(self, text: str) -> None:\n \"\"\"Add text to the analysis explanation.\"\"\"\n if self.analysis_explanation:\n self.analysis_explanation.append_textual_explanation_line(text)\n\n def to_dict(self) -> Dict:\n \"\"\"\n Serialize self to dictionary.\n\n :return: a dictionary\n \"\"\"\n return self.__dict__\n\n @classmethod\n def from_json(cls, data: Dict) -> \"RecognizerResult\":\n \"\"\"\n Create RecognizerResult from json.\n\n :param data: e.g. 
{\n \"start\": 24,\n \"end\": 32,\n \"score\": 0.8,\n \"entity_type\": \"NAME\"\n }\n :return: RecognizerResult\n \"\"\"\n score = data.get(\"score\")\n entity_type = data.get(\"entity_type\")\n start = data.get(\"start\")\n end = data.get(\"end\")\n return cls(entity_type, start, end, score)\n\n def __repr__(self) -> str:\n \"\"\"Return a string representation of the instance.\"\"\"\n return self.__str__()\n\n def intersects(self, other: \"RecognizerResult\") -> int:\n \"\"\"\n Check if self intersects with a different RecognizerResult.\n\n :return: If intersecting, returns the number of\n intersecting characters.\n If not, returns 0\n \"\"\"\n # if they do not overlap the intersection is 0\n if self.end < other.start or other.end < self.start:\n return 0\n\n # otherwise the intersection is min(end) - max(start)\n return min(self.end, other.end) - max(self.start, other.start)\n\n def contained_in(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if self is contained in a different RecognizerResult.\n\n :return: true if contained\n \"\"\"\n return self.start >= other.start and self.end <= other.end\n\n def contains(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if one result is contained or equal to another result.\n\n :param other: another RecognizerResult\n :return: bool\n \"\"\"\n return self.start <= other.start and self.end >= other.end\n\n def equal_indices(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if the indices are equal between two results.\n\n :param other: another RecognizerResult\n :return:\n \"\"\"\n return self.start == other.start and self.end == other.end\n\n def __gt__(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if one result is greater by using the results indices in the text.\n\n :param other: another RecognizerResult\n :return: bool\n \"\"\"\n if self.start == other.start:\n return self.end > other.end\n return self.start > other.start\n\n def __eq__(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check two results are equal by using all class fields.\n\n :param other: another RecognizerResult\n :return: bool\n \"\"\"\n equal_type = self.entity_type == other.entity_type\n equal_score = self.score == other.score\n return self.equal_indices(other) and equal_type and equal_score\n\n def __hash__(self):\n \"\"\"\n Hash the result data by using all class fields.\n\n :return: int\n \"\"\"\n return hash(\n f\"{str(self.start)} {str(self.end)} {str(self.score)} {self.entity_type}\"\n )\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the instance.\"\"\"\n return (\n f\"type: {self.entity_type}, \"\n f\"start: {self.start}, \"\n f\"end: {self.end}, \"\n f\"score: {self.score}\"\n )\n\n def has_conflict(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if two recognizer results are conflicted or not.\n\n I have a conflict if:\n 1. My indices are the same as the other and my score is lower.\n 2. 
If my indices are contained in another.\n\n :param other: RecognizerResult\n :return:\n \"\"\"\n if self.equal_indices(other):\n return self.score <= other.score\n return other.contains(self)\n", "path": "presidio-analyzer/presidio_analyzer/recognizer_result.py"}], "after_files": [{"content": "import logging\nfrom typing import Dict\n\nfrom presidio_analyzer import AnalysisExplanation\n\n\nclass RecognizerResult:\n \"\"\"\n Recognizer Result represents the findings of the detected entity.\n\n Result of a recognizer analyzing the text.\n\n :param entity_type: the type of the entity\n :param start: the start location of the detected entity\n :param end: the end location of the detected entity\n :param score: the score of the detection\n :param analysis_explanation: contains the explanation of why this\n entity was identified\n \"\"\"\n\n logger = logging.getLogger(\"presidio-analyzer\")\n\n def __init__(\n self,\n entity_type: str,\n start: int,\n end: int,\n score: float,\n analysis_explanation: AnalysisExplanation = None,\n ):\n\n self.entity_type = entity_type\n self.start = start\n self.end = end\n self.score = score\n self.analysis_explanation = analysis_explanation\n\n def append_analysis_explanation_text(self, text: str) -> None:\n \"\"\"Add text to the analysis explanation.\"\"\"\n if self.analysis_explanation:\n self.analysis_explanation.append_textual_explanation_line(text)\n\n def to_dict(self) -> Dict:\n \"\"\"\n Serialize self to dictionary.\n\n :return: a dictionary\n \"\"\"\n return self.__dict__\n\n @classmethod\n def from_json(cls, data: Dict) -> \"RecognizerResult\":\n \"\"\"\n Create RecognizerResult from json.\n\n :param data: e.g. {\n \"start\": 24,\n \"end\": 32,\n \"score\": 0.8,\n \"entity_type\": \"NAME\"\n }\n :return: RecognizerResult\n \"\"\"\n score = data.get(\"score\")\n entity_type = data.get(\"entity_type\")\n start = data.get(\"start\")\n end = data.get(\"end\")\n return cls(entity_type, start, end, score)\n\n def __repr__(self) -> str:\n \"\"\"Return a string representation of the instance.\"\"\"\n return self.__str__()\n\n def intersects(self, other: \"RecognizerResult\") -> int:\n \"\"\"\n Check if self intersects with a different RecognizerResult.\n\n :return: If intersecting, returns the number of\n intersecting characters.\n If not, returns 0\n \"\"\"\n # if they do not overlap the intersection is 0\n if self.end < other.start or other.end < self.start:\n return 0\n\n # otherwise the intersection is min(end) - max(start)\n return min(self.end, other.end) - max(self.start, other.start)\n\n def contained_in(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if self is contained in a different RecognizerResult.\n\n :return: true if contained\n \"\"\"\n return self.start >= other.start and self.end <= other.end\n\n def contains(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if one result is contained or equal to another result.\n\n :param other: another RecognizerResult\n :return: bool\n \"\"\"\n return self.start <= other.start and self.end >= other.end\n\n def equal_indices(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if the indices are equal between two results.\n\n :param other: another RecognizerResult\n :return:\n \"\"\"\n return self.start == other.start and self.end == other.end\n\n def __gt__(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if one result is greater by using the results indices in the text.\n\n :param other: another RecognizerResult\n :return: bool\n \"\"\"\n if self.start == 
other.start:\n return self.end > other.end\n return self.start > other.start\n\n def __eq__(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check two results are equal by using all class fields.\n\n :param other: another RecognizerResult\n :return: bool\n \"\"\"\n equal_type = self.entity_type == other.entity_type\n equal_score = self.score == other.score\n return self.equal_indices(other) and equal_type and equal_score\n\n def __hash__(self):\n \"\"\"\n Hash the result data by using all class fields.\n\n :return: int\n \"\"\"\n return hash(\n f\"{str(self.start)} {str(self.end)} {str(self.score)} {self.entity_type}\"\n )\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the instance.\"\"\"\n return (\n f\"type: {self.entity_type}, \"\n f\"start: {self.start}, \"\n f\"end: {self.end}, \"\n f\"score: {self.score}\"\n )\n\n def has_conflict(self, other: \"RecognizerResult\") -> bool:\n \"\"\"\n Check if two recognizer results are conflicted or not.\n\n I have a conflict if:\n 1. My indices are the same as the other and my score is lower.\n 2. If my indices are contained in another.\n\n :param other: RecognizerResult\n :return:\n \"\"\"\n if self.equal_indices(other):\n return self.score <= other.score\n return other.contains(self)\n", "path": "presidio-analyzer/presidio_analyzer/recognizer_result.py"}]} | 1,860 | 172 |
gh_patches_debug_12255 | rasdani/github-patches | git_diff | pypa__pip-5921 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add pre-released package upgrade hints
Before this commit, when querying for an installed package using pip, if
the package that was returned contained a pre-release version it was not
intuitive that the pre-release version could not be upgraded to directly
without the `--pre` flag.
This commit shows additional information on how to upgrade to
pre-release versions of a package if one is found in the search.
Fixes #5169.
<!---
Thank you for your soon to be pull request. Before you submit this, please
double check to make sure that you've added a news file fragment. In pip we
generate our NEWS.rst from multiple news fragment files, and all pull requests
require either a news file fragment or a marker to indicate they don't require
one.
To read more about adding a news file fragment for your PR, please check out
our documentation at: https://pip.pypa.io/en/latest/development/#adding-a-news-entry
-->
Display pre-release version hint in package version output of "pip search" (and others)
* Pip version: pip 9.0.3 from c:\python36\lib\site-packages (python 3.6)
* Python version: Python 3.6.2
* Operating system: Windows 10
### Description:
In #5168 I learned that version numbers like "2018.04.04a" are interpreted as pre-release versions. Therefore I could not install them without adding the parameter `--pre`.
However, this is _implicit_ knowledge the user must have about interpretations of version numbers and related pip behavior in order to understand the behavior of pip. Otherwise, the user is irritated, just like I was until a couple of minutes ago.
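As an illustration (an editorial sketch, not part of the original report), the interpretation can be checked directly with the `packaging` code that pip vendors for version handling:
```python
from pip._vendor.packaging.version import parse

v = parse("2018.04.04a")
print(v)                # 2018.4.4a0 -- the trailing "a" is read as an alpha pre-release
print(v.is_prerelease)  # True, so pip skips this version unless --pre is given
```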
Current situation where 2018.04.04a is not installable when `--pre` is not given:
```
C:\Users\karl.voit>pip search filetags
filetags (2018.04.04a) - Management of simple tags within file names
INSTALLED: 2018.3.19
LATEST: 2018.04.04a
C:\Users\karl.voit>
```
Shouldn't there at least be a hint such as the following suggestion?
```
C:\Users\karl.voit>pip search filetags
filetags (2018.04.04a) - Management of simple tags within file names
INSTALLED: 2018.3.19
LATEST: 2018.04.04a (pre-release; install with "pip install --pre")
C:\Users\karl.voit>
```
Maybe this applies to multiple stdout messages.
### What I've run:
See #5168 to give you some background.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/search.py`
Content:
```
1 from __future__ import absolute_import
2
3 import logging
4 import sys
5 import textwrap
6 from collections import OrderedDict
7
8 from pip._vendor import pkg_resources
9 from pip._vendor.packaging.version import parse as parse_version
10 # NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is
11 # why we ignore the type on this import
12 from pip._vendor.six.moves import xmlrpc_client # type: ignore
13
14 from pip._internal.cli.base_command import Command
15 from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
16 from pip._internal.download import PipXmlrpcTransport
17 from pip._internal.exceptions import CommandError
18 from pip._internal.models.index import PyPI
19 from pip._internal.utils.compat import get_terminal_size
20 from pip._internal.utils.logging import indent_log
21
22 logger = logging.getLogger(__name__)
23
24
25 class SearchCommand(Command):
26 """Search for PyPI packages whose name or summary contains <query>."""
27 name = 'search'
28 usage = """
29 %prog [options] <query>"""
30 summary = 'Search PyPI for packages.'
31 ignore_require_venv = True
32
33 def __init__(self, *args, **kw):
34 super(SearchCommand, self).__init__(*args, **kw)
35 self.cmd_opts.add_option(
36 '-i', '--index',
37 dest='index',
38 metavar='URL',
39 default=PyPI.pypi_url,
40 help='Base URL of Python Package Index (default %default)')
41
42 self.parser.insert_option_group(0, self.cmd_opts)
43
44 def run(self, options, args):
45 if not args:
46 raise CommandError('Missing required argument (search query).')
47 query = args
48 pypi_hits = self.search(query, options)
49 hits = transform_hits(pypi_hits)
50
51 terminal_width = None
52 if sys.stdout.isatty():
53 terminal_width = get_terminal_size()[0]
54
55 print_results(hits, terminal_width=terminal_width)
56 if pypi_hits:
57 return SUCCESS
58 return NO_MATCHES_FOUND
59
60 def search(self, query, options):
61 index_url = options.index
62 with self._build_session(options) as session:
63 transport = PipXmlrpcTransport(index_url, session)
64 pypi = xmlrpc_client.ServerProxy(index_url, transport)
65 hits = pypi.search({'name': query, 'summary': query}, 'or')
66 return hits
67
68
69 def transform_hits(hits):
70 """
71 The list from pypi is really a list of versions. We want a list of
72 packages with the list of versions stored inline. This converts the
73 list from pypi into one we can use.
74 """
75 packages = OrderedDict()
76 for hit in hits:
77 name = hit['name']
78 summary = hit['summary']
79 version = hit['version']
80
81 if name not in packages.keys():
82 packages[name] = {
83 'name': name,
84 'summary': summary,
85 'versions': [version],
86 }
87 else:
88 packages[name]['versions'].append(version)
89
90 # if this is the highest version, replace summary and score
91 if version == highest_version(packages[name]['versions']):
92 packages[name]['summary'] = summary
93
94 return list(packages.values())
95
96
97 def print_results(hits, name_column_width=None, terminal_width=None):
98 if not hits:
99 return
100 if name_column_width is None:
101 name_column_width = max([
102 len(hit['name']) + len(highest_version(hit.get('versions', ['-'])))
103 for hit in hits
104 ]) + 4
105
106 installed_packages = [p.project_name for p in pkg_resources.working_set]
107 for hit in hits:
108 name = hit['name']
109 summary = hit['summary'] or ''
110 latest = highest_version(hit.get('versions', ['-']))
111 if terminal_width is not None:
112 target_width = terminal_width - name_column_width - 5
113 if target_width > 10:
114 # wrap and indent summary to fit terminal
115 summary = textwrap.wrap(summary, target_width)
116 summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
117
118 line = '%-*s - %s' % (name_column_width,
119 '%s (%s)' % (name, latest), summary)
120 try:
121 logger.info(line)
122 if name in installed_packages:
123 dist = pkg_resources.get_distribution(name)
124 with indent_log():
125 if dist.version == latest:
126 logger.info('INSTALLED: %s (latest)', dist.version)
127 else:
128 logger.info('INSTALLED: %s', dist.version)
129 logger.info('LATEST: %s', latest)
130 except UnicodeEncodeError:
131 pass
132
133
134 def highest_version(versions):
135 return max(versions, key=parse_version)
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pip/_internal/commands/search.py b/src/pip/_internal/commands/search.py
--- a/src/pip/_internal/commands/search.py
+++ b/src/pip/_internal/commands/search.py
@@ -126,7 +126,11 @@
logger.info('INSTALLED: %s (latest)', dist.version)
else:
logger.info('INSTALLED: %s', dist.version)
- logger.info('LATEST: %s', latest)
+ if parse_version(latest).pre:
+ logger.info('LATEST: %s (pre-release; install'
+ ' with "pip install --pre")', latest)
+ else:
+ logger.info('LATEST: %s', latest)
except UnicodeEncodeError:
pass
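As a quick illustration of the branch this patch adds, the same check can be reproduced with the standalone `packaging` distribution (pip uses its vendored copy); the helper function below is made up for the sketch:

```python
from packaging.version import parse as parse_version

def latest_line(latest: str) -> str:
    # Mirrors the condition added to print_results() in the patch above.
    if parse_version(latest).pre:
        return 'LATEST: %s (pre-release; install with "pip install --pre")' % latest
    return 'LATEST: %s' % latest

print(latest_line("2018.04.04a"))
print(latest_line("2018.3.19"))
```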
| {"golden_diff": "diff --git a/src/pip/_internal/commands/search.py b/src/pip/_internal/commands/search.py\n--- a/src/pip/_internal/commands/search.py\n+++ b/src/pip/_internal/commands/search.py\n@@ -126,7 +126,11 @@\n logger.info('INSTALLED: %s (latest)', dist.version)\n else:\n logger.info('INSTALLED: %s', dist.version)\n- logger.info('LATEST: %s', latest)\n+ if parse_version(latest).pre:\n+ logger.info('LATEST: %s (pre-release; install'\n+ ' with \"pip install --pre\")', latest)\n+ else:\n+ logger.info('LATEST: %s', latest)\n except UnicodeEncodeError:\n pass\n", "issue": "Add pre-released package upgrade hints\nBefore this commit when querying for an installed package using pip if\r\nthe package that was returned contained a pre-released version of the\r\npackage it was not intuitive that the pre-released version of the\r\npackage could not be directly upgraded to without the `--pre` flag.\r\n\r\nThis commit shows additional information on how to upgrade to\r\npre-released versions of a package if one is found in the search.\r\n\r\nFixes #5169.\r\n\r\n<!---\r\nThank you for your soon to be pull request. Before you submit this, please\r\ndouble check to make sure that you've added a news file fragment. In pip we\r\ngenerate our NEWS.rst from multiple news fragment files, and all pull requests\r\nrequire either a news file fragment or a marker to indicate they don't require\r\none.\r\n\r\nTo read more about adding a news file fragment for your PR, please check out\r\nour documentation at: https://pip.pypa.io/en/latest/development/#adding-a-news-entry\r\n-->\r\n\nDisplay pre-release version hint in package version output of \"pip search\" (and others)\n* Pip version: pip 9.0.3 from c:\\python36\\lib\\site-packages (python 3.6)\r\n* Python version: Python 3.6.2\r\n* Operating system: Windows 10\r\n\r\n### Description:\r\n\r\nIn #5168 I learned that version numbers like \"2018.04.04a\" are interpreted as pre-release versions. Therefore I could not install them without adding the parameter `--pre`.\r\n\r\nHowever, this is _implicit_ knowledge the user must have about interpretations of version numbers and related pip behavior in order to understand the behavior of pip. 
Otherwise, the user is irritated, just like I was until a couple of minutes ago.\r\n\r\nCurrent situation where 2018.04.04a is not installable when `--pre` is not given:\r\n\r\n```\r\nC:\\Users\\karl.voit>pip search filetags\r\nfiletags (2018.04.04a) - Management of simple tags within file names\r\n INSTALLED: 2018.3.19\r\n LATEST: 2018.04.04a\r\n\r\nC:\\Users\\karl.voit>\r\n```\r\n\r\nShouldn't there at least be a hint such as following suggestion?\r\n\r\n```\r\nC:\\Users\\karl.voit>pip search filetags\r\nfiletags (2018.04.04a) - Management of simple tags within file names\r\n INSTALLED: 2018.3.19\r\n LATEST: 2018.04.04a (pre-release; install with \"pip install --pre\")\r\n\r\nC:\\Users\\karl.voit>\r\n```\r\n\r\nMaybe this applies for multiple stdout messages.\r\n\r\n### What I've run:\r\n\r\nSee #5168 to give you some background.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport sys\nimport textwrap\nfrom collections import OrderedDict\n\nfrom pip._vendor import pkg_resources\nfrom pip._vendor.packaging.version import parse as parse_version\n# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is\n# why we ignore the type on this import\nfrom pip._vendor.six.moves import xmlrpc_client # type: ignore\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS\nfrom pip._internal.download import PipXmlrpcTransport\nfrom pip._internal.exceptions import CommandError\nfrom pip._internal.models.index import PyPI\nfrom pip._internal.utils.compat import get_terminal_size\nfrom pip._internal.utils.logging import indent_log\n\nlogger = logging.getLogger(__name__)\n\n\nclass SearchCommand(Command):\n \"\"\"Search for PyPI packages whose name or summary contains <query>.\"\"\"\n name = 'search'\n usage = \"\"\"\n %prog [options] <query>\"\"\"\n summary = 'Search PyPI for packages.'\n ignore_require_venv = True\n\n def __init__(self, *args, **kw):\n super(SearchCommand, self).__init__(*args, **kw)\n self.cmd_opts.add_option(\n '-i', '--index',\n dest='index',\n metavar='URL',\n default=PyPI.pypi_url,\n help='Base URL of Python Package Index (default %default)')\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n if not args:\n raise CommandError('Missing required argument (search query).')\n query = args\n pypi_hits = self.search(query, options)\n hits = transform_hits(pypi_hits)\n\n terminal_width = None\n if sys.stdout.isatty():\n terminal_width = get_terminal_size()[0]\n\n print_results(hits, terminal_width=terminal_width)\n if pypi_hits:\n return SUCCESS\n return NO_MATCHES_FOUND\n\n def search(self, query, options):\n index_url = options.index\n with self._build_session(options) as session:\n transport = PipXmlrpcTransport(index_url, session)\n pypi = xmlrpc_client.ServerProxy(index_url, transport)\n hits = pypi.search({'name': query, 'summary': query}, 'or')\n return hits\n\n\ndef transform_hits(hits):\n \"\"\"\n The list from pypi is really a list of versions. We want a list of\n packages with the list of versions stored inline. 
This converts the\n list from pypi into one we can use.\n \"\"\"\n packages = OrderedDict()\n for hit in hits:\n name = hit['name']\n summary = hit['summary']\n version = hit['version']\n\n if name not in packages.keys():\n packages[name] = {\n 'name': name,\n 'summary': summary,\n 'versions': [version],\n }\n else:\n packages[name]['versions'].append(version)\n\n # if this is the highest version, replace summary and score\n if version == highest_version(packages[name]['versions']):\n packages[name]['summary'] = summary\n\n return list(packages.values())\n\n\ndef print_results(hits, name_column_width=None, terminal_width=None):\n if not hits:\n return\n if name_column_width is None:\n name_column_width = max([\n len(hit['name']) + len(highest_version(hit.get('versions', ['-'])))\n for hit in hits\n ]) + 4\n\n installed_packages = [p.project_name for p in pkg_resources.working_set]\n for hit in hits:\n name = hit['name']\n summary = hit['summary'] or ''\n latest = highest_version(hit.get('versions', ['-']))\n if terminal_width is not None:\n target_width = terminal_width - name_column_width - 5\n if target_width > 10:\n # wrap and indent summary to fit terminal\n summary = textwrap.wrap(summary, target_width)\n summary = ('\\n' + ' ' * (name_column_width + 3)).join(summary)\n\n line = '%-*s - %s' % (name_column_width,\n '%s (%s)' % (name, latest), summary)\n try:\n logger.info(line)\n if name in installed_packages:\n dist = pkg_resources.get_distribution(name)\n with indent_log():\n if dist.version == latest:\n logger.info('INSTALLED: %s (latest)', dist.version)\n else:\n logger.info('INSTALLED: %s', dist.version)\n logger.info('LATEST: %s', latest)\n except UnicodeEncodeError:\n pass\n\n\ndef highest_version(versions):\n return max(versions, key=parse_version)\n", "path": "src/pip/_internal/commands/search.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport sys\nimport textwrap\nfrom collections import OrderedDict\n\nfrom pip._vendor import pkg_resources\nfrom pip._vendor.packaging.version import parse as parse_version\n# NOTE: XMLRPC Client is not annotated in typeshed as on 2017-07-17, which is\n# why we ignore the type on this import\nfrom pip._vendor.six.moves import xmlrpc_client # type: ignore\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS\nfrom pip._internal.download import PipXmlrpcTransport\nfrom pip._internal.exceptions import CommandError\nfrom pip._internal.models.index import PyPI\nfrom pip._internal.utils.compat import get_terminal_size\nfrom pip._internal.utils.logging import indent_log\n\nlogger = logging.getLogger(__name__)\n\n\nclass SearchCommand(Command):\n \"\"\"Search for PyPI packages whose name or summary contains <query>.\"\"\"\n name = 'search'\n usage = \"\"\"\n %prog [options] <query>\"\"\"\n summary = 'Search PyPI for packages.'\n ignore_require_venv = True\n\n def __init__(self, *args, **kw):\n super(SearchCommand, self).__init__(*args, **kw)\n self.cmd_opts.add_option(\n '-i', '--index',\n dest='index',\n metavar='URL',\n default=PyPI.pypi_url,\n help='Base URL of Python Package Index (default %default)')\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options, args):\n if not args:\n raise CommandError('Missing required argument (search query).')\n query = args\n pypi_hits = self.search(query, options)\n hits = transform_hits(pypi_hits)\n\n terminal_width = None\n if sys.stdout.isatty():\n terminal_width = 
get_terminal_size()[0]\n\n print_results(hits, terminal_width=terminal_width)\n if pypi_hits:\n return SUCCESS\n return NO_MATCHES_FOUND\n\n def search(self, query, options):\n index_url = options.index\n with self._build_session(options) as session:\n transport = PipXmlrpcTransport(index_url, session)\n pypi = xmlrpc_client.ServerProxy(index_url, transport)\n hits = pypi.search({'name': query, 'summary': query}, 'or')\n return hits\n\n\ndef transform_hits(hits):\n \"\"\"\n The list from pypi is really a list of versions. We want a list of\n packages with the list of versions stored inline. This converts the\n list from pypi into one we can use.\n \"\"\"\n packages = OrderedDict()\n for hit in hits:\n name = hit['name']\n summary = hit['summary']\n version = hit['version']\n\n if name not in packages.keys():\n packages[name] = {\n 'name': name,\n 'summary': summary,\n 'versions': [version],\n }\n else:\n packages[name]['versions'].append(version)\n\n # if this is the highest version, replace summary and score\n if version == highest_version(packages[name]['versions']):\n packages[name]['summary'] = summary\n\n return list(packages.values())\n\n\ndef print_results(hits, name_column_width=None, terminal_width=None):\n if not hits:\n return\n if name_column_width is None:\n name_column_width = max([\n len(hit['name']) + len(highest_version(hit.get('versions', ['-'])))\n for hit in hits\n ]) + 4\n\n installed_packages = [p.project_name for p in pkg_resources.working_set]\n for hit in hits:\n name = hit['name']\n summary = hit['summary'] or ''\n latest = highest_version(hit.get('versions', ['-']))\n if terminal_width is not None:\n target_width = terminal_width - name_column_width - 5\n if target_width > 10:\n # wrap and indent summary to fit terminal\n summary = textwrap.wrap(summary, target_width)\n summary = ('\\n' + ' ' * (name_column_width + 3)).join(summary)\n\n line = '%-*s - %s' % (name_column_width,\n '%s (%s)' % (name, latest), summary)\n try:\n logger.info(line)\n if name in installed_packages:\n dist = pkg_resources.get_distribution(name)\n with indent_log():\n if dist.version == latest:\n logger.info('INSTALLED: %s (latest)', dist.version)\n else:\n logger.info('INSTALLED: %s', dist.version)\n if parse_version(latest).pre:\n logger.info('LATEST: %s (pre-release; install'\n ' with \"pip install --pre\")', latest)\n else:\n logger.info('LATEST: %s', latest)\n except UnicodeEncodeError:\n pass\n\n\ndef highest_version(versions):\n return max(versions, key=parse_version)\n", "path": "src/pip/_internal/commands/search.py"}]} | 2,226 | 175 |
gh_patches_debug_7038 | rasdani/github-patches | git_diff | Textualize__textual-1600 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Populating a `ListView` via constructor vs via inherit and `compose`
There seems to be a subtle difference in the behaviour of a `ListView` depending on whether you create one by passing the `ListItem`s to it, or you create a custom `ListView` by inheriting from it and using `compose` to populate it. Take the following example code, which places both approaches side-by-side:
```python
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Header, Footer, ListView, ListItem, Label
class CustomListView( ListView ):
def __init__( self, choices: list[ str ] ) -> None:
super().__init__()
self._choices = choices
def compose( self ) -> ComposeResult:
"""Compose the child widgets."""
for choice in self._choices:
yield ListItem( Label( choice ) )
class ListViewMakerApp( App[ None ] ):
CSS = """
ListView {
width: 1fr;
height: 1fr;
border: round red;
}
"""
OPTIONS = [ f"This is the nifty test option {n}" for n in range( 20 ) ]
def compose( self ) -> ComposeResult:
yield Header()
yield Horizontal(
ListView(
*[ ListItem( Label( option ) ) for option in self.OPTIONS ]
),
CustomListView( self.OPTIONS )
)
yield Footer()
if __name__ == "__main__":
ListViewMakerApp().run()
```
I feel the `CustomListView` would not be an unusual approach for people to take; perhaps wanting to make their own commonly-used selection list of simple values (or even very complex child values that they want easily handled). Side-by-side the `ListView`s look the same to start with:
<img width="922" alt="Screenshot 2023-01-17 at 11 10 47" src="https://user-images.githubusercontent.com/28237/212884262-8d5252a8-3318-4f72-a3de-ae5f73b5a41c.png">
Note that the non-inherited `ListView` is to the left and that, even though it doesn't have focus, the first item is highlighted. If you <kbd>tab</kbd> into it you can cursor around, etc., just fine. On the other hand, notice that the right `ListView` (which is a `CustomListView`) has no obvious highlighted item and, if you tab into it, nothing gets highlighted.
Further to this, if you (via keyboard) focus the inherited (right hand) `ListView` and then attempt to cursor down, the following exception will be raised:
```
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /Users/davep/develop/python/textual-sandbox/.venv/lib/python3.10/site-packages/textual/widgets/_ │
│ list_view.py:127 in action_cursor_down │
│ │
│ 124 │ │ self.emit_no_wait(self.Selected(self, selected_child)) │
│ 125 │ │
│ 126 │ def action_cursor_down(self) -> None: │
│ ❱ 127 │ │ self.index += 1 │
│ 128 │ │
│ 129 │ def action_cursor_up(self) -> None: │
│ 130 │ │ self.index -= 1 │
│ │
│ ╭──────────────────────────── locals ─────────────────────────────╮ │
│ │ self = CustomListView(pseudo_classes={'focus', 'focus-within'}) │ │
│ ╰─────────────────────────────────────────────────────────────────╯ │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
TypeError: unsupported operand type(s) for +=: 'NoneType' and 'int'
```
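Pending a fix in `ListView` itself, a possible workaround for compose-populated subclasses is sketched below (this reuses the names from the reproduction script and assumes deferring the initial index to the `on_mount` hook is acceptable):

```python
from textual.app import ComposeResult
from textual.widgets import Label, ListItem, ListView

class CustomListView(ListView):
    def __init__(self, choices: list[str]) -> None:
        super().__init__()
        self._choices = choices

    def compose(self) -> ComposeResult:
        for choice in self._choices:
            yield ListItem(Label(choice))

    def on_mount(self) -> None:
        # Children yielded from compose() only exist once the widget is mounted,
        # so the index that was clamped to None in __init__ can be re-applied here.
        self.index = 0
```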
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/textual/widgets/_list_view.py`
Content:
```
1 from __future__ import annotations
2
3 from textual import events
4 from textual.await_remove import AwaitRemove
5 from textual.binding import Binding
6 from textual.containers import Vertical
7 from textual.geometry import clamp
8 from textual.message import Message
9 from textual.reactive import reactive
10 from textual.widget import AwaitMount
11 from textual.widgets._list_item import ListItem
12
13
14 class ListView(Vertical, can_focus=True, can_focus_children=False):
15 """Displays a vertical list of `ListItem`s which can be highlighted
16 and selected using the mouse or keyboard.
17
18 Attributes:
19 index: The index in the list that's currently highlighted.
20 """
21
22 BINDINGS = [
23 Binding("enter", "select_cursor", "Select", show=False),
24 Binding("up", "cursor_up", "Cursor Up", show=False),
25 Binding("down", "cursor_down", "Cursor Down", show=False),
26 ]
27
28 index = reactive(0, always_update=True)
29
30 def __init__(
31 self,
32 *children: ListItem,
33 initial_index: int | None = 0,
34 name: str | None = None,
35 id: str | None = None,
36 classes: str | None = None,
37 ) -> None:
38 """
39 Args:
40 *children: The ListItems to display in the list.
41 initial_index: The index that should be highlighted when the list is first mounted.
42 name: The name of the widget.
43 id: The unique ID of the widget used in CSS/query selection.
44 classes: The CSS classes of the widget.
45 """
46 super().__init__(*children, name=name, id=id, classes=classes)
47 self.index = initial_index
48
49 @property
50 def highlighted_child(self) -> ListItem | None:
51 """ListItem | None: The currently highlighted ListItem,
52 or None if nothing is highlighted.
53 """
54 if self.index is None:
55 return None
56 elif 0 <= self.index < len(self.children):
57 return self.children[self.index]
58
59 def validate_index(self, index: int | None) -> int | None:
60 """Clamp the index to the valid range, or set to None if there's nothing to highlight."""
61 if not self.children or index is None:
62 return None
63 return self._clamp_index(index)
64
65 def _clamp_index(self, index: int) -> int:
66 """Clamp the index to a valid value given the current list of children"""
67 last_index = max(len(self.children) - 1, 0)
68 return clamp(index, 0, last_index)
69
70 def _is_valid_index(self, index: int | None) -> bool:
71 """Return True if the current index is valid given the current list of children"""
72 if index is None:
73 return False
74 return 0 <= index < len(self.children)
75
76 def watch_index(self, old_index: int, new_index: int) -> None:
77 """Updates the highlighting when the index changes."""
78 if self._is_valid_index(old_index):
79 old_child = self.children[old_index]
80 old_child.highlighted = False
81 if self._is_valid_index(new_index):
82 new_child = self.children[new_index]
83 new_child.highlighted = True
84 else:
85 new_child = None
86
87 self._scroll_highlighted_region()
88 self.emit_no_wait(self.Highlighted(self, new_child))
89
90 def append(self, item: ListItem) -> AwaitMount:
91 """Append a new ListItem to the end of the ListView.
92
93 Args:
94 item (ListItem): The ListItem to append.
95
96 Returns:
97 AwaitMount: An awaitable that yields control to the event loop
98 until the DOM has been updated with the new child item.
99 """
100 await_mount = self.mount(item)
101 if len(self) == 1:
102 self.index = 0
103 return await_mount
104
105 def clear(self) -> AwaitRemove:
106 """Clear all items from the ListView.
107
108 Returns:
109 AwaitRemove: An awaitable that yields control to the event loop until
110 the DOM has been updated to reflect all children being removed.
111 """
112 await_remove = self.query("ListView > ListItem").remove()
113 self.index = None
114 return await_remove
115
116 def action_select_cursor(self) -> None:
117 selected_child = self.highlighted_child
118 self.emit_no_wait(self.Selected(self, selected_child))
119
120 def action_cursor_down(self) -> None:
121 self.index += 1
122
123 def action_cursor_up(self) -> None:
124 self.index -= 1
125
126 def on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:
127 self.focus()
128 self.index = self.children.index(event.sender)
129 self.emit_no_wait(self.Selected(self, event.sender))
130
131 def _scroll_highlighted_region(self) -> None:
132 """Used to keep the highlighted index within vision"""
133 if self.highlighted_child is not None:
134 self.scroll_to_widget(self.highlighted_child, animate=False)
135
136 def __len__(self):
137 return len(self.children)
138
139 class Highlighted(Message, bubble=True):
140 """Emitted when the highlighted item changes. Highlighted item is controlled using up/down keys.
141
142 Attributes:
143 item (ListItem | None): The highlighted item, if there is one highlighted.
144 """
145
146 def __init__(self, sender: ListView, item: ListItem | None) -> None:
147 super().__init__(sender)
148 self.item = item
149
150 class Selected(Message, bubble=True):
151 """Emitted when a list item is selected, e.g. when you press the enter key on it
152
153 Attributes:
154 item (ListItem): The selected item.
155 """
156
157 def __init__(self, sender: ListView, item: ListItem) -> None:
158 super().__init__(sender)
159 self.item = item
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/textual/widgets/_list_view.py b/src/textual/widgets/_list_view.py
--- a/src/textual/widgets/_list_view.py
+++ b/src/textual/widgets/_list_view.py
@@ -50,7 +50,11 @@
classes: The CSS classes of the widget.
"""
super().__init__(*children, name=name, id=id, classes=classes)
- self.index = initial_index
+ self._index = initial_index
+
+ def on_mount(self) -> None:
+ """Ensure the ListView is fully-settled after mounting."""
+ self.index = self._index
@property
def highlighted_child(self) -> ListItem | None:
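A related hardening, not part of the patch above and purely a sketch, would be to make the cursor actions tolerate a `None` index instead of doing arithmetic on it:

```python
from textual.widgets import ListView

class SafeListView(ListView):
    def action_cursor_down(self) -> None:
        # Tolerate an index of None (e.g. an empty or not-yet-populated list)
        # instead of raising TypeError on None + 1.
        self.index = 0 if self.index is None else self.index + 1

    def action_cursor_up(self) -> None:
        self.index = 0 if self.index is None else self.index - 1
```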
| {"golden_diff": "diff --git a/src/textual/widgets/_list_view.py b/src/textual/widgets/_list_view.py\n--- a/src/textual/widgets/_list_view.py\n+++ b/src/textual/widgets/_list_view.py\n@@ -50,7 +50,11 @@\n classes: The CSS classes of the widget.\n \"\"\"\n super().__init__(*children, name=name, id=id, classes=classes)\n- self.index = initial_index\n+ self._index = initial_index\n+\n+ def on_mount(self) -> None:\n+ \"\"\"Ensure the ListView is fully-settled after mounting.\"\"\"\n+ self.index = self._index\n \n @property\n def highlighted_child(self) -> ListItem | None:\n", "issue": "Populating a `ListView` via constructor vs via inherit and `compose`\nThere seems to be a subtle difference in the working of a `ListView` if you create one by passing the `ListItem`s to it, vs if you create a custom `ListView` by inheriting from it and using `compose` to populate it. Take the following example code, which places both approaches side-by-side:\r\n\r\n```python\r\nfrom textual.app import App, ComposeResult\r\nfrom textual.containers import Horizontal\r\nfrom textual.widgets import Header, Footer, ListView, ListItem, Label\r\n\r\nclass CustomListView( ListView ):\r\n\r\n def __init__( self, choices: list[ str ] ) -> None:\r\n super().__init__()\r\n self._choices = choices\r\n\r\n def compose( self ) -> ComposeResult:\r\n \"\"\"Compose the child widgets.\"\"\"\r\n for choice in self._choices:\r\n yield ListItem( Label( choice ) )\r\n\r\nclass ListViewMakerApp( App[ None ] ):\r\n\r\n CSS = \"\"\"\r\n ListView {\r\n width: 1fr;\r\n height: 1fr;\r\n border: round red;\r\n }\r\n \"\"\"\r\n\r\n OPTIONS = [ f\"This is the nifty test option {n}\" for n in range( 20 ) ]\r\n\r\n def compose( self ) -> ComposeResult:\r\n yield Header()\r\n yield Horizontal(\r\n ListView(\r\n *[ ListItem( Label( option ) ) for option in self.OPTIONS ]\r\n ),\r\n CustomListView( self.OPTIONS )\r\n )\r\n yield Footer()\r\n\r\nif __name__ == \"__main__\":\r\n ListViewMakerApp().run()\r\n```\r\n\r\nI feel the `CustomListView` would not be an unusual approach for people to take; perhaps wanting to make their own commonly-used selection list of simple values (or even very complex child values that they want easily handled). Side-by-side the `ListView`s look the same to start with:\r\n\r\n<img width=\"922\" alt=\"Screenshot 2023-01-17 at 11 10 47\" src=\"https://user-images.githubusercontent.com/28237/212884262-8d5252a8-3318-4f72-a3de-ae5f73b5a41c.png\">\r\n\r\nNote that the non-inherited `ListView` is to the left and that, even though it doesn't have focus, the first item is highlighted. If you <kbd>tab</kbd> into it you can cursor around, etc, just fine. 
On the other hand notice that the right `ListView` (which is a `CustomListView`) has no obvious highlighted item and if you tab into it nothing gets highlighted.\r\n\r\nFurther to this, if you (via keyboard) focus the inherited (right hand) `ListView` and then attempt to cursor down, the following exception will be raised:\r\n\r\n```\r\n\u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 Traceback (most recent call last) \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e\r\n\u2502 /Users/davep/develop/python/textual-sandbox/.venv/lib/python3.10/site-packages/textual/widgets/_ \u2502\r\n\u2502 list_view.py:127 in action_cursor_down \u2502\r\n\u2502 \u2502\r\n\u2502 124 \u2502 \u2502 self.emit_no_wait(self.Selected(self, selected_child)) \u2502\r\n\u2502 125 \u2502 \u2502\r\n\u2502 126 \u2502 def action_cursor_down(self) -> None: \u2502\r\n\u2502 \u2771 127 \u2502 \u2502 self.index += 1 \u2502\r\n\u2502 128 \u2502 \u2502\r\n\u2502 129 \u2502 def action_cursor_up(self) -> None: \u2502\r\n\u2502 130 \u2502 \u2502 self.index -= 1 \u2502\r\n\u2502 \u2502\r\n\u2502 \u256d\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500 locals \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256e \u2502\r\n\u2502 \u2502 self = CustomListView(pseudo_classes={'focus', 'focus-within'}) \u2502 \u2502\r\n\u2502 \u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f \u2502\r\n\u2570\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u256f\r\nTypeError: unsupported operand type(s) for +=: 'NoneType' and 'int'\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom textual import events\nfrom textual.await_remove import AwaitRemove\nfrom textual.binding import Binding\nfrom textual.containers import Vertical\nfrom textual.geometry import clamp\nfrom textual.message import Message\nfrom textual.reactive import reactive\nfrom textual.widget import AwaitMount\nfrom textual.widgets._list_item import ListItem\n\n\nclass ListView(Vertical, can_focus=True, can_focus_children=False):\n \"\"\"Displays a vertical list of `ListItem`s which can be highlighted\n and selected using the mouse or keyboard.\n\n 
Attributes:\n index: The index in the list that's currently highlighted.\n \"\"\"\n\n BINDINGS = [\n Binding(\"enter\", \"select_cursor\", \"Select\", show=False),\n Binding(\"up\", \"cursor_up\", \"Cursor Up\", show=False),\n Binding(\"down\", \"cursor_down\", \"Cursor Down\", show=False),\n ]\n\n index = reactive(0, always_update=True)\n\n def __init__(\n self,\n *children: ListItem,\n initial_index: int | None = 0,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> None:\n \"\"\"\n Args:\n *children: The ListItems to display in the list.\n initial_index: The index that should be highlighted when the list is first mounted.\n name: The name of the widget.\n id: The unique ID of the widget used in CSS/query selection.\n classes: The CSS classes of the widget.\n \"\"\"\n super().__init__(*children, name=name, id=id, classes=classes)\n self.index = initial_index\n\n @property\n def highlighted_child(self) -> ListItem | None:\n \"\"\"ListItem | None: The currently highlighted ListItem,\n or None if nothing is highlighted.\n \"\"\"\n if self.index is None:\n return None\n elif 0 <= self.index < len(self.children):\n return self.children[self.index]\n\n def validate_index(self, index: int | None) -> int | None:\n \"\"\"Clamp the index to the valid range, or set to None if there's nothing to highlight.\"\"\"\n if not self.children or index is None:\n return None\n return self._clamp_index(index)\n\n def _clamp_index(self, index: int) -> int:\n \"\"\"Clamp the index to a valid value given the current list of children\"\"\"\n last_index = max(len(self.children) - 1, 0)\n return clamp(index, 0, last_index)\n\n def _is_valid_index(self, index: int | None) -> bool:\n \"\"\"Return True if the current index is valid given the current list of children\"\"\"\n if index is None:\n return False\n return 0 <= index < len(self.children)\n\n def watch_index(self, old_index: int, new_index: int) -> None:\n \"\"\"Updates the highlighting when the index changes.\"\"\"\n if self._is_valid_index(old_index):\n old_child = self.children[old_index]\n old_child.highlighted = False\n if self._is_valid_index(new_index):\n new_child = self.children[new_index]\n new_child.highlighted = True\n else:\n new_child = None\n\n self._scroll_highlighted_region()\n self.emit_no_wait(self.Highlighted(self, new_child))\n\n def append(self, item: ListItem) -> AwaitMount:\n \"\"\"Append a new ListItem to the end of the ListView.\n\n Args:\n item (ListItem): The ListItem to append.\n\n Returns:\n AwaitMount: An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n await_mount = self.mount(item)\n if len(self) == 1:\n self.index = 0\n return await_mount\n\n def clear(self) -> AwaitRemove:\n \"\"\"Clear all items from the ListView.\n\n Returns:\n AwaitRemove: An awaitable that yields control to the event loop until\n the DOM has been updated to reflect all children being removed.\n \"\"\"\n await_remove = self.query(\"ListView > ListItem\").remove()\n self.index = None\n return await_remove\n\n def action_select_cursor(self) -> None:\n selected_child = self.highlighted_child\n self.emit_no_wait(self.Selected(self, selected_child))\n\n def action_cursor_down(self) -> None:\n self.index += 1\n\n def action_cursor_up(self) -> None:\n self.index -= 1\n\n def on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:\n self.focus()\n self.index = self.children.index(event.sender)\n self.emit_no_wait(self.Selected(self, 
event.sender))\n\n def _scroll_highlighted_region(self) -> None:\n \"\"\"Used to keep the highlighted index within vision\"\"\"\n if self.highlighted_child is not None:\n self.scroll_to_widget(self.highlighted_child, animate=False)\n\n def __len__(self):\n return len(self.children)\n\n class Highlighted(Message, bubble=True):\n \"\"\"Emitted when the highlighted item changes. Highlighted item is controlled using up/down keys.\n\n Attributes:\n item (ListItem | None): The highlighted item, if there is one highlighted.\n \"\"\"\n\n def __init__(self, sender: ListView, item: ListItem | None) -> None:\n super().__init__(sender)\n self.item = item\n\n class Selected(Message, bubble=True):\n \"\"\"Emitted when a list item is selected, e.g. when you press the enter key on it\n\n Attributes:\n item (ListItem): The selected item.\n \"\"\"\n\n def __init__(self, sender: ListView, item: ListItem) -> None:\n super().__init__(sender)\n self.item = item\n", "path": "src/textual/widgets/_list_view.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom textual import events\nfrom textual.await_remove import AwaitRemove\nfrom textual.binding import Binding\nfrom textual.containers import Vertical\nfrom textual.geometry import clamp\nfrom textual.message import Message\nfrom textual.reactive import reactive\nfrom textual.widget import AwaitMount\nfrom textual.widgets._list_item import ListItem\n\n\nclass ListView(Vertical, can_focus=True, can_focus_children=False):\n \"\"\"Displays a vertical list of `ListItem`s which can be highlighted\n and selected using the mouse or keyboard.\n\n Attributes:\n index: The index in the list that's currently highlighted.\n \"\"\"\n\n DEFAULT_CSS = \"\"\"\n ListView {\n scrollbar-size-vertical: 2;\n }\n \"\"\"\n\n BINDINGS = [\n Binding(\"down\", \"cursor_down\", \"Down\", show=False),\n Binding(\"up\", \"cursor_up\", \"Up\", show=False),\n Binding(\"enter\", \"select_cursor\", \"Select\", show=False),\n ]\n\n index = reactive(0, always_update=True)\n\n def __init__(\n self,\n *children: ListItem,\n initial_index: int | None = 0,\n name: str | None = None,\n id: str | None = None,\n classes: str | None = None,\n ) -> None:\n \"\"\"\n Args:\n *children: The ListItems to display in the list.\n initial_index: The index that should be highlighted when the list is first mounted.\n name: The name of the widget.\n id: The unique ID of the widget used in CSS/query selection.\n classes: The CSS classes of the widget.\n \"\"\"\n super().__init__(*children, name=name, id=id, classes=classes)\n self._index = initial_index\n\n def on_mount(self) -> None:\n \"\"\"Ensure the ListView is fully-settled after mounting.\"\"\"\n self.index = self._index\n\n @property\n def highlighted_child(self) -> ListItem | None:\n \"\"\"ListItem | None: The currently highlighted ListItem,\n or None if nothing is highlighted.\n \"\"\"\n if self.index is None:\n return None\n elif 0 <= self.index < len(self.children):\n return self.children[self.index]\n\n def validate_index(self, index: int | None) -> int | None:\n \"\"\"Clamp the index to the valid range, or set to None if there's nothing to highlight.\"\"\"\n if not self.children or index is None:\n return None\n return self._clamp_index(index)\n\n def _clamp_index(self, index: int) -> int:\n \"\"\"Clamp the index to a valid value given the current list of children\"\"\"\n last_index = max(len(self.children) - 1, 0)\n return clamp(index, 0, last_index)\n\n def _is_valid_index(self, index: int | None) -> bool:\n \"\"\"Return True if 
the current index is valid given the current list of children\"\"\"\n if index is None:\n return False\n return 0 <= index < len(self.children)\n\n def watch_index(self, old_index: int, new_index: int) -> None:\n \"\"\"Updates the highlighting when the index changes.\"\"\"\n if self._is_valid_index(old_index):\n old_child = self.children[old_index]\n old_child.highlighted = False\n if self._is_valid_index(new_index):\n new_child = self.children[new_index]\n new_child.highlighted = True\n else:\n new_child = None\n\n self._scroll_highlighted_region()\n self.emit_no_wait(self.Highlighted(self, new_child))\n\n def append(self, item: ListItem) -> AwaitMount:\n \"\"\"Append a new ListItem to the end of the ListView.\n\n Args:\n item (ListItem): The ListItem to append.\n\n Returns:\n AwaitMount: An awaitable that yields control to the event loop\n until the DOM has been updated with the new child item.\n \"\"\"\n await_mount = self.mount(item)\n if len(self) == 1:\n self.index = 0\n return await_mount\n\n def clear(self) -> AwaitRemove:\n \"\"\"Clear all items from the ListView.\n\n Returns:\n AwaitRemove: An awaitable that yields control to the event loop until\n the DOM has been updated to reflect all children being removed.\n \"\"\"\n await_remove = self.query(\"ListView > ListItem\").remove()\n self.index = None\n return await_remove\n\n def action_select_cursor(self) -> None:\n selected_child = self.highlighted_child\n self.emit_no_wait(self.Selected(self, selected_child))\n\n def action_cursor_down(self) -> None:\n self.index += 1\n\n def action_cursor_up(self) -> None:\n self.index -= 1\n\n def on_list_item__child_clicked(self, event: ListItem._ChildClicked) -> None:\n self.focus()\n self.index = self.children.index(event.sender)\n self.emit_no_wait(self.Selected(self, event.sender))\n\n def _scroll_highlighted_region(self) -> None:\n \"\"\"Used to keep the highlighted index within vision\"\"\"\n if self.highlighted_child is not None:\n self.scroll_to_widget(self.highlighted_child, animate=False)\n\n def __len__(self):\n return len(self.children)\n\n class Highlighted(Message, bubble=True):\n \"\"\"Emitted when the highlighted item changes. Highlighted item is controlled using up/down keys.\n\n Attributes:\n item (ListItem | None): The highlighted item, if there is one highlighted.\n \"\"\"\n\n def __init__(self, sender: ListView, item: ListItem | None) -> None:\n super().__init__(sender)\n self.item = item\n\n class Selected(Message, bubble=True):\n \"\"\"Emitted when a list item is selected, e.g. when you press the enter key on it\n\n Attributes:\n item (ListItem): The selected item.\n \"\"\"\n\n def __init__(self, sender: ListView, item: ListItem) -> None:\n super().__init__(sender)\n self.item = item\n", "path": "src/textual/widgets/_list_view.py"}]} | 2,762 | 152 |
gh_patches_debug_44259 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-1423 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect error is thrown when id field is not provided for a column object during a batch update
## Description
The following `PATCH` request to `/tables/{id}`
```
{
"columns": [
{
"name": "id",
"type": "INTEGER"
},
{
"name": "Center",
"type": "TEXT"
}
]
}
```
fails with an error `{code: 4999, message: "local variable 'name' referenced before assignment", field: null, detail: null}`.
This is happening because https://github.com/centerofci/mathesar/blob/a8bad8268c8634bc766fa2e4fde271f4ec1a165e/db/columns/operations/alter.py#L215
always expects a name, which is fetched via the column `id` that is missing from the patch request.
## Expected behavior
The `id` field of the column object should be required when batch updating columns via `/tables/{id}`.
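For illustration, a batch update shaped the way the API should then require it would carry each column's `id`. A sketch using `requests` (the host, table id, and column ids below are hypothetical; real column ids come from the table's columns endpoint):

```python
import requests

payload = {
    "columns": [
        {"id": 1, "name": "id", "type": "INTEGER"},
        {"id": 2, "name": "Center", "type": "TEXT"},
    ]
}
# PATCH /api/db/v0/tables/{id}/ with every column carrying its id
response = requests.patch("http://localhost:8000/api/db/v0/tables/1/", json=payload)
print(response.status_code, response.json())
```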
## To Reproduce
Using API
- Create a table using `/api/db/v0/tables/` API
- Send a patch request to `/api/db/v0/tables/{id}`
Using frontend client
- Create a table by importing a csv file
- Error will be thrown when clicking on `Finish import`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mathesar/api/serializers/tables.py`
Content:
```
1 from django.urls import reverse
2 from psycopg2.errors import DuplicateTable
3 from rest_framework import serializers, status
4 from sqlalchemy.exc import ProgrammingError
5
6 from db.types.base import get_db_type_enum_from_id
7
8 from mathesar.api.exceptions.validation_exceptions.exceptions import (
9 ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,
10 MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier
11 )
12 from mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException
13 from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException
14 from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
15 from mathesar.api.serializers.columns import SimpleColumnSerializer
16 from mathesar.models import Table, DataFile
17 from mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table
18
19
20 class TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):
21 columns = SimpleColumnSerializer(many=True, required=False)
22 records_url = serializers.SerializerMethodField()
23 constraints_url = serializers.SerializerMethodField()
24 columns_url = serializers.SerializerMethodField()
25 type_suggestions_url = serializers.SerializerMethodField()
26 previews_url = serializers.SerializerMethodField()
27 name = serializers.CharField(required=False, allow_blank=True, default='')
28 data_files = serializers.PrimaryKeyRelatedField(
29 required=False, many=True, queryset=DataFile.objects.all()
30 )
31
32 class Meta:
33 model = Table
34 fields = ['id', 'name', 'schema', 'created_at', 'updated_at', 'import_verified',
35 'columns', 'records_url', 'constraints_url', 'columns_url',
36 'type_suggestions_url', 'previews_url', 'data_files',
37 'has_dependencies']
38
39 def get_records_url(self, obj):
40 if isinstance(obj, Table):
41 # Only get records if we are serializing an existing table
42 request = self.context['request']
43 return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))
44 else:
45 return None
46
47 def get_constraints_url(self, obj):
48 if isinstance(obj, Table):
49 # Only get constraints if we are serializing an existing table
50 request = self.context['request']
51 return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))
52 else:
53 return None
54
55 def get_columns_url(self, obj):
56 if isinstance(obj, Table):
57 # Only get columns if we are serializing an existing table
58 request = self.context['request']
59 return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))
60 else:
61 return None
62
63 def get_type_suggestions_url(self, obj):
64 if isinstance(obj, Table):
65 # Only get type suggestions if we are serializing an existing table
66 request = self.context['request']
67 return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))
68 else:
69 return None
70
71 def get_previews_url(self, obj):
72 if isinstance(obj, Table):
73 # Only get previews if we are serializing an existing table
74 request = self.context['request']
75 return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))
76 else:
77 return None
78
79 def validate_data_files(self, data_files):
80 if data_files and len(data_files) > 1:
81 raise MultipleDataFileAPIException()
82 return data_files
83
84 def create(self, validated_data):
85 schema = validated_data['schema']
86 data_files = validated_data.get('data_files')
87 name = validated_data.get('name') or gen_table_name(schema, data_files)
88
89 try:
90 if data_files:
91 table = create_table_from_datafile(data_files, name, schema)
92 else:
93 table = create_empty_table(name, schema)
94 except ProgrammingError as e:
95 if type(e.orig) == DuplicateTable:
96 raise DuplicateTableAPIException(
97 e,
98 message=f"Relation {validated_data['name']} already exists in schema {schema.id}",
99 field="name",
100 status_code=status.HTTP_400_BAD_REQUEST
101 )
102 else:
103 raise ProgrammingAPIException(e)
104 return table
105
106
107 class TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):
108 name = serializers.CharField(required=False)
109 columns = SimpleColumnSerializer(many=True)
110
111 def validate_columns(self, columns):
112 table = self.context['table']
113 column_names = [col["name"] for col in columns]
114 if not len(column_names) == len(set(column_names)):
115 raise DistinctColumnRequiredAPIException()
116 if not len(columns) == len(table.sa_columns):
117 raise ColumnSizeMismatchAPIException()
118 for column in columns:
119 db_type_id = column['type']
120 db_type = get_db_type_enum_from_id(db_type_id)
121 if db_type is None:
122 raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)
123 return columns
124
```
Path: `mathesar/api/db/viewsets/tables.py`
Content:
```
1 from django_filters import rest_framework as filters
2 from psycopg2.errors import InvalidTextRepresentation, CheckViolation
3 from rest_framework import status, viewsets
4 from rest_framework.decorators import action
5 from rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin
6 from rest_framework.response import Response
7 from sqlalchemy.exc import DataError, IntegrityError
8
9 from mathesar.api.exceptions.database_exceptions import (
10 exceptions as database_api_exceptions,
11 base_exceptions as database_base_api_exceptions,
12 )
13 from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions
14 from db.types.exceptions import UnsupportedTypeException
15 from mathesar.api.dj_filters import TableFilter
16 from mathesar.api.pagination import DefaultLimitOffsetPagination
17 from mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer
18 from mathesar.models import Table
19 from mathesar.utils.tables import (
20 get_table_column_types
21 )
22
23
24 class TableViewSet(CreateModelMixin, RetrieveModelMixin, ListModelMixin, viewsets.GenericViewSet):
25 serializer_class = TableSerializer
26 pagination_class = DefaultLimitOffsetPagination
27 filter_backends = (filters.DjangoFilterBackend,)
28 filterset_class = TableFilter
29
30 def get_queryset(self):
31 return Table.objects.all().order_by('-created_at')
32
33 def partial_update(self, request, pk=None):
34 serializer = TableSerializer(
35 data=request.data, context={'request': request}, partial=True
36 )
37 serializer.is_valid(raise_exception=True)
38 table = self.get_object()
39
40 # Save the fields that are stored in the model.
41 present_model_fields = []
42 for model_field in table.MODEL_FIELDS:
43 if model_field in serializer.validated_data:
44 setattr(table, model_field, serializer.validated_data[model_field])
45 present_model_fields.append(model_field)
46 table.save(update_fields=present_model_fields)
47 for key in present_model_fields:
48 del serializer.validated_data[key]
49
50 # Save the fields that are stored in the underlying DB.
51 try:
52 table.update_sa_table(serializer.validated_data)
53 except ValueError as e:
54 raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
55
56 # Reload the table to avoid cached properties
57 table = self.get_object()
58 serializer = TableSerializer(table, context={'request': request})
59 return Response(serializer.data)
60
61 def destroy(self, request, pk=None):
62 table = self.get_object()
63 table.delete_sa_table()
64 table.delete()
65 return Response(status=status.HTTP_204_NO_CONTENT)
66
67 @action(methods=['get'], detail=True)
68 def type_suggestions(self, request, pk=None):
69 table = self.get_object()
70 col_types = get_table_column_types(table)
71 return Response(col_types)
72
73 @action(methods=['post'], detail=True)
74 def previews(self, request, pk=None):
75 table = self.get_object()
76 serializer = TablePreviewSerializer(data=request.data, context={"request": request, 'table': table})
77 serializer.is_valid(raise_exception=True)
78 columns_field_key = "columns"
79 columns = serializer.data[columns_field_key]
80 table_data = TableSerializer(table, context={"request": request}).data
81 try:
82 preview_records = table.get_preview(columns)
83 except (DataError, IntegrityError) as e:
84 if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:
85 raise database_api_exceptions.InvalidTypeCastAPIException(
86 e,
87 status_code=status.HTTP_400_BAD_REQUEST,
88 field='columns'
89 )
90 else:
91 raise database_base_api_exceptions.IntegrityAPIException(
92 e,
93 status_code=status.HTTP_400_BAD_REQUEST,
94 field='columns'
95 )
96 except UnsupportedTypeException as e:
97 raise database_api_exceptions.UnsupportedTypeAPIException(
98 e,
99 field='columns',
100 status_code=status.HTTP_400_BAD_REQUEST
101 )
102 table_data.update(
103 {
104 # There's no way to reflect actual column data without
105 # creating a view, so we just use the submission, assuming
106 # no errors means we changed to the desired names and types
107 "columns": columns,
108 "records": preview_records
109 }
110 )
111
112 return Response(table_data)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mathesar/api/db/viewsets/tables.py b/mathesar/api/db/viewsets/tables.py
--- a/mathesar/api/db/viewsets/tables.py
+++ b/mathesar/api/db/viewsets/tables.py
@@ -10,7 +10,6 @@
exceptions as database_api_exceptions,
base_exceptions as database_base_api_exceptions,
)
-from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions
from db.types.exceptions import UnsupportedTypeException
from mathesar.api.dj_filters import TableFilter
from mathesar.api.pagination import DefaultLimitOffsetPagination
@@ -31,27 +30,12 @@
return Table.objects.all().order_by('-created_at')
def partial_update(self, request, pk=None):
+ table = self.get_object()
serializer = TableSerializer(
- data=request.data, context={'request': request}, partial=True
+ table, data=request.data, context={'request': request}, partial=True
)
serializer.is_valid(raise_exception=True)
- table = self.get_object()
-
- # Save the fields that are stored in the model.
- present_model_fields = []
- for model_field in table.MODEL_FIELDS:
- if model_field in serializer.validated_data:
- setattr(table, model_field, serializer.validated_data[model_field])
- present_model_fields.append(model_field)
- table.save(update_fields=present_model_fields)
- for key in present_model_fields:
- del serializer.validated_data[key]
-
- # Save the fields that are stored in the underlying DB.
- try:
- table.update_sa_table(serializer.validated_data)
- except ValueError as e:
- raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
+ serializer.save()
# Reload the table to avoid cached properties
table = self.get_object()
diff --git a/mathesar/api/serializers/tables.py b/mathesar/api/serializers/tables.py
--- a/mathesar/api/serializers/tables.py
+++ b/mathesar/api/serializers/tables.py
@@ -1,6 +1,7 @@
from django.urls import reverse
from psycopg2.errors import DuplicateTable
from rest_framework import serializers, status
+from rest_framework.exceptions import ValidationError
from sqlalchemy.exc import ProgrammingError
from db.types.base import get_db_type_enum_from_id
@@ -11,6 +12,8 @@
)
from mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException
from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException
+from mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions
+from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions
from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin
from mathesar.api.serializers.columns import SimpleColumnSerializer
from mathesar.models import Table, DataFile
@@ -103,6 +106,35 @@
raise ProgrammingAPIException(e)
return table
+ def update(self, instance, validated_data):
+ if self.partial:
+ # Save the fields that are stored in the model.
+ present_model_fields = []
+ for model_field in instance.MODEL_FIELDS:
+ if model_field in validated_data:
+ setattr(instance, model_field, validated_data[model_field])
+ present_model_fields.append(model_field)
+ instance.save(update_fields=present_model_fields)
+ for key in present_model_fields:
+ del validated_data[key]
+ # Save the fields that are stored in the underlying DB.
+ try:
+ instance.update_sa_table(validated_data)
+ except ValueError as e:
+ raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)
+ return instance
+
+ def validate(self, data):
+ if self.partial:
+ columns = data.get('columns', None)
+ if columns is not None:
+ for col in columns:
+ id = col.get('id', None)
+ if id is None:
+ message = "'id' field is required while batch updating columns."
+ raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)
+ return data
+
class TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):
name = serializers.CharField(required=False)
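The validation added in the serializer above follows a common DRF pattern; a stripped-down sketch of the same idea, independent of Mathesar's models and exception classes (the class and field definitions here are illustrative only):

```python
from rest_framework import serializers

class ColumnBatchSerializer(serializers.Serializer):
    columns = serializers.ListField(child=serializers.DictField(), required=False)

    def validate(self, data):
        # Only partial (batch) updates require every column to carry its id.
        if self.partial:
            for col in data.get("columns") or []:
                if col.get("id") is None:
                    raise serializers.ValidationError(
                        "'id' field is required while batch updating columns."
                    )
        return data
```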
| {"golden_diff": "diff --git a/mathesar/api/db/viewsets/tables.py b/mathesar/api/db/viewsets/tables.py\n--- a/mathesar/api/db/viewsets/tables.py\n+++ b/mathesar/api/db/viewsets/tables.py\n@@ -10,7 +10,6 @@\n exceptions as database_api_exceptions,\n base_exceptions as database_base_api_exceptions,\n )\n-from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\n from db.types.exceptions import UnsupportedTypeException\n from mathesar.api.dj_filters import TableFilter\n from mathesar.api.pagination import DefaultLimitOffsetPagination\n@@ -31,27 +30,12 @@\n return Table.objects.all().order_by('-created_at')\n \n def partial_update(self, request, pk=None):\n+ table = self.get_object()\n serializer = TableSerializer(\n- data=request.data, context={'request': request}, partial=True\n+ table, data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n- table = self.get_object()\n-\n- # Save the fields that are stored in the model.\n- present_model_fields = []\n- for model_field in table.MODEL_FIELDS:\n- if model_field in serializer.validated_data:\n- setattr(table, model_field, serializer.validated_data[model_field])\n- present_model_fields.append(model_field)\n- table.save(update_fields=present_model_fields)\n- for key in present_model_fields:\n- del serializer.validated_data[key]\n-\n- # Save the fields that are stored in the underlying DB.\n- try:\n- table.update_sa_table(serializer.validated_data)\n- except ValueError as e:\n- raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n+ serializer.save()\n \n # Reload the table to avoid cached properties\n table = self.get_object()\ndiff --git a/mathesar/api/serializers/tables.py b/mathesar/api/serializers/tables.py\n--- a/mathesar/api/serializers/tables.py\n+++ b/mathesar/api/serializers/tables.py\n@@ -1,6 +1,7 @@\n from django.urls import reverse\n from psycopg2.errors import DuplicateTable\n from rest_framework import serializers, status\n+from rest_framework.exceptions import ValidationError\n from sqlalchemy.exc import ProgrammingError\n \n from db.types.base import get_db_type_enum_from_id\n@@ -11,6 +12,8 @@\n )\n from mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException\n from mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\n+from mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions\n+from mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\n from mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\n from mathesar.api.serializers.columns import SimpleColumnSerializer\n from mathesar.models import Table, DataFile\n@@ -103,6 +106,35 @@\n raise ProgrammingAPIException(e)\n return table\n \n+ def update(self, instance, validated_data):\n+ if self.partial:\n+ # Save the fields that are stored in the model.\n+ present_model_fields = []\n+ for model_field in instance.MODEL_FIELDS:\n+ if model_field in validated_data:\n+ setattr(instance, model_field, validated_data[model_field])\n+ present_model_fields.append(model_field)\n+ instance.save(update_fields=present_model_fields)\n+ for key in present_model_fields:\n+ del validated_data[key]\n+ # Save the fields that are stored in the underlying DB.\n+ try:\n+ instance.update_sa_table(validated_data)\n+ except ValueError as e:\n+ raise base_api_exceptions.ValueAPIException(e, 
status_code=status.HTTP_400_BAD_REQUEST)\n+ return instance\n+\n+ def validate(self, data):\n+ if self.partial:\n+ columns = data.get('columns', None)\n+ if columns is not None:\n+ for col in columns:\n+ id = col.get('id', None)\n+ if id is None:\n+ message = \"'id' field is required while batch updating columns.\"\n+ raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)\n+ return data\n+\n \n class TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n name = serializers.CharField(required=False)\n", "issue": "Incorrect error is thrown when id field is not provided for a column object during a batch update\n## Description\r\nThe following `Patch` request to `/tables/{id}` \r\n\r\n```\r\n\r\n{\r\n \"columns\": [\r\n {\r\n \"name\": \"id\",\r\n \"type\": \"INTEGER\"\r\n },\r\n {\r\n \"name\": \"Center\",\r\n \"type\": \"TEXT\"\r\n }\r\n ]\r\n}\r\n```\r\n\r\n fails with an error `{code: 4999, message: \"local variable 'name' referenced before assignment\", field: null, detail: null}`.\r\n\r\nThis is happening due to https://github.com/centerofci/mathesar/blob/a8bad8268c8634bc766fa2e4fde271f4ec1a165e/db/columns/operations/alter.py#L215\r\n\r\nalways expecting a name which is fetched from the `column id` that is missing from the patch request.\r\n\r\n## Expected behavior\r\n\r\n`id` field of the column object should be required when batch updating using `/tables/{id}`.\r\n\r\n## To Reproduce\r\nUsing API \r\n- Create a table using `/api/db/v0/tables/` API \r\n- Send a patch request to `/api/db/v0/tables/{id}`\r\n\r\nUsing frontend client\r\n- Create a table by importing a csv file\r\n- Error will be thrown when clicking on `Finish import`\r\n\r\n\n", "before_files": [{"content": "from django.urls import reverse\nfrom psycopg2.errors import DuplicateTable\nfrom rest_framework import serializers, status\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom db.types.base import get_db_type_enum_from_id\n\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,\n MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier\n)\nfrom mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.columns import SimpleColumnSerializer\nfrom mathesar.models import Table, DataFile\nfrom mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table\n\n\nclass TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, required=False)\n records_url = serializers.SerializerMethodField()\n constraints_url = serializers.SerializerMethodField()\n columns_url = serializers.SerializerMethodField()\n type_suggestions_url = serializers.SerializerMethodField()\n previews_url = serializers.SerializerMethodField()\n name = serializers.CharField(required=False, allow_blank=True, default='')\n data_files = serializers.PrimaryKeyRelatedField(\n required=False, many=True, queryset=DataFile.objects.all()\n )\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at', 'import_verified',\n 'columns', 'records_url', 'constraints_url', 'columns_url',\n 'type_suggestions_url', 'previews_url', 'data_files',\n 'has_dependencies']\n\n def 
get_records_url(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_constraints_url(self, obj):\n if isinstance(obj, Table):\n # Only get constraints if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, Table):\n # Only get columns if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_type_suggestions_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_previews_url(self, obj):\n if isinstance(obj, Table):\n # Only get previews if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def validate_data_files(self, data_files):\n if data_files and len(data_files) > 1:\n raise MultipleDataFileAPIException()\n return data_files\n\n def create(self, validated_data):\n schema = validated_data['schema']\n data_files = validated_data.get('data_files')\n name = validated_data.get('name') or gen_table_name(schema, data_files)\n\n try:\n if data_files:\n table = create_table_from_datafile(data_files, name, schema)\n else:\n table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise DuplicateTableAPIException(\n e,\n message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n field=\"name\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n else:\n raise ProgrammingAPIException(e)\n return table\n\n\nclass TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n name = serializers.CharField(required=False)\n columns = SimpleColumnSerializer(many=True)\n\n def validate_columns(self, columns):\n table = self.context['table']\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise DistinctColumnRequiredAPIException()\n if not len(columns) == len(table.sa_columns):\n raise ColumnSizeMismatchAPIException()\n for column in columns:\n db_type_id = column['type']\n db_type = get_db_type_enum_from_id(db_type_id)\n if db_type is None:\n raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)\n return columns\n", "path": "mathesar/api/serializers/tables.py"}, {"content": "from django_filters import rest_framework as filters\nfrom psycopg2.errors import InvalidTextRepresentation, CheckViolation\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import DataError, IntegrityError\n\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions,\n base_exceptions as database_base_api_exceptions,\n)\nfrom 
mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom db.types.exceptions import UnsupportedTypeException\nfrom mathesar.api.dj_filters import TableFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer\nfrom mathesar.models import Table\nfrom mathesar.utils.tables import (\n get_table_column_types\n)\n\n\nclass TableViewSet(CreateModelMixin, RetrieveModelMixin, ListModelMixin, viewsets.GenericViewSet):\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n def partial_update(self, request, pk=None):\n serializer = TableSerializer(\n data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n table = self.get_object()\n\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in table.MODEL_FIELDS:\n if model_field in serializer.validated_data:\n setattr(table, model_field, serializer.validated_data[model_field])\n present_model_fields.append(model_field)\n table.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del serializer.validated_data[key]\n\n # Save the fields that are stored in the underlying DB.\n try:\n table.update_sa_table(serializer.validated_data)\n except ValueError as e:\n raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n\n # Reload the table to avoid cached properties\n table = self.get_object()\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n table = self.get_object()\n table.delete_sa_table()\n table.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def type_suggestions(self, request, pk=None):\n table = self.get_object()\n col_types = get_table_column_types(table)\n return Response(col_types)\n\n @action(methods=['post'], detail=True)\n def previews(self, request, pk=None):\n table = self.get_object()\n serializer = TablePreviewSerializer(data=request.data, context={\"request\": request, 'table': table})\n serializer.is_valid(raise_exception=True)\n columns_field_key = \"columns\"\n columns = serializer.data[columns_field_key]\n table_data = TableSerializer(table, context={\"request\": request}).data\n try:\n preview_records = table.get_preview(columns)\n except (DataError, IntegrityError) as e:\n if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n field='columns'\n )\n else:\n raise database_base_api_exceptions.IntegrityAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n field='columns'\n )\n except UnsupportedTypeException as e:\n raise database_api_exceptions.UnsupportedTypeAPIException(\n e,\n field='columns',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n table_data.update(\n {\n # There's no way to reflect actual column data without\n # creating a view, so we just use the submission, assuming\n # no errors means we changed to the desired names and types\n \"columns\": columns,\n \"records\": preview_records\n }\n )\n\n return Response(table_data)\n", "path": 
"mathesar/api/db/viewsets/tables.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom psycopg2.errors import DuplicateTable\nfrom rest_framework import serializers, status\nfrom rest_framework.exceptions import ValidationError\nfrom sqlalchemy.exc import ProgrammingError\n\nfrom db.types.base import get_db_type_enum_from_id\n\nfrom mathesar.api.exceptions.validation_exceptions.exceptions import (\n ColumnSizeMismatchAPIException, DistinctColumnRequiredAPIException,\n MultipleDataFileAPIException, UnknownDatabaseTypeIdentifier\n)\nfrom mathesar.api.exceptions.database_exceptions.exceptions import DuplicateTableAPIException\nfrom mathesar.api.exceptions.database_exceptions.base_exceptions import ProgrammingAPIException\nfrom mathesar.api.exceptions.validation_exceptions import base_exceptions as base_validation_exceptions\nfrom mathesar.api.exceptions.generic_exceptions import base_exceptions as base_api_exceptions\nfrom mathesar.api.exceptions.mixins import MathesarErrorMessageMixin\nfrom mathesar.api.serializers.columns import SimpleColumnSerializer\nfrom mathesar.models import Table, DataFile\nfrom mathesar.utils.tables import gen_table_name, create_table_from_datafile, create_empty_table\n\n\nclass TableSerializer(MathesarErrorMessageMixin, serializers.ModelSerializer):\n columns = SimpleColumnSerializer(many=True, required=False)\n records_url = serializers.SerializerMethodField()\n constraints_url = serializers.SerializerMethodField()\n columns_url = serializers.SerializerMethodField()\n type_suggestions_url = serializers.SerializerMethodField()\n previews_url = serializers.SerializerMethodField()\n name = serializers.CharField(required=False, allow_blank=True, default='')\n data_files = serializers.PrimaryKeyRelatedField(\n required=False, many=True, queryset=DataFile.objects.all()\n )\n\n class Meta:\n model = Table\n fields = ['id', 'name', 'schema', 'created_at', 'updated_at', 'import_verified',\n 'columns', 'records_url', 'constraints_url', 'columns_url',\n 'type_suggestions_url', 'previews_url', 'data_files',\n 'has_dependencies']\n\n def get_records_url(self, obj):\n if isinstance(obj, Table):\n # Only get records if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-record-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_constraints_url(self, obj):\n if isinstance(obj, Table):\n # Only get constraints if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-constraint-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_columns_url(self, obj):\n if isinstance(obj, Table):\n # Only get columns if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-column-list', kwargs={'table_pk': obj.pk}))\n else:\n return None\n\n def get_type_suggestions_url(self, obj):\n if isinstance(obj, Table):\n # Only get type suggestions if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-type-suggestions', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def get_previews_url(self, obj):\n if isinstance(obj, Table):\n # Only get previews if we are serializing an existing table\n request = self.context['request']\n return request.build_absolute_uri(reverse('table-previews', kwargs={'pk': obj.pk}))\n else:\n return None\n\n def validate_data_files(self, 
data_files):\n if data_files and len(data_files) > 1:\n raise MultipleDataFileAPIException()\n return data_files\n\n def create(self, validated_data):\n schema = validated_data['schema']\n data_files = validated_data.get('data_files')\n name = validated_data.get('name') or gen_table_name(schema, data_files)\n\n try:\n if data_files:\n table = create_table_from_datafile(data_files, name, schema)\n else:\n table = create_empty_table(name, schema)\n except ProgrammingError as e:\n if type(e.orig) == DuplicateTable:\n raise DuplicateTableAPIException(\n e,\n message=f\"Relation {validated_data['name']} already exists in schema {schema.id}\",\n field=\"name\",\n status_code=status.HTTP_400_BAD_REQUEST\n )\n else:\n raise ProgrammingAPIException(e)\n return table\n\n def update(self, instance, validated_data):\n if self.partial:\n # Save the fields that are stored in the model.\n present_model_fields = []\n for model_field in instance.MODEL_FIELDS:\n if model_field in validated_data:\n setattr(instance, model_field, validated_data[model_field])\n present_model_fields.append(model_field)\n instance.save(update_fields=present_model_fields)\n for key in present_model_fields:\n del validated_data[key]\n # Save the fields that are stored in the underlying DB.\n try:\n instance.update_sa_table(validated_data)\n except ValueError as e:\n raise base_api_exceptions.ValueAPIException(e, status_code=status.HTTP_400_BAD_REQUEST)\n return instance\n\n def validate(self, data):\n if self.partial:\n columns = data.get('columns', None)\n if columns is not None:\n for col in columns:\n id = col.get('id', None)\n if id is None:\n message = \"'id' field is required while batch updating columns.\"\n raise base_validation_exceptions.MathesarValidationException(ValidationError, message=message)\n return data\n\n\nclass TablePreviewSerializer(MathesarErrorMessageMixin, serializers.Serializer):\n name = serializers.CharField(required=False)\n columns = SimpleColumnSerializer(many=True)\n\n def validate_columns(self, columns):\n table = self.context['table']\n column_names = [col[\"name\"] for col in columns]\n if not len(column_names) == len(set(column_names)):\n raise DistinctColumnRequiredAPIException()\n if not len(columns) == len(table.sa_columns):\n raise ColumnSizeMismatchAPIException()\n for column in columns:\n db_type_id = column['type']\n db_type = get_db_type_enum_from_id(db_type_id)\n if db_type is None:\n raise UnknownDatabaseTypeIdentifier(db_type_id=db_type_id)\n return columns\n", "path": "mathesar/api/serializers/tables.py"}, {"content": "from django_filters import rest_framework as filters\nfrom psycopg2.errors import InvalidTextRepresentation, CheckViolation\nfrom rest_framework import status, viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin\nfrom rest_framework.response import Response\nfrom sqlalchemy.exc import DataError, IntegrityError\n\nfrom mathesar.api.exceptions.database_exceptions import (\n exceptions as database_api_exceptions,\n base_exceptions as database_base_api_exceptions,\n)\nfrom db.types.exceptions import UnsupportedTypeException\nfrom mathesar.api.dj_filters import TableFilter\nfrom mathesar.api.pagination import DefaultLimitOffsetPagination\nfrom mathesar.api.serializers.tables import TableSerializer, TablePreviewSerializer\nfrom mathesar.models import Table\nfrom mathesar.utils.tables import (\n get_table_column_types\n)\n\n\nclass TableViewSet(CreateModelMixin, RetrieveModelMixin, 
ListModelMixin, viewsets.GenericViewSet):\n serializer_class = TableSerializer\n pagination_class = DefaultLimitOffsetPagination\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = TableFilter\n\n def get_queryset(self):\n return Table.objects.all().order_by('-created_at')\n\n def partial_update(self, request, pk=None):\n table = self.get_object()\n serializer = TableSerializer(\n table, data=request.data, context={'request': request}, partial=True\n )\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n # Reload the table to avoid cached properties\n table = self.get_object()\n serializer = TableSerializer(table, context={'request': request})\n return Response(serializer.data)\n\n def destroy(self, request, pk=None):\n table = self.get_object()\n table.delete_sa_table()\n table.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n @action(methods=['get'], detail=True)\n def type_suggestions(self, request, pk=None):\n table = self.get_object()\n col_types = get_table_column_types(table)\n return Response(col_types)\n\n @action(methods=['post'], detail=True)\n def previews(self, request, pk=None):\n table = self.get_object()\n serializer = TablePreviewSerializer(data=request.data, context={\"request\": request, 'table': table})\n serializer.is_valid(raise_exception=True)\n columns_field_key = \"columns\"\n columns = serializer.data[columns_field_key]\n table_data = TableSerializer(table, context={\"request\": request}).data\n try:\n preview_records = table.get_preview(columns)\n except (DataError, IntegrityError) as e:\n if type(e.orig) == InvalidTextRepresentation or type(e.orig) == CheckViolation:\n raise database_api_exceptions.InvalidTypeCastAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n field='columns'\n )\n else:\n raise database_base_api_exceptions.IntegrityAPIException(\n e,\n status_code=status.HTTP_400_BAD_REQUEST,\n field='columns'\n )\n except UnsupportedTypeException as e:\n raise database_api_exceptions.UnsupportedTypeAPIException(\n e,\n field='columns',\n status_code=status.HTTP_400_BAD_REQUEST\n )\n table_data.update(\n {\n # There's no way to reflect actual column data without\n # creating a view, so we just use the submission, assuming\n # no errors means we changed to the desired names and types\n \"columns\": columns,\n \"records\": preview_records\n }\n )\n\n return Response(table_data)\n", "path": "mathesar/api/db/viewsets/tables.py"}]} | 3,003 | 928 |
gh_patches_debug_8203 | rasdani/github-patches | git_diff | beeware__toga-2049 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
iOS App has no attribute 'hide_cursor'
**Describe the bug**
Calling the method `hide_cursor()` on a `toga.App` causes the app to crash with:
```
2021-03-28 13:35:22.700314-0700 Find [938:323815] AttributeError: 'App' object has no attribute 'hide_cursor'
```
**To Reproduce**
Steps to reproduce the behavior:
1. Create a simple iOS App
2. Inside the main method of your application class (derived from `toga.App`), call `self.hide_cursor()`
3. App crashes with stacktrace
4. See error
```
Error in long running handler: 'App' object has no attribute 'hide_cursor'
Traceback (most recent call last):
File "/private/var/containers/Bundle/Application/75DCDE12-8E7D-4D0A-97D1-B999813DBD9B/Find.app/Library/Application Support/app_packages/toga/handlers.py", line 13, in long_running_task
delay = next(generator)
File "/private/var/containers/Bundle/Application/75DCDE12-8E7D-4D0A-97D1-B999813DBD9B/Find.app/Library/Application Support/app/find/app.py", line 84, in select_distance_task
self.hide_cursor()
File "/private/var/containers/Bundle/Application/75DCDE12-8E7D-4D0A-97D1-B999813DBD9B/Find.app/Library/Application Support/app_packages/toga/app.py", line 412, in hide_cursor
self._impl.hide_cursor()
AttributeError: 'App' object has no attribute 'hide_cursor'
```
**Expected behavior**
The method is documented [here](https://toga.readthedocs.io/en/latest/reference/api/app.html#toga.app.App.hide_cursor)
Not sure what to expect, really (the description, rather tautologically, says "hides cursors"), but at the very least not to crash?
What I was really trying to achieve was to hide the keyboard from view (also because the `focus()` method on non-input fields seems to be a no-op?).
**Screenshots**
If applicable, add screenshots to help explain your problem.
**Environment:**
- Operating System: iOS 14.4
- Python version: 3.8
- Software versions:
toga==0.2.15
toga-cocoa==0.2.15
toga-core==0.2.15
**Additional context**
It would be good to have a way to make the "soft" keyboard disappear in iOS, as it obscures half the screen, and Toga does not appear to have a means to achieve that.
--- END ISSUE ---
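Until the iOS backend grows these methods, one interim workaround in application code is to probe for the optional backend method before calling the public API. The sketch below is illustrative only: `ExampleApp` is a hypothetical app class, and `_impl` is a private Toga attribute (the backend object seen in the traceback) whose shape may change between releases.
```python
import toga


class ExampleApp(toga.App):
    def startup(self):
        self.main_window = toga.MainWindow(title="Example")
        self.main_window.show()

        # Only call hide_cursor() when the active backend actually provides it,
        # so platforms without the stub (iOS, Android) do not crash.
        if hasattr(self._impl, "hide_cursor"):
            self.hide_cursor()
```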
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `iOS/src/toga_iOS/app.py`
Content:
```
1 import asyncio
2
3 from rubicon.objc import objc_method
4 from rubicon.objc.eventloop import EventLoopPolicy, iOSLifecycle
5
6 from toga_iOS.libs import UIResponder
7 from toga_iOS.window import Window
8
9
10 class MainWindow(Window):
11 pass
12
13
14 class PythonAppDelegate(UIResponder):
15 @objc_method
16 def applicationDidBecomeActive_(self, application) -> None:
17 print("App became active.")
18
19 @objc_method
20 def applicationWillResignActive_(self, application) -> None:
21 print("App about to leave foreground.", flush=True)
22
23 @objc_method
24 def applicationDidEnterBackground_(self, application) -> None:
25 print("App entered background.")
26
27 @objc_method
28 def applicationWillEnterForeground_(self, application) -> None:
29 print("App about to enter foreground.")
30
31 @objc_method
32 def application_didFinishLaunchingWithOptions_(
33 self, application, launchOptions
34 ) -> bool:
35 print("App finished launching.")
36 App.app.native = application
37 App.app.create()
38 return True
39
40 @objc_method
41 def applicationWillTerminate_(self, application) -> None:
42 print("App about to Terminate.")
43
44 @objc_method
45 def application_didChangeStatusBarOrientation_(
46 self, application, oldStatusBarOrientation: int
47 ) -> None:
48 """This callback is invoked when rotating the device from landscape to portrait
49 and vice versa."""
50 App.app.interface.main_window.content.refresh()
51
52
53 class App:
54 def __init__(self, interface):
55 self.interface = interface
56 self.interface._impl = self
57 # Native instance doesn't exist until the lifecycle completes.
58 self.native = None
59
60 # Add a reference for the PythonAppDelegate class to use.
61 App.app = self
62
63 asyncio.set_event_loop_policy(EventLoopPolicy())
64 self.loop = asyncio.new_event_loop()
65
66 def create(self):
67 """Calls the startup method on the interface."""
68 self.interface._startup()
69
70 def open_document(self, fileURL):
71 """Add a new document to this app."""
72 pass
73
74 def main_loop(self):
75 # Main loop is non-blocking on iOS. The app loop is integrated with the
76 # main iOS event loop, so this call will return; however, it will leave
77 # the app in a state such that asyncio events will be scheduled on the
78 # iOS event loop.
79 self.loop.run_forever_cooperatively(lifecycle=iOSLifecycle())
80
81 def set_main_window(self, window):
82 pass
83
84 def show_about_dialog(self):
85 self.interface.factory.not_implemented("App.show_about_dialog()")
86
87 def beep(self):
88 self.interface.factory.not_implemented("App.beep()")
89
90 def exit(self):
91 pass
92
```
Path: `android/src/toga_android/app.py`
Content:
```
1 import asyncio
2
3 from rubicon.java import android_events
4
5 import toga
6 from toga.command import Group
7
8 from .libs.activity import IPythonApp, MainActivity
9 from .libs.android.graphics import Drawable
10 from .libs.android.view import Menu, MenuItem
11 from .window import Window
12
13 # `MainWindow` is defined here in `app.py`, not `window.py`, to mollify the test suite.
14 MainWindow = Window
15
16
17 class TogaApp(IPythonApp):
18 last_intent_requestcode = (
19 -1
20 ) # always increment before using it for invoking new Intents
21 running_intents = {} # dictionary for currently running Intents
22 menuitem_mapping = {} # dictionary for mapping menuitems to commands
23
24 def __init__(self, app):
25 super().__init__()
26 self._impl = app
27 MainActivity.setPythonApp(self)
28 print("Python app launched & stored in Android Activity class")
29
30 def onCreate(self):
31 print("Toga app: onCreate")
32
33 def onStart(self):
34 print("Toga app: onStart")
35
36 def onResume(self):
37 print("Toga app: onResume")
38
39 def onPause(self):
40 print("Toga app: onPause")
41
42 def onStop(self):
43 print("Toga app: onStop")
44
45 def onDestroy(self):
46 print("Toga app: onDestroy")
47
48 def onRestart(self):
49 print("Toga app: onRestart")
50
51 def onActivityResult(self, requestCode, resultCode, resultData):
52 """Callback method, called from MainActivity when an Intent ends.
53
54 :param int requestCode: The integer request code originally supplied to startActivityForResult(),
55 allowing you to identify who this result came from.
56 :param int resultCode: The integer result code returned by the child activity through its setResult().
57 :param Intent resultData: An Intent, which can return result data to the caller (various data can be attached
58 to Intent "extras").
59 """
60 print(
61 f"Toga app: onActivityResult, requestCode={requestCode}, resultData={resultData}"
62 )
63 try:
64 # remove Intent from the list of running Intents,
65 # and set the result of the intent.
66 result_future = self.running_intents.pop(requestCode)
67 result_future.set_result(
68 {"resultCode": resultCode, "resultData": resultData}
69 )
70 except KeyError:
71 print("No intent matching request code {requestCode}")
72
73 def onConfigurationChanged(self, new_config):
74 pass
75
76 def onOptionsItemSelected(self, menuitem):
77 consumed = False
78 try:
79 cmd = self.menuitem_mapping[menuitem.getItemId()]
80 consumed = True
81 if cmd.action is not None:
82 cmd.action(menuitem)
83 except KeyError:
84 print("menu item id not found in menuitem_mapping dictionary!")
85 return consumed
86
87 def onPrepareOptionsMenu(self, menu):
88 menu.clear()
89 itemid = 0
90 menulist = {} # dictionary with all menus
91 self.menuitem_mapping.clear()
92
93 # create option menu
94 for cmd in self._impl.interface.commands:
95 if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:
96 continue
97 if cmd in self._impl.interface.main_window.toolbar:
98 continue # do not show toolbar commands in the option menu (except when overflowing)
99
100 grouppath = cmd.group.path
101 if grouppath[0] != Group.COMMANDS:
102 # only the Commands group (and its subgroups) are supported
103 # other groups should eventually go into the navigation drawer
104 continue
105 if cmd.group.key in menulist:
106 menugroup = menulist[cmd.group.key]
107 else:
108 # create all missing submenus
109 parentmenu = menu
110 for group in grouppath:
111 groupkey = group.key
112 if groupkey in menulist:
113 menugroup = menulist[groupkey]
114 else:
115 if group.text == toga.Group.COMMANDS.text:
116 menulist[groupkey] = menu
117 menugroup = menu
118 else:
119 itemid += 1
120 order = Menu.NONE if group.order is None else group.order
121 menugroup = parentmenu.addSubMenu(
122 Menu.NONE, itemid, order, group.text
123 ) # groupId, itemId, order, title
124 menulist[groupkey] = menugroup
125 parentmenu = menugroup
126 # create menu item
127 itemid += 1
128 order = Menu.NONE if cmd.order is None else cmd.order
129 menuitem = menugroup.add(
130 Menu.NONE, itemid, order, cmd.text
131 ) # groupId, itemId, order, title
132 menuitem.setShowAsActionFlags(MenuItem.SHOW_AS_ACTION_NEVER)
133 menuitem.setEnabled(cmd.enabled)
134 self.menuitem_mapping[
135 itemid
136 ] = cmd # store itemid for use in onOptionsItemSelected
137
138 # create toolbar actions
139 if self._impl.interface.main_window:
140 for cmd in self._impl.interface.main_window.toolbar:
141 if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:
142 continue
143 itemid += 1
144 order = Menu.NONE if cmd.order is None else cmd.order
145 menuitem = menu.add(
146 Menu.NONE, itemid, order, cmd.text
147 ) # groupId, itemId, order, title
148 menuitem.setShowAsActionFlags(
149 MenuItem.SHOW_AS_ACTION_IF_ROOM
150 ) # toolbar button / item in options menu on overflow
151 menuitem.setEnabled(cmd.enabled)
152 if cmd.icon:
153 icon = Drawable.createFromPath(str(cmd.icon._impl.path))
154 if icon:
155 menuitem.setIcon(icon)
156 else:
157 print("Could not create icon: " + str(cmd.icon._impl.path))
158 self.menuitem_mapping[
159 itemid
160 ] = cmd # store itemid for use in onOptionsItemSelected
161
162 return True
163
164 @property
165 def native(self):
166 # We access `MainActivity.singletonThis` freshly each time, rather than
167 # storing a reference in `__init__()`, because it's not safe to use the
168 # same reference over time because `rubicon-java` creates a JNI local
169 # reference.
170 return MainActivity.singletonThis
171
172
173 class App:
174 def __init__(self, interface):
175 self.interface = interface
176 self.interface._impl = self
177 self._listener = None
178
179 self.loop = android_events.AndroidEventLoop()
180
181 @property
182 def native(self):
183 return self._listener.native if self._listener else None
184
185 def create(self):
186 # The `_listener` listens for activity event callbacks. For simplicity,
187 # the app's `.native` is the listener's native Java class.
188 self._listener = TogaApp(self)
189 # Call user code to populate the main window
190 self.interface._startup()
191
192 def open_document(self, fileURL):
193 print("Can't open document %s (yet)" % fileURL)
194
195 def main_loop(self):
196 # In order to support user asyncio code, start the Python/Android cooperative event loop.
197 self.loop.run_forever_cooperatively()
198
199 # On Android, Toga UI integrates automatically into the main Android event loop by virtue
200 # of the Android Activity system.
201 self.create()
202
203 def set_main_window(self, window):
204 pass
205
206 def show_about_dialog(self):
207 self.interface.factory.not_implemented("App.show_about_dialog()")
208
209 def beep(self):
210 self.interface.factory.not_implemented("App.beep()")
211
212 def exit(self):
213 pass
214
215 async def intent_result(self, intent):
216 """Calls an Intent and waits for its result.
217
218 A RuntimeError will be raised when the Intent cannot be invoked.
219
220 :param Intent intent: The Intent to call
221 :returns: A Dictionary containing "resultCode" (int) and "resultData" (Intent or None)
222 :rtype: dict
223 """
224 try:
225 self._listener.last_intent_requestcode += 1
226 code = self._listener.last_intent_requestcode
227
228 result_future = asyncio.Future()
229 self._listener.running_intents[code] = result_future
230
231 self.native.startActivityForResult(intent, code)
232 await result_future
233 return result_future.result()
234 except AttributeError:
235 raise RuntimeError("No appropriate Activity found to handle this intent.")
236
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/android/src/toga_android/app.py b/android/src/toga_android/app.py
--- a/android/src/toga_android/app.py
+++ b/android/src/toga_android/app.py
@@ -233,3 +233,9 @@
return result_future.result()
except AttributeError:
raise RuntimeError("No appropriate Activity found to handle this intent.")
+
+ def hide_cursor(self):
+ pass
+
+ def show_cursor(self):
+ pass
diff --git a/iOS/src/toga_iOS/app.py b/iOS/src/toga_iOS/app.py
--- a/iOS/src/toga_iOS/app.py
+++ b/iOS/src/toga_iOS/app.py
@@ -89,3 +89,9 @@
def exit(self):
pass
+
+ def hide_cursor(self):
+ pass
+
+ def show_cursor(self):
+ pass
| {"golden_diff": "diff --git a/android/src/toga_android/app.py b/android/src/toga_android/app.py\n--- a/android/src/toga_android/app.py\n+++ b/android/src/toga_android/app.py\n@@ -233,3 +233,9 @@\n return result_future.result()\n except AttributeError:\n raise RuntimeError(\"No appropriate Activity found to handle this intent.\")\n+\n+ def hide_cursor(self):\n+ pass\n+\n+ def show_cursor(self):\n+ pass\ndiff --git a/iOS/src/toga_iOS/app.py b/iOS/src/toga_iOS/app.py\n--- a/iOS/src/toga_iOS/app.py\n+++ b/iOS/src/toga_iOS/app.py\n@@ -89,3 +89,9 @@\n \n def exit(self):\n pass\n+\n+ def hide_cursor(self):\n+ pass\n+\n+ def show_cursor(self):\n+ pass\n", "issue": "iOS App has no attribute 'hide_cursor'\n**Describe the bug**\r\nCalling the method `hide_cursor()` on a `toga.App` causes the app to crash with:\r\n```\r\n2021-03-28 13:35:22.700314-0700 Find [938:323815] AttributeError: 'App' object has no attribute 'hide_cursor'\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Create a simple iOS App\r\n2. inside the main method of your application class (derived from `toga.App`) call: `self.hide_cursor()`\r\n3. App crashes with stacktrace\r\n4. See error\r\n\r\n```\r\n Error in long running handler: 'App' object has no attribute 'hide_cursor'\r\n Traceback (most recent call last):\r\n File \"/private/var/containers/Bundle/Application/75DCDE12-8E7D-4D0A-97D1-B999813DBD9B/Find.app/Library/Application Support/app_packages/toga/handlers.py\", line 13, in long_running_task\r\n delay = next(generator)\r\n File \"/private/var/containers/Bundle/Application/75DCDE12-8E7D-4D0A-97D1-B999813DBD9B/Find.app/Library/Application Support/app/find/app.py\", line 84, in select_distance_task\r\n self.hide_cursor()\r\n File \"/private/var/containers/Bundle/Application/75DCDE12-8E7D-4D0A-97D1-B999813DBD9B/Find.app/Library/Application Support/app_packages/toga/app.py\", line 412, in hide_cursor\r\n self._impl.hide_cursor()\r\n AttributeError: 'App' object has no attribute 'hide_cursor'\r\n```\r\n\r\n**Expected behavior**\r\nThe method is documented [here](https://toga.readthedocs.io/en/latest/reference/api/app.html#toga.app.App.hide_cursor)\r\n\r\nNot sure what to expect, really, (the description, rather tautologically, says \"hides cursors\") but at the very least not to crash?\r\n\r\nWhat I was really trying to achieve was to hide the keyboard from view (also because the `focus()` method on non-input fields seems to be a no-op?).\r\n\r\n**Screenshots**\r\nIf applicable, add screenshots to help explain your problem.\r\n\r\n**Environment:**\r\n - Operating System: iOS 14.4\r\n - Python version: 3.8\r\n - Software versions:\r\n toga==0.2.15\r\ntoga-cocoa==0.2.15\r\ntoga-core==0.2.15\r\n\r\n**Additional context**\r\n\r\nIt would be good to have a way to make the \"soft\" keyboard disappear in iOS, as it obscures half the screen, and Toga does not appear to have a means to achieve that.\r\n\n", "before_files": [{"content": "import asyncio\n\nfrom rubicon.objc import objc_method\nfrom rubicon.objc.eventloop import EventLoopPolicy, iOSLifecycle\n\nfrom toga_iOS.libs import UIResponder\nfrom toga_iOS.window import Window\n\n\nclass MainWindow(Window):\n pass\n\n\nclass PythonAppDelegate(UIResponder):\n @objc_method\n def applicationDidBecomeActive_(self, application) -> None:\n print(\"App became active.\")\n\n @objc_method\n def applicationWillResignActive_(self, application) -> None:\n print(\"App about to leave foreground.\", flush=True)\n\n @objc_method\n def applicationDidEnterBackground_(self, 
application) -> None:\n print(\"App entered background.\")\n\n @objc_method\n def applicationWillEnterForeground_(self, application) -> None:\n print(\"App about to enter foreground.\")\n\n @objc_method\n def application_didFinishLaunchingWithOptions_(\n self, application, launchOptions\n ) -> bool:\n print(\"App finished launching.\")\n App.app.native = application\n App.app.create()\n return True\n\n @objc_method\n def applicationWillTerminate_(self, application) -> None:\n print(\"App about to Terminate.\")\n\n @objc_method\n def application_didChangeStatusBarOrientation_(\n self, application, oldStatusBarOrientation: int\n ) -> None:\n \"\"\"This callback is invoked when rotating the device from landscape to portrait\n and vice versa.\"\"\"\n App.app.interface.main_window.content.refresh()\n\n\nclass App:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n # Native instance doesn't exist until the lifecycle completes.\n self.native = None\n\n # Add a reference for the PythonAppDelegate class to use.\n App.app = self\n\n asyncio.set_event_loop_policy(EventLoopPolicy())\n self.loop = asyncio.new_event_loop()\n\n def create(self):\n \"\"\"Calls the startup method on the interface.\"\"\"\n self.interface._startup()\n\n def open_document(self, fileURL):\n \"\"\"Add a new document to this app.\"\"\"\n pass\n\n def main_loop(self):\n # Main loop is non-blocking on iOS. The app loop is integrated with the\n # main iOS event loop, so this call will return; however, it will leave\n # the app in a state such that asyncio events will be scheduled on the\n # iOS event loop.\n self.loop.run_forever_cooperatively(lifecycle=iOSLifecycle())\n\n def set_main_window(self, window):\n pass\n\n def show_about_dialog(self):\n self.interface.factory.not_implemented(\"App.show_about_dialog()\")\n\n def beep(self):\n self.interface.factory.not_implemented(\"App.beep()\")\n\n def exit(self):\n pass\n", "path": "iOS/src/toga_iOS/app.py"}, {"content": "import asyncio\n\nfrom rubicon.java import android_events\n\nimport toga\nfrom toga.command import Group\n\nfrom .libs.activity import IPythonApp, MainActivity\nfrom .libs.android.graphics import Drawable\nfrom .libs.android.view import Menu, MenuItem\nfrom .window import Window\n\n# `MainWindow` is defined here in `app.py`, not `window.py`, to mollify the test suite.\nMainWindow = Window\n\n\nclass TogaApp(IPythonApp):\n last_intent_requestcode = (\n -1\n ) # always increment before using it for invoking new Intents\n running_intents = {} # dictionary for currently running Intents\n menuitem_mapping = {} # dictionary for mapping menuitems to commands\n\n def __init__(self, app):\n super().__init__()\n self._impl = app\n MainActivity.setPythonApp(self)\n print(\"Python app launched & stored in Android Activity class\")\n\n def onCreate(self):\n print(\"Toga app: onCreate\")\n\n def onStart(self):\n print(\"Toga app: onStart\")\n\n def onResume(self):\n print(\"Toga app: onResume\")\n\n def onPause(self):\n print(\"Toga app: onPause\")\n\n def onStop(self):\n print(\"Toga app: onStop\")\n\n def onDestroy(self):\n print(\"Toga app: onDestroy\")\n\n def onRestart(self):\n print(\"Toga app: onRestart\")\n\n def onActivityResult(self, requestCode, resultCode, resultData):\n \"\"\"Callback method, called from MainActivity when an Intent ends.\n\n :param int requestCode: The integer request code originally supplied to startActivityForResult(),\n allowing you to identify who this result came from.\n :param int resultCode: The 
integer result code returned by the child activity through its setResult().\n :param Intent resultData: An Intent, which can return result data to the caller (various data can be attached\n to Intent \"extras\").\n \"\"\"\n print(\n f\"Toga app: onActivityResult, requestCode={requestCode}, resultData={resultData}\"\n )\n try:\n # remove Intent from the list of running Intents,\n # and set the result of the intent.\n result_future = self.running_intents.pop(requestCode)\n result_future.set_result(\n {\"resultCode\": resultCode, \"resultData\": resultData}\n )\n except KeyError:\n print(\"No intent matching request code {requestCode}\")\n\n def onConfigurationChanged(self, new_config):\n pass\n\n def onOptionsItemSelected(self, menuitem):\n consumed = False\n try:\n cmd = self.menuitem_mapping[menuitem.getItemId()]\n consumed = True\n if cmd.action is not None:\n cmd.action(menuitem)\n except KeyError:\n print(\"menu item id not found in menuitem_mapping dictionary!\")\n return consumed\n\n def onPrepareOptionsMenu(self, menu):\n menu.clear()\n itemid = 0\n menulist = {} # dictionary with all menus\n self.menuitem_mapping.clear()\n\n # create option menu\n for cmd in self._impl.interface.commands:\n if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:\n continue\n if cmd in self._impl.interface.main_window.toolbar:\n continue # do not show toolbar commands in the option menu (except when overflowing)\n\n grouppath = cmd.group.path\n if grouppath[0] != Group.COMMANDS:\n # only the Commands group (and its subgroups) are supported\n # other groups should eventually go into the navigation drawer\n continue\n if cmd.group.key in menulist:\n menugroup = menulist[cmd.group.key]\n else:\n # create all missing submenus\n parentmenu = menu\n for group in grouppath:\n groupkey = group.key\n if groupkey in menulist:\n menugroup = menulist[groupkey]\n else:\n if group.text == toga.Group.COMMANDS.text:\n menulist[groupkey] = menu\n menugroup = menu\n else:\n itemid += 1\n order = Menu.NONE if group.order is None else group.order\n menugroup = parentmenu.addSubMenu(\n Menu.NONE, itemid, order, group.text\n ) # groupId, itemId, order, title\n menulist[groupkey] = menugroup\n parentmenu = menugroup\n # create menu item\n itemid += 1\n order = Menu.NONE if cmd.order is None else cmd.order\n menuitem = menugroup.add(\n Menu.NONE, itemid, order, cmd.text\n ) # groupId, itemId, order, title\n menuitem.setShowAsActionFlags(MenuItem.SHOW_AS_ACTION_NEVER)\n menuitem.setEnabled(cmd.enabled)\n self.menuitem_mapping[\n itemid\n ] = cmd # store itemid for use in onOptionsItemSelected\n\n # create toolbar actions\n if self._impl.interface.main_window:\n for cmd in self._impl.interface.main_window.toolbar:\n if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:\n continue\n itemid += 1\n order = Menu.NONE if cmd.order is None else cmd.order\n menuitem = menu.add(\n Menu.NONE, itemid, order, cmd.text\n ) # groupId, itemId, order, title\n menuitem.setShowAsActionFlags(\n MenuItem.SHOW_AS_ACTION_IF_ROOM\n ) # toolbar button / item in options menu on overflow\n menuitem.setEnabled(cmd.enabled)\n if cmd.icon:\n icon = Drawable.createFromPath(str(cmd.icon._impl.path))\n if icon:\n menuitem.setIcon(icon)\n else:\n print(\"Could not create icon: \" + str(cmd.icon._impl.path))\n self.menuitem_mapping[\n itemid\n ] = cmd # store itemid for use in onOptionsItemSelected\n\n return True\n\n @property\n def native(self):\n # We access `MainActivity.singletonThis` freshly each time, rather than\n # storing a reference in 
`__init__()`, because it's not safe to use the\n # same reference over time because `rubicon-java` creates a JNI local\n # reference.\n return MainActivity.singletonThis\n\n\nclass App:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self._listener = None\n\n self.loop = android_events.AndroidEventLoop()\n\n @property\n def native(self):\n return self._listener.native if self._listener else None\n\n def create(self):\n # The `_listener` listens for activity event callbacks. For simplicity,\n # the app's `.native` is the listener's native Java class.\n self._listener = TogaApp(self)\n # Call user code to populate the main window\n self.interface._startup()\n\n def open_document(self, fileURL):\n print(\"Can't open document %s (yet)\" % fileURL)\n\n def main_loop(self):\n # In order to support user asyncio code, start the Python/Android cooperative event loop.\n self.loop.run_forever_cooperatively()\n\n # On Android, Toga UI integrates automatically into the main Android event loop by virtue\n # of the Android Activity system.\n self.create()\n\n def set_main_window(self, window):\n pass\n\n def show_about_dialog(self):\n self.interface.factory.not_implemented(\"App.show_about_dialog()\")\n\n def beep(self):\n self.interface.factory.not_implemented(\"App.beep()\")\n\n def exit(self):\n pass\n\n async def intent_result(self, intent):\n \"\"\"Calls an Intent and waits for its result.\n\n A RuntimeError will be raised when the Intent cannot be invoked.\n\n :param Intent intent: The Intent to call\n :returns: A Dictionary containing \"resultCode\" (int) and \"resultData\" (Intent or None)\n :rtype: dict\n \"\"\"\n try:\n self._listener.last_intent_requestcode += 1\n code = self._listener.last_intent_requestcode\n\n result_future = asyncio.Future()\n self._listener.running_intents[code] = result_future\n\n self.native.startActivityForResult(intent, code)\n await result_future\n return result_future.result()\n except AttributeError:\n raise RuntimeError(\"No appropriate Activity found to handle this intent.\")\n", "path": "android/src/toga_android/app.py"}], "after_files": [{"content": "import asyncio\n\nfrom rubicon.objc import objc_method\nfrom rubicon.objc.eventloop import EventLoopPolicy, iOSLifecycle\n\nfrom toga_iOS.libs import UIResponder\nfrom toga_iOS.window import Window\n\n\nclass MainWindow(Window):\n pass\n\n\nclass PythonAppDelegate(UIResponder):\n @objc_method\n def applicationDidBecomeActive_(self, application) -> None:\n print(\"App became active.\")\n\n @objc_method\n def applicationWillResignActive_(self, application) -> None:\n print(\"App about to leave foreground.\", flush=True)\n\n @objc_method\n def applicationDidEnterBackground_(self, application) -> None:\n print(\"App entered background.\")\n\n @objc_method\n def applicationWillEnterForeground_(self, application) -> None:\n print(\"App about to enter foreground.\")\n\n @objc_method\n def application_didFinishLaunchingWithOptions_(\n self, application, launchOptions\n ) -> bool:\n print(\"App finished launching.\")\n App.app.native = application\n App.app.create()\n return True\n\n @objc_method\n def applicationWillTerminate_(self, application) -> None:\n print(\"App about to Terminate.\")\n\n @objc_method\n def application_didChangeStatusBarOrientation_(\n self, application, oldStatusBarOrientation: int\n ) -> None:\n \"\"\"This callback is invoked when rotating the device from landscape to portrait\n and vice versa.\"\"\"\n 
App.app.interface.main_window.content.refresh()\n\n\nclass App:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n # Native instance doesn't exist until the lifecycle completes.\n self.native = None\n\n # Add a reference for the PythonAppDelegate class to use.\n App.app = self\n\n asyncio.set_event_loop_policy(EventLoopPolicy())\n self.loop = asyncio.new_event_loop()\n\n def create(self):\n \"\"\"Calls the startup method on the interface.\"\"\"\n self.interface.startup()\n\n def open_document(self, fileURL):\n \"\"\"Add a new document to this app.\"\"\"\n pass\n\n def main_loop(self):\n # Main loop is non-blocking on iOS. The app loop is integrated with the\n # main iOS event loop, so this call will return; however, it will leave\n # the app in a state such that asyncio events will be scheduled on the\n # iOS event loop.\n self.loop.run_forever_cooperatively(lifecycle=iOSLifecycle())\n\n def set_main_window(self, window):\n pass\n\n def show_about_dialog(self):\n self.interface.factory.not_implemented(\"App.show_about_dialog()\")\n\n def beep(self):\n self.interface.factory.not_implemented(\"App.beep()\")\n\n def exit(self):\n pass\n\n def hide_cursor(self):\n pass\n\n def show_cursor(self):\n pass\n", "path": "iOS/src/toga_iOS/app.py"}, {"content": "import asyncio\n\nfrom rubicon.java import android_events\n\nimport toga\nfrom toga.command import Group\n\nfrom .libs.activity import IPythonApp, MainActivity\nfrom .libs.android.graphics import Drawable\nfrom .libs.android.view import Menu, MenuItem\nfrom .window import Window\n\n# `MainWindow` is defined here in `app.py`, not `window.py`, to mollify the test suite.\nMainWindow = Window\n\n\nclass TogaApp(IPythonApp):\n last_intent_requestcode = (\n -1\n ) # always increment before using it for invoking new Intents\n running_intents = {} # dictionary for currently running Intents\n menuitem_mapping = {} # dictionary for mapping menuitems to commands\n\n def __init__(self, app):\n super().__init__()\n self._impl = app\n MainActivity.setPythonApp(self)\n print(\"Python app launched & stored in Android Activity class\")\n\n def onCreate(self):\n print(\"Toga app: onCreate\")\n\n def onStart(self):\n print(\"Toga app: onStart\")\n\n def onResume(self):\n print(\"Toga app: onResume\")\n\n def onPause(self):\n print(\"Toga app: onPause\")\n\n def onStop(self):\n print(\"Toga app: onStop\")\n\n def onDestroy(self):\n print(\"Toga app: onDestroy\")\n\n def onRestart(self):\n print(\"Toga app: onRestart\")\n\n def onActivityResult(self, requestCode, resultCode, resultData):\n \"\"\"Callback method, called from MainActivity when an Intent ends.\n\n :param int requestCode: The integer request code originally supplied to startActivityForResult(),\n allowing you to identify who this result came from.\n :param int resultCode: The integer result code returned by the child activity through its setResult().\n :param Intent resultData: An Intent, which can return result data to the caller (various data can be attached\n to Intent \"extras\").\n \"\"\"\n print(\n f\"Toga app: onActivityResult, requestCode={requestCode}, resultData={resultData}\"\n )\n try:\n # remove Intent from the list of running Intents,\n # and set the result of the intent.\n result_future = self.running_intents.pop(requestCode)\n result_future.set_result(\n {\"resultCode\": resultCode, \"resultData\": resultData}\n )\n except KeyError:\n print(\"No intent matching request code {requestCode}\")\n\n def onConfigurationChanged(self, new_config):\n 
pass\n\n def onOptionsItemSelected(self, menuitem):\n consumed = False\n try:\n cmd = self.menuitem_mapping[menuitem.getItemId()]\n consumed = True\n if cmd.action is not None:\n cmd.action(menuitem)\n except KeyError:\n print(\"menu item id not found in menuitem_mapping dictionary!\")\n return consumed\n\n def onPrepareOptionsMenu(self, menu):\n menu.clear()\n itemid = 0\n menulist = {} # dictionary with all menus\n self.menuitem_mapping.clear()\n\n # create option menu\n for cmd in self._impl.interface.commands:\n if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:\n continue\n if cmd in self._impl.interface.main_window.toolbar:\n continue # do not show toolbar commands in the option menu (except when overflowing)\n\n grouppath = cmd.group.path\n if grouppath[0] != Group.COMMANDS:\n # only the Commands group (and its subgroups) are supported\n # other groups should eventually go into the navigation drawer\n continue\n if cmd.group.key in menulist:\n menugroup = menulist[cmd.group.key]\n else:\n # create all missing submenus\n parentmenu = menu\n for group in grouppath:\n groupkey = group.key\n if groupkey in menulist:\n menugroup = menulist[groupkey]\n else:\n if group.text == toga.Group.COMMANDS.text:\n menulist[groupkey] = menu\n menugroup = menu\n else:\n itemid += 1\n order = Menu.NONE if group.order is None else group.order\n menugroup = parentmenu.addSubMenu(\n Menu.NONE, itemid, order, group.text\n ) # groupId, itemId, order, title\n menulist[groupkey] = menugroup\n parentmenu = menugroup\n # create menu item\n itemid += 1\n order = Menu.NONE if cmd.order is None else cmd.order\n menuitem = menugroup.add(\n Menu.NONE, itemid, order, cmd.text\n ) # groupId, itemId, order, title\n menuitem.setShowAsActionFlags(MenuItem.SHOW_AS_ACTION_NEVER)\n menuitem.setEnabled(cmd.enabled)\n self.menuitem_mapping[\n itemid\n ] = cmd # store itemid for use in onOptionsItemSelected\n\n # create toolbar actions\n if self._impl.interface.main_window:\n for cmd in self._impl.interface.main_window.toolbar:\n if cmd == toga.SECTION_BREAK or cmd == toga.GROUP_BREAK:\n continue\n itemid += 1\n order = Menu.NONE if cmd.order is None else cmd.order\n menuitem = menu.add(\n Menu.NONE, itemid, order, cmd.text\n ) # groupId, itemId, order, title\n menuitem.setShowAsActionFlags(\n MenuItem.SHOW_AS_ACTION_IF_ROOM\n ) # toolbar button / item in options menu on overflow\n menuitem.setEnabled(cmd.enabled)\n if cmd.icon:\n icon = Drawable.createFromPath(str(cmd.icon._impl.path))\n if icon:\n menuitem.setIcon(icon)\n else:\n print(\"Could not create icon: \" + str(cmd.icon._impl.path))\n self.menuitem_mapping[\n itemid\n ] = cmd # store itemid for use in onOptionsItemSelected\n\n return True\n\n @property\n def native(self):\n # We access `MainActivity.singletonThis` freshly each time, rather than\n # storing a reference in `__init__()`, because it's not safe to use the\n # same reference over time because `rubicon-java` creates a JNI local\n # reference.\n return MainActivity.singletonThis\n\n\nclass App:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self._listener = None\n\n self.loop = android_events.AndroidEventLoop()\n\n @property\n def native(self):\n return self._listener.native if self._listener else None\n\n def create(self):\n # The `_listener` listens for activity event callbacks. 
For simplicity,\n # the app's `.native` is the listener's native Java class.\n self._listener = TogaApp(self)\n # Call user code to populate the main window\n self.interface.startup()\n\n def open_document(self, fileURL):\n print(\"Can't open document %s (yet)\" % fileURL)\n\n def main_loop(self):\n # In order to support user asyncio code, start the Python/Android cooperative event loop.\n self.loop.run_forever_cooperatively()\n\n # On Android, Toga UI integrates automatically into the main Android event loop by virtue\n # of the Android Activity system.\n self.create()\n\n def set_main_window(self, window):\n pass\n\n def show_about_dialog(self):\n self.interface.factory.not_implemented(\"App.show_about_dialog()\")\n\n def beep(self):\n self.interface.factory.not_implemented(\"App.beep()\")\n\n def exit(self):\n pass\n\n async def intent_result(self, intent):\n \"\"\"Calls an Intent and waits for its result.\n\n A RuntimeError will be raised when the Intent cannot be invoked.\n\n :param Intent intent: The Intent to call\n :returns: A Dictionary containing \"resultCode\" (int) and \"resultData\" (Intent or None)\n :rtype: dict\n \"\"\"\n try:\n self._listener.last_intent_requestcode += 1\n code = self._listener.last_intent_requestcode\n\n result_future = asyncio.Future()\n self._listener.running_intents[code] = result_future\n\n self.native.startActivityForResult(intent, code)\n await result_future\n return result_future.result()\n except AttributeError:\n raise RuntimeError(\"No appropriate Activity found to handle this intent.\")\n\n def hide_cursor(self):\n pass\n\n def show_cursor(self):\n pass\n", "path": "android/src/toga_android/app.py"}]} | 4,062 | 194 |
gh_patches_debug_21089 | rasdani/github-patches | git_diff | opsdroid__opsdroid-225 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow usage of env vars in config
The configuration should be parsed for environment variables when loaded. This would allow secrets such as API keys to be kept outside of the opsdroid configuration.
#### Example
```yaml
connectors:
- name: slack
default-room: '#general'
bot-name: "opsdroid"
icon-emoji: ":robot:"
api-token: "$SLACK_API_KEY"
```
In this example `$SLACK_API_KEY` would be replaced with the contents of the environment variable of the same name.
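
One way to get this behaviour with PyYAML — a standalone sketch, not opsdroid's actual loader — is an implicit resolver plus a custom constructor; the file name `configuration.yaml` is just a placeholder:

```python
# Sketch only: substitute "$NAME" scalars with os.environ["NAME"] while
# loading YAML, via a PyYAML implicit resolver and a custom constructor.
import os
import re

import yaml

ENV_VAR = re.compile(r'^\$([A-Z_]+)$')


def _env_constructor(loader, node):
    value = loader.construct_scalar(node)
    return os.environ[ENV_VAR.match(value).group(1)]


yaml.add_implicit_resolver('!envvar', ENV_VAR, Loader=yaml.SafeLoader)
yaml.add_constructor('!envvar', _env_constructor, Loader=yaml.SafeLoader)

with open('configuration.yaml') as stream:
    # Note: implicit resolvers only fire for plain (unquoted) scalars,
    # e.g. `api-token: $SLACK_API_KEY`.
    config = yaml.safe_load(stream)
```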
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/loader.py`
Content:
```
1 """Class for loading in modules to OpsDroid."""
2
3 import logging
4 import os
5 import sys
6 import shutil
7 import subprocess
8 import importlib
9 import yaml
10 from opsdroid.const import (
11 DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,
12 DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,
13 DEFAULT_MODULE_DEPS_PATH)
14
15
16 _LOGGER = logging.getLogger(__name__)
17
18
19 class Loader:
20 """Class to load in config and modules."""
21
22 def __init__(self, opsdroid):
23 """Create object with opsdroid instance."""
24 self.opsdroid = opsdroid
25 self.modules_directory = None
26 self.current_import_config = None
27 _LOGGER.debug("Loaded loader")
28
29 @staticmethod
30 def import_module(config):
31 """Import module namespace as variable and return it."""
32 try:
33 module = importlib.import_module(
34 config["module_path"] + "." + config["name"])
35 _LOGGER.debug("Loaded " + config["type"] + ": " +
36 config["module_path"])
37 return module
38 except ImportError as error:
39 _LOGGER.debug("Failed to load " + config["type"] +
40 " " + config["module_path"] + "." + config["name"])
41 _LOGGER.debug(error)
42
43 try:
44 module = importlib.import_module(
45 config["module_path"])
46 _LOGGER.debug("Loaded " + config["type"] + ": " +
47 config["module_path"])
48 return module
49 except ImportError as error:
50 _LOGGER.debug("Failed to load " + config["type"] +
51 " " + config["module_path"])
52 _LOGGER.debug(error)
53
54 _LOGGER.error("Failed to load " + config["type"] +
55 " " + config["module_path"])
56 return None
57
58 @staticmethod
59 def check_cache(config):
60 """Remove module if 'no-cache' set in config."""
61 if "no-cache" in config \
62 and config["no-cache"]:
63 _LOGGER.debug("'no-cache' set, removing " + config["install_path"])
64 if os.path.isdir(config["install_path"]):
65 shutil.rmtree(config["install_path"])
66 if os.path.isfile(config["install_path"] + ".py"):
67 os.remove(config["install_path"] + ".py")
68
69 def build_module_path(self, path_type, config):
70 """Generate the module path from name and type."""
71 if path_type == "import":
72 return MODULES_DIRECTORY + "." + config["type"] + \
73 "." + config["name"]
74 elif path_type == "install":
75 return self.modules_directory + "/" + config["type"] + \
76 "/" + config["name"]
77
78 @staticmethod
79 def git_clone(git_url, install_path, branch):
80 """Clone a git repo to a location and wait for finish."""
81 process = subprocess.Popen(["git", "clone", "-b", branch,
82 git_url, install_path], shell=False,
83 stdout=subprocess.PIPE,
84 stderr=subprocess.PIPE)
85 for output in process.communicate():
86 if output != "":
87 for line in output.splitlines():
88 _LOGGER.debug(str(line).strip())
89 process.wait()
90
91 @staticmethod
92 def pip_install_deps(requirements_path):
93 """Pip install a requirements.txt file and wait for finish."""
94 process = subprocess.Popen(["pip", "install",
95 "--target={}".format(
96 DEFAULT_MODULE_DEPS_PATH),
97 "--ignore-installed",
98 "-r", requirements_path],
99 shell=False,
100 stdout=subprocess.PIPE,
101 stderr=subprocess.PIPE)
102 for output in process.communicate():
103 if output != "":
104 for line in output.splitlines():
105 _LOGGER.debug(str(line).strip())
106 process.wait()
107
108 @staticmethod
109 def create_default_config(config_path):
110 """Create a default config file based on the included example."""
111 _LOGGER.info("Creating %s.", config_path)
112 config_dir, _ = os.path.split(config_path)
113 if not os.path.isdir(config_dir):
114 os.makedirs(config_dir)
115 shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)
116 return config_path
117
118 @staticmethod
119 def _reload_modules(modules):
120 for module in modules:
121 importlib.reload(module["module"])
122
123 def load_config_file(self, config_paths):
124 """Load a yaml config file from path."""
125 config_path = ""
126 for possible_path in config_paths:
127 if not os.path.isfile(possible_path):
128 _LOGGER.debug("Config file " + possible_path +
129 " not found")
130 else:
131 config_path = possible_path
132 break
133
134 if not config_path:
135 _LOGGER.info("No configuration files found.")
136 config_path = self.create_default_config(DEFAULT_CONFIG_PATH)
137
138 try:
139 with open(config_path, 'r') as stream:
140 _LOGGER.info("Loaded config from %s", config_path)
141 return yaml.load(stream)
142 except yaml.YAMLError as error:
143 self.opsdroid.critical(error, 1)
144 except FileNotFoundError as error:
145 self.opsdroid.critical(str(error), 1)
146
147 def setup_modules_directory(self, config):
148 """Create and configure the modules directory."""
149 module_path = os.path.expanduser(
150 config.get("module-path", DEFAULT_MODULES_PATH))
151 sys.path.append(module_path)
152
153 if not os.path.isdir(module_path):
154 os.makedirs(module_path, exist_ok=True)
155
156 self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)
157
158 # Create modules directory if doesn't exist
159 if not os.path.isdir(self.modules_directory):
160 os.makedirs(self.modules_directory)
161
162 def load_modules_from_config(self, config):
163 """Load all module types based on config."""
164 _LOGGER.debug("Loading modules from config")
165
166 self.setup_modules_directory(config)
167
168 connectors, databases, skills = None, None, None
169
170 if 'databases' in config.keys():
171 databases = self._load_modules('database', config['databases'])
172 else:
173 _LOGGER.warning("No databases in configuration")
174
175 if 'skills' in config.keys():
176 skills = self._load_modules('skill', config['skills'])
177 self.opsdroid.skills = []
178 self._reload_modules(skills)
179 else:
180 self.opsdroid.critical(
181 "No skills in configuration, at least 1 required", 1)
182
183 if 'connectors' in config.keys():
184 connectors = self._load_modules('connector', config['connectors'])
185 else:
186 self.opsdroid.critical(
187 "No connectors in configuration, at least 1 required", 1)
188
189 return connectors, databases, skills
190
191 def _load_modules(self, modules_type, modules):
192 """Install and load modules."""
193 _LOGGER.debug("Loading " + modules_type + " modules")
194 loaded_modules = []
195
196 if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):
197 os.makedirs(DEFAULT_MODULE_DEPS_PATH)
198 sys.path.append(DEFAULT_MODULE_DEPS_PATH)
199
200 for module in modules:
201
202 # Set up module config
203 config = module
204 config = {} if config is None else config
205 config["name"] = module["name"]
206 config["type"] = modules_type
207 config["module_path"] = self.build_module_path("import", config)
208 config["install_path"] = self.build_module_path("install", config)
209 if "branch" not in config:
210 config["branch"] = DEFAULT_MODULE_BRANCH
211
212 # Remove module for reinstall if no-cache set
213 self.check_cache(config)
214
215 # Install module
216 self._install_module(config)
217
218 # Import module
219 self.current_import_config = config
220 module = self.import_module(config)
221 if module is not None:
222 loaded_modules.append({
223 "module": module,
224 "config": config})
225 else:
226 _LOGGER.error(
227 "Module " + config["name"] + " failed to import")
228
229 return loaded_modules
230
231 def _install_module(self, config):
232 # pylint: disable=R0201
233 """Install a module."""
234 _LOGGER.debug("Installing " + config["name"])
235
236 if os.path.isdir(config["install_path"]) or \
237 os.path.isfile(config["install_path"] + ".py"):
238 # TODO Allow for updating or reinstalling of modules
239 _LOGGER.debug("Module " + config["name"] +
240 " already installed, skipping")
241 return
242
243 if "path" in config:
244 self._install_local_module(config)
245 else:
246 self._install_git_module(config)
247
248 if os.path.isdir(config["install_path"]):
249 _LOGGER.debug("Installed " + config["name"] +
250 " to " + config["install_path"])
251 else:
252 _LOGGER.debug("Install of " + config["name"] + " failed")
253
254 # Install module dependancies
255 if os.path.isfile(config["install_path"] + "/requirements.txt"):
256 self.pip_install_deps(config["install_path"] +
257 "/requirements.txt")
258
259 def _install_git_module(self, config):
260 """Install a module from a git repository."""
261 if config is not None and "repo" in config:
262 git_url = config["repo"]
263 else:
264 git_url = DEFAULT_GIT_URL + config["type"] + \
265 "-" + config["name"] + ".git"
266
267 if any(prefix in git_url for prefix in ["http", "https", "ssh"]):
268 # TODO Test if url or ssh path exists
269 # TODO Handle github authentication
270 _LOGGER.debug("Cloning from remote repository")
271 self.git_clone(git_url, config["install_path"],
272 config["branch"])
273 else:
274 if os.path.isdir(git_url):
275 _LOGGER.debug("Cloning from local repository")
276 self.git_clone(git_url, config["install_path"],
277 config["branch"])
278 else:
279 _LOGGER.debug("Could not find local git repo " + git_url)
280
281 @staticmethod
282 def _install_local_module(config):
283 """Install a module from a local path."""
284 installed = False
285 config["path"] = os.path.expanduser(config["path"])
286
287 installdir, _ = os.path.split(config["install_path"])
288 if not os.path.isdir(installdir):
289 os.makedirs(installdir, exist_ok=True)
290
291 if os.path.isdir(config["path"]):
292 shutil.copytree(config["path"], config["install_path"])
293 installed = True
294
295 if os.path.isfile(config["path"]):
296 os.makedirs(config["install_path"], exist_ok=True)
297 shutil.copyfile(config["path"], config["install_path"] +
298 "/__init__.py")
299 installed = True
300
301 if not installed:
302 _LOGGER.error("Failed to install from " + config["path"])
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/loader.py b/opsdroid/loader.py
--- a/opsdroid/loader.py
+++ b/opsdroid/loader.py
@@ -6,6 +6,7 @@
import shutil
import subprocess
import importlib
+import re
import yaml
from opsdroid.const import (
DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,
@@ -135,6 +136,17 @@
_LOGGER.info("No configuration files found.")
config_path = self.create_default_config(DEFAULT_CONFIG_PATH)
+ env_var_pattern = re.compile(r'^\$([A-Z_]*)$')
+ yaml.add_implicit_resolver("!envvar", env_var_pattern)
+
+ def envvar_constructor(loader, node):
+ """Yaml parser for env vars."""
+ value = loader.construct_scalar(node)
+ [env_var] = env_var_pattern.match(value).groups()
+ return os.environ[env_var]
+
+ yaml.add_constructor('!envvar', envvar_constructor)
+
try:
with open(config_path, 'r') as stream:
_LOGGER.info("Loaded config from %s", config_path)
| {"golden_diff": "diff --git a/opsdroid/loader.py b/opsdroid/loader.py\n--- a/opsdroid/loader.py\n+++ b/opsdroid/loader.py\n@@ -6,6 +6,7 @@\n import shutil\n import subprocess\n import importlib\n+import re\n import yaml\n from opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n@@ -135,6 +136,17 @@\n _LOGGER.info(\"No configuration files found.\")\n config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\n \n+ env_var_pattern = re.compile(r'^\\$([A-Z_]*)$')\n+ yaml.add_implicit_resolver(\"!envvar\", env_var_pattern)\n+\n+ def envvar_constructor(loader, node):\n+ \"\"\"Yaml parser for env vars.\"\"\"\n+ value = loader.construct_scalar(node)\n+ [env_var] = env_var_pattern.match(value).groups()\n+ return os.environ[env_var]\n+\n+ yaml.add_constructor('!envvar', envvar_constructor)\n+\n try:\n with open(config_path, 'r') as stream:\n _LOGGER.info(\"Loaded config from %s\", config_path)\n", "issue": "Allow usage of env vars in config\nThe configuration should be parsed for environment variables when loaded. This would allow for secrets like api keys to be kept outside of the opsdroid configuration.\r\n\r\n#### Example\r\n\r\n```yaml\r\nconnectors:\r\n - name: slack\r\n default-room: '#general'\r\n bot-name: \"opsdroid\"\r\n icon-emoji: \":robot:\"\r\n api-token: \"$SLACK_API_KEY\"\r\n```\r\nIn this example `$SLACK_API_KEY` would be replaced with the contents of the environment variable of the same name.\n", "before_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport importlib\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,\n DEFAULT_MODULE_DEPS_PATH)\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n self.modules_directory = None\n self.current_import_config = None\n _LOGGER.debug(\"Loaded loader\")\n\n @staticmethod\n def import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(error)\n\n try:\n module = importlib.import_module(\n config[\"module_path\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n _LOGGER.debug(error)\n\n _LOGGER.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n return None\n\n @staticmethod\n def check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"]:\n _LOGGER.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n if os.path.isdir(config[\"install_path\"]):\n shutil.rmtree(config[\"install_path\"])\n if os.path.isfile(config[\"install_path\"] + \".py\"):\n os.remove(config[\"install_path\"] + \".py\")\n\n def build_module_path(self, path_type, 
config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \\\n \".\" + config[\"name\"]\n elif path_type == \"install\":\n return self.modules_directory + \"/\" + config[\"type\"] + \\\n \"/\" + config[\"name\"]\n\n @staticmethod\n def git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\",\n \"--target={}\".format(\n DEFAULT_MODULE_DEPS_PATH),\n \"--ignore-installed\",\n \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def create_default_config(config_path):\n \"\"\"Create a default config file based on the included example.\"\"\"\n _LOGGER.info(\"Creating %s.\", config_path)\n config_dir, _ = os.path.split(config_path)\n if not os.path.isdir(config_dir):\n os.makedirs(config_dir)\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n return config_path\n\n @staticmethod\n def _reload_modules(modules):\n for module in modules:\n importlib.reload(module[\"module\"])\n\n def load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(\"Config file \" + possible_path +\n \" not found\")\n else:\n config_path = possible_path\n break\n\n if not config_path:\n _LOGGER.info(\"No configuration files found.\")\n config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\n\n try:\n with open(config_path, 'r') as stream:\n _LOGGER.info(\"Loaded config from %s\", config_path)\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def setup_modules_directory(self, config):\n \"\"\"Create and configure the modules directory.\"\"\"\n module_path = os.path.expanduser(\n config.get(\"module-path\", DEFAULT_MODULES_PATH))\n sys.path.append(module_path)\n\n if not os.path.isdir(module_path):\n os.makedirs(module_path, exist_ok=True)\n\n self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(self.modules_directory):\n os.makedirs(self.modules_directory)\n\n def load_modules_from_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n _LOGGER.debug(\"Loading modules from config\")\n\n self.setup_modules_directory(config)\n\n connectors, databases, skills = None, None, None\n\n if 'databases' in config.keys():\n databases = self._load_modules('database', config['databases'])\n else:\n _LOGGER.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n skills = self._load_modules('skill', config['skills'])\n self.opsdroid.skills = []\n self._reload_modules(skills)\n else:\n 
self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n connectors = self._load_modules('connector', config['connectors'])\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n return connectors, databases, skills\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n\n if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):\n os.makedirs(DEFAULT_MODULE_DEPS_PATH)\n sys.path.append(DEFAULT_MODULE_DEPS_PATH)\n\n for module in modules:\n\n # Set up module config\n config = module\n config = {} if config is None else config\n config[\"name\"] = module[\"name\"]\n config[\"type\"] = modules_type\n config[\"module_path\"] = self.build_module_path(\"import\", config)\n config[\"install_path\"] = self.build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n self.check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n self.current_import_config = config\n module = self.import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n else:\n _LOGGER.error(\n \"Module \" + config[\"name\"] + \" failed to import\")\n\n return loaded_modules\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n _LOGGER.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]) or \\\n os.path.isfile(config[\"install_path\"] + \".py\"):\n # TODO Allow for updating or reinstalling of modules\n _LOGGER.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n return\n\n if \"path\" in config:\n self._install_local_module(config)\n else:\n self._install_git_module(config)\n\n if os.path.isdir(config[\"install_path\"]):\n _LOGGER.debug(\"Installed \" + config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n _LOGGER.debug(\"Install of \" + config[\"name\"] + \" failed\")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n self.pip_install_deps(config[\"install_path\"] +\n \"/requirements.txt\")\n\n def _install_git_module(self, config):\n \"\"\"Install a module from a git repository.\"\"\"\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n _LOGGER.debug(\"Cloning from remote repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n _LOGGER.debug(\"Cloning from local repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n _LOGGER.debug(\"Could not find local git repo \" + git_url)\n\n @staticmethod\n def _install_local_module(config):\n \"\"\"Install a module from a local path.\"\"\"\n installed = False\n config[\"path\"] = os.path.expanduser(config[\"path\"])\n\n installdir, _ = os.path.split(config[\"install_path\"])\n if not os.path.isdir(installdir):\n os.makedirs(installdir, exist_ok=True)\n\n if os.path.isdir(config[\"path\"]):\n 
shutil.copytree(config[\"path\"], config[\"install_path\"])\n installed = True\n\n if os.path.isfile(config[\"path\"]):\n os.makedirs(config[\"install_path\"], exist_ok=True)\n shutil.copyfile(config[\"path\"], config[\"install_path\"] +\n \"/__init__.py\")\n installed = True\n\n if not installed:\n _LOGGER.error(\"Failed to install from \" + config[\"path\"])\n", "path": "opsdroid/loader.py"}], "after_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport shutil\nimport subprocess\nimport importlib\nimport re\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULES_PATH,\n DEFAULT_MODULE_BRANCH, DEFAULT_CONFIG_PATH, EXAMPLE_CONFIG_FILE,\n DEFAULT_MODULE_DEPS_PATH)\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Create object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n self.modules_directory = None\n self.current_import_config = None\n _LOGGER.debug(\"Loaded loader\")\n\n @staticmethod\n def import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"] + \".\" + config[\"name\"])\n _LOGGER.debug(error)\n\n try:\n module = importlib.import_module(\n config[\"module_path\"])\n _LOGGER.debug(\"Loaded \" + config[\"type\"] + \": \" +\n config[\"module_path\"])\n return module\n except ImportError as error:\n _LOGGER.debug(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n _LOGGER.debug(error)\n\n _LOGGER.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"module_path\"])\n return None\n\n @staticmethod\n def check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"]:\n _LOGGER.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n if os.path.isdir(config[\"install_path\"]):\n shutil.rmtree(config[\"install_path\"])\n if os.path.isfile(config[\"install_path\"] + \".py\"):\n os.remove(config[\"install_path\"] + \".py\")\n\n def build_module_path(self, path_type, config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \\\n \".\" + config[\"name\"]\n elif path_type == \"install\":\n return self.modules_directory + \"/\" + config[\"type\"] + \\\n \"/\" + config[\"name\"]\n\n @staticmethod\n def git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\",\n \"--target={}\".format(\n DEFAULT_MODULE_DEPS_PATH),\n \"--ignore-installed\",\n \"-r\", requirements_path],\n shell=False,\n 
stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n _LOGGER.debug(str(line).strip())\n process.wait()\n\n @staticmethod\n def create_default_config(config_path):\n \"\"\"Create a default config file based on the included example.\"\"\"\n _LOGGER.info(\"Creating %s.\", config_path)\n config_dir, _ = os.path.split(config_path)\n if not os.path.isdir(config_dir):\n os.makedirs(config_dir)\n shutil.copyfile(EXAMPLE_CONFIG_FILE, config_path)\n return config_path\n\n @staticmethod\n def _reload_modules(modules):\n for module in modules:\n importlib.reload(module[\"module\"])\n\n def load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n _LOGGER.debug(\"Config file \" + possible_path +\n \" not found\")\n else:\n config_path = possible_path\n break\n\n if not config_path:\n _LOGGER.info(\"No configuration files found.\")\n config_path = self.create_default_config(DEFAULT_CONFIG_PATH)\n\n env_var_pattern = re.compile(r'^\\$([A-Z_]*)$')\n yaml.add_implicit_resolver(\"!envvar\", env_var_pattern)\n\n def envvar_constructor(loader, node):\n \"\"\"Yaml parser for env vars.\"\"\"\n value = loader.construct_scalar(node)\n [env_var] = env_var_pattern.match(value).groups()\n return os.environ[env_var]\n\n yaml.add_constructor('!envvar', envvar_constructor)\n\n try:\n with open(config_path, 'r') as stream:\n _LOGGER.info(\"Loaded config from %s\", config_path)\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def setup_modules_directory(self, config):\n \"\"\"Create and configure the modules directory.\"\"\"\n module_path = os.path.expanduser(\n config.get(\"module-path\", DEFAULT_MODULES_PATH))\n sys.path.append(module_path)\n\n if not os.path.isdir(module_path):\n os.makedirs(module_path, exist_ok=True)\n\n self.modules_directory = os.path.join(module_path, MODULES_DIRECTORY)\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(self.modules_directory):\n os.makedirs(self.modules_directory)\n\n def load_modules_from_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n _LOGGER.debug(\"Loading modules from config\")\n\n self.setup_modules_directory(config)\n\n connectors, databases, skills = None, None, None\n\n if 'databases' in config.keys():\n databases = self._load_modules('database', config['databases'])\n else:\n _LOGGER.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n skills = self._load_modules('skill', config['skills'])\n self.opsdroid.skills = []\n self._reload_modules(skills)\n else:\n self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n connectors = self._load_modules('connector', config['connectors'])\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n return connectors, databases, skills\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n _LOGGER.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n\n if not os.path.isdir(DEFAULT_MODULE_DEPS_PATH):\n os.makedirs(DEFAULT_MODULE_DEPS_PATH)\n sys.path.append(DEFAULT_MODULE_DEPS_PATH)\n\n for module in modules:\n\n # Set up module config\n config = 
module\n config = {} if config is None else config\n config[\"name\"] = module[\"name\"]\n config[\"type\"] = modules_type\n config[\"module_path\"] = self.build_module_path(\"import\", config)\n config[\"install_path\"] = self.build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n self.check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n self.current_import_config = config\n module = self.import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n else:\n _LOGGER.error(\n \"Module \" + config[\"name\"] + \" failed to import\")\n\n return loaded_modules\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n _LOGGER.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]) or \\\n os.path.isfile(config[\"install_path\"] + \".py\"):\n # TODO Allow for updating or reinstalling of modules\n _LOGGER.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n return\n\n if \"path\" in config:\n self._install_local_module(config)\n else:\n self._install_git_module(config)\n\n if os.path.isdir(config[\"install_path\"]):\n _LOGGER.debug(\"Installed \" + config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n _LOGGER.debug(\"Install of \" + config[\"name\"] + \" failed\")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n self.pip_install_deps(config[\"install_path\"] +\n \"/requirements.txt\")\n\n def _install_git_module(self, config):\n \"\"\"Install a module from a git repository.\"\"\"\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n _LOGGER.debug(\"Cloning from remote repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n _LOGGER.debug(\"Cloning from local repository\")\n self.git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n _LOGGER.debug(\"Could not find local git repo \" + git_url)\n\n @staticmethod\n def _install_local_module(config):\n \"\"\"Install a module from a local path.\"\"\"\n installed = False\n config[\"path\"] = os.path.expanduser(config[\"path\"])\n\n installdir, _ = os.path.split(config[\"install_path\"])\n if not os.path.isdir(installdir):\n os.makedirs(installdir, exist_ok=True)\n\n if os.path.isdir(config[\"path\"]):\n shutil.copytree(config[\"path\"], config[\"install_path\"])\n installed = True\n\n if os.path.isfile(config[\"path\"]):\n os.makedirs(config[\"install_path\"], exist_ok=True)\n shutil.copyfile(config[\"path\"], config[\"install_path\"] +\n \"/__init__.py\")\n installed = True\n\n if not installed:\n _LOGGER.error(\"Failed to install from \" + config[\"path\"])\n", "path": "opsdroid/loader.py"}]} | 3,453 | 254 |
gh_patches_debug_4461 | rasdani/github-patches | git_diff | sunpy__sunpy-3333 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[1.0.3] test_print_config_files fails with installed sunpy
### Description
On Debian, we run the provided tests regularly on the installed package ("CI test") to ensure that it still works in the current environment.
With Version 1.0.3 (or before; I didn't check 1.0 … 1.0.2), I see the following failure:
### Actual behavior
`test_print_config_files` tries to write to the installed package, which is denied for an ordinary user:
```
_____________________________________ test_print_config_files _____________________________________
undo_download_dir_patch = None
def test_print_config_files(undo_download_dir_patch):
# TODO: Tidy this up.
stdout = sys.stdout
out = io.StringIO()
sys.stdout = out
> print_config()
/usr/lib/python3/dist-packages/sunpy/util/tests/test_config.py:51:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/usr/lib/python3/dist-packages/sunpy/util/config.py:107: in print_config
for file_ in _find_config_files():
/usr/lib/python3/dist-packages/sunpy/util/config.py:69: in _find_config_files
config_path = Path(_get_user_configdir())
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
def _get_user_configdir():
"""[…]"""
configdir = os.environ.get('SUNPY_CONFIGDIR', CONFIG_DIR)
if not _is_writable_dir(configdir):
> raise RuntimeError(f'Could not write to SUNPY_CONFIGDIR="{configdir}"')
E RuntimeError: Could not write to SUNPY_CONFIGDIR="/usr/lib/python3/dist-packages/sunpy/data"
```
This also happens when I explicitly set `SUNPY_CONFIGDIR` to a different directory; the path mentioned in the `RuntimeError` is still `/usr/lib/python3/dist-packages/sunpy/data`.
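
That behaviour is consistent with the session-scoped `tmp_config_dir` fixture in `sunpy/conftest.py` (listed below), which unconditionally re-points `SUNPY_CONFIGDIR` at the installed package's `data` directory. A minimal sketch of a fixture that keeps the override but targets a writable location instead would be:

```python
# Sketch only: point SUNPY_CONFIGDIR at a writable per-session temporary
# directory rather than the read-only installed package data directory.
import os
import tempfile

import pytest


@pytest.fixture(scope="session", autouse=True)
def tmp_config_dir():
    with tempfile.TemporaryDirectory() as tmpdir:
        os.environ["SUNPY_CONFIGDIR"] = tmpdir  # writable, unlike <site-packages>/sunpy/data
        yield
        del os.environ["SUNPY_CONFIGDIR"]
```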
### Steps to Reproduce
```
$ cd /tmp
$ SUNPY_CONFIGDIR=/tmp python3 -c "import sunpy; exit(sunpy.self_test(args=\"-k 'not figure and not online'\"))"
```
### System Details
- SunPy Version: 1.0.3
- Astropy Version: 3.2.1
- Python Version: 3.7.4
- OS information: Debian unstable
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/conftest.py`
Content:
```
1 import os
2 import json
3 import pathlib
4 import warnings
5 import importlib
6 import tempfile
7
8 import pytest
9
10 import astropy
11 import astropy.config.paths
12
13 import sunpy.tests.helpers
14 from sunpy.tests.hash import HASH_LIBRARY_NAME
15 from sunpy.tests.helpers import new_hash_library, generate_figure_webpage
16 from sunpy.util.exceptions import SunpyDeprecationWarning
17
18 # Force MPL to use non-gui backends for testing.
19 try:
20 import matplotlib
21 except ImportError:
22 pass
23 else:
24 matplotlib.use('Agg')
25
26 # Don't actually import pytest_remotedata because that can do things to the
27 # entrypoints code in pytest.
28 remotedata_spec = importlib.util.find_spec("pytest_remotedata")
29 HAVE_REMOTEDATA = remotedata_spec is not None
30
31
32 def pytest_addoption(parser):
33 parser.addoption("--figure_dir", action="store", default="./figure_test_images")
34
35
36 @pytest.fixture(scope='session', autouse=True)
37 def figure_base_dir(request):
38 sunpy.tests.helpers.figure_base_dir = pathlib.Path(
39 request.config.getoption("--figure_dir"))
40
41
42 @pytest.fixture(scope='session', autouse=True)
43 def tmp_config_dir(request):
44 """
45 Globally set the default config for all tests.
46 """
47 tmpdir = tempfile.TemporaryDirectory()
48
49 os.environ["SUNPY_CONFIGDIR"] = str(pathlib.Path(__file__).parent / "data")
50 astropy.config.paths.set_temp_config._temp_path = str(tmpdir.name)
51 astropy.config.paths.set_temp_cache._temp_path = str(tmpdir.name)
52
53 yield
54
55 del os.environ["SUNPY_CONFIGDIR"]
56 astropy.config.paths.set_temp_config._temp_path = None
57 astropy.config.paths.set_temp_cache._temp_path = None
58
59
60 @pytest.fixture()
61 def undo_config_dir_patch():
62 """
63 Provide a way for certain tests to not have the config dir.
64 """
65 oridir = os.environ["SUNPY_CONFIGDIR"]
66 del os.environ["SUNPY_CONFIGDIR"]
67 yield
68 os.environ["SUNPY_CONFIGDIR"] = oridir
69
70
71 @pytest.fixture(scope='session', autouse=True)
72 def tmp_dl_dir(request):
73 """
74 Globally set the default download directory for the test run to a tmp dir.
75 """
76 with tempfile.TemporaryDirectory() as tmpdir:
77 os.environ["SUNPY_DOWNLOADDIR"] = tmpdir
78 yield tmpdir
79 del os.environ["SUNPY_DOWNLOADDIR"]
80
81
82 @pytest.fixture()
83 def undo_download_dir_patch():
84 """
85 Provide a way for certain tests to not have tmp download dir.
86 """
87 oridir = os.environ["SUNPY_DOWNLOADDIR"]
88 del os.environ["SUNPY_DOWNLOADDIR"]
89 yield
90 os.environ["SUNPY_DOWNLOADDIR"] = oridir
91
92
93 def pytest_runtest_setup(item):
94 """
95 pytest hook to skip all tests that have the mark 'remotedata' if the
96 pytest_remotedata plugin is not installed.
97 """
98 if isinstance(item, pytest.Function):
99 if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:
100 pytest.skip("skipping remotedata tests as pytest-remotedata is not installed")
101
102
103 def pytest_unconfigure(config):
104
105 # If at least one figure test has been run, print result image directory
106 if len(new_hash_library) > 0:
107 # Write the new hash library in JSON
108 figure_base_dir = pathlib.Path(config.getoption("--figure_dir"))
109 hashfile = figure_base_dir / HASH_LIBRARY_NAME
110 with open(hashfile, 'w') as outfile:
111 json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))
112
113 """
114 Turn on internet when generating the figure comparison webpage.
115 """
116 if HAVE_REMOTEDATA:
117 from pytest_remotedata.disable_internet import turn_on_internet, turn_off_internet
118 else:
119 def turn_on_internet(): pass
120 def turn_off_internet(): pass
121
122 turn_on_internet()
123 generate_figure_webpage(new_hash_library)
124 turn_off_internet()
125
126 print('All images from image tests can be found in {}'.format(figure_base_dir.resolve()))
127 print("The corresponding hash library is {}".format(hashfile.resolve()))
128
129
130 def pytest_sessionstart(session):
131 warnings.simplefilter("error", SunpyDeprecationWarning)
132
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/conftest.py b/sunpy/conftest.py
--- a/sunpy/conftest.py
+++ b/sunpy/conftest.py
@@ -46,7 +46,7 @@
"""
tmpdir = tempfile.TemporaryDirectory()
- os.environ["SUNPY_CONFIGDIR"] = str(pathlib.Path(__file__).parent / "data")
+ os.environ["SUNPY_CONFIGDIR"] = str(tmpdir.name)
astropy.config.paths.set_temp_config._temp_path = str(tmpdir.name)
astropy.config.paths.set_temp_cache._temp_path = str(tmpdir.name)
| {"golden_diff": "diff --git a/sunpy/conftest.py b/sunpy/conftest.py\n--- a/sunpy/conftest.py\n+++ b/sunpy/conftest.py\n@@ -46,7 +46,7 @@\n \"\"\"\n tmpdir = tempfile.TemporaryDirectory()\n \n- os.environ[\"SUNPY_CONFIGDIR\"] = str(pathlib.Path(__file__).parent / \"data\")\n+ os.environ[\"SUNPY_CONFIGDIR\"] = str(tmpdir.name)\n astropy.config.paths.set_temp_config._temp_path = str(tmpdir.name)\n astropy.config.paths.set_temp_cache._temp_path = str(tmpdir.name)\n", "issue": "[1.0.3] test_print_config_files fails with installed sunpy\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! -->\r\n\r\n### Description\r\n\r\nOn Debian, we run the provided tests regularly on the installed package (\"CI test\") to ensure that it still works in the current environment. \r\nWith Version 1.0.3 (or before; I didn't check 1.0 \u2026 1.0.2), I see the following failure:\r\n\r\n### Actual behavior\r\n`print_test_config_files` tries to write to the installed package, which is denied for an ordinary user:\r\n```\r\n_____________________________________ test_print_config_files _____________________________________\r\n\r\nundo_download_dir_patch = None\r\n\r\n def test_print_config_files(undo_download_dir_patch):\r\n # TODO: Tidy this up.\r\n stdout = sys.stdout\r\n out = io.StringIO()\r\n sys.stdout = out\r\n> print_config()\r\n\r\n/usr/lib/python3/dist-packages/sunpy/util/tests/test_config.py:51: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n/usr/lib/python3/dist-packages/sunpy/util/config.py:107: in print_config\r\n for file_ in _find_config_files():\r\n/usr/lib/python3/dist-packages/sunpy/util/config.py:69: in _find_config_files\r\n config_path = Path(_get_user_configdir())\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _\r\n\r\n def _get_user_configdir():\r\n \"\"\"[\u2026]\"\"\"\r\n configdir = os.environ.get('SUNPY_CONFIGDIR', CONFIG_DIR)\r\n \r\n if not _is_writable_dir(configdir):\r\n> raise RuntimeError(f'Could not write to SUNPY_CONFIGDIR=\"{configdir}\"')\r\nE RuntimeError: Could not write to SUNPY_CONFIGDIR=\"/usr/lib/python3/dist-packages/sunpy/data\"\r\n```\r\nThis also happens when I explicitly set `SUNPY_CONFIGDIR` to a different directory; the path mentioned in the `RuntimeError` then still is `/usr/lib/python3/dist-packages/sunpy/data`.\r\n\r\n### Steps to Reproduce\r\n\r\n```\r\n$ cd /tmp\r\n$ SUNPY_CONFIGDIR=/tmp python3 -c \"import sunpy; exit(sunpy.self_test(args=\\\"-k 'not figure and not online'\\\"))\"\r\n```\r\n\r\n\r\n### System Details\r\n<!-- We at least need to know the SunPy version you are using. -->\r\n<!-- We provide a short function in SunPy that will provide some of the below information. -->\r\n<!-- It is sunpy.util.system_info(), this is optional but strongly recommended. 
-->\r\n\r\n - SunPy Version: 1.0.3\r\n - Astropy Version: 3.2.1\r\n - Python Version: 3.7.4\r\n - OS information: Debian unstable\r\n\n", "before_files": [{"content": "import os\nimport json\nimport pathlib\nimport warnings\nimport importlib\nimport tempfile\n\nimport pytest\n\nimport astropy\nimport astropy.config.paths\n\nimport sunpy.tests.helpers\nfrom sunpy.tests.hash import HASH_LIBRARY_NAME\nfrom sunpy.tests.helpers import new_hash_library, generate_figure_webpage\nfrom sunpy.util.exceptions import SunpyDeprecationWarning\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\n# Don't actually import pytest_remotedata because that can do things to the\n# entrypoints code in pytest.\nremotedata_spec = importlib.util.find_spec(\"pytest_remotedata\")\nHAVE_REMOTEDATA = remotedata_spec is not None\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--figure_dir\", action=\"store\", default=\"./figure_test_images\")\n\n\[email protected](scope='session', autouse=True)\ndef figure_base_dir(request):\n sunpy.tests.helpers.figure_base_dir = pathlib.Path(\n request.config.getoption(\"--figure_dir\"))\n\n\[email protected](scope='session', autouse=True)\ndef tmp_config_dir(request):\n \"\"\"\n Globally set the default config for all tests.\n \"\"\"\n tmpdir = tempfile.TemporaryDirectory()\n\n os.environ[\"SUNPY_CONFIGDIR\"] = str(pathlib.Path(__file__).parent / \"data\")\n astropy.config.paths.set_temp_config._temp_path = str(tmpdir.name)\n astropy.config.paths.set_temp_cache._temp_path = str(tmpdir.name)\n\n yield\n\n del os.environ[\"SUNPY_CONFIGDIR\"]\n astropy.config.paths.set_temp_config._temp_path = None\n astropy.config.paths.set_temp_cache._temp_path = None\n\n\[email protected]()\ndef undo_config_dir_patch():\n \"\"\"\n Provide a way for certain tests to not have the config dir.\n \"\"\"\n oridir = os.environ[\"SUNPY_CONFIGDIR\"]\n del os.environ[\"SUNPY_CONFIGDIR\"]\n yield\n os.environ[\"SUNPY_CONFIGDIR\"] = oridir\n\n\[email protected](scope='session', autouse=True)\ndef tmp_dl_dir(request):\n \"\"\"\n Globally set the default download directory for the test run to a tmp dir.\n \"\"\"\n with tempfile.TemporaryDirectory() as tmpdir:\n os.environ[\"SUNPY_DOWNLOADDIR\"] = tmpdir\n yield tmpdir\n del os.environ[\"SUNPY_DOWNLOADDIR\"]\n\n\[email protected]()\ndef undo_download_dir_patch():\n \"\"\"\n Provide a way for certain tests to not have tmp download dir.\n \"\"\"\n oridir = os.environ[\"SUNPY_DOWNLOADDIR\"]\n del os.environ[\"SUNPY_DOWNLOADDIR\"]\n yield\n os.environ[\"SUNPY_DOWNLOADDIR\"] = oridir\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'remotedata' if the\n pytest_remotedata plugin is not installed.\n \"\"\"\n if isinstance(item, pytest.Function):\n if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:\n pytest.skip(\"skipping remotedata tests as pytest-remotedata is not installed\")\n\n\ndef pytest_unconfigure(config):\n\n # If at least one figure test has been run, print result image directory\n if len(new_hash_library) > 0:\n # Write the new hash library in JSON\n figure_base_dir = pathlib.Path(config.getoption(\"--figure_dir\"))\n hashfile = figure_base_dir / HASH_LIBRARY_NAME\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n\n \"\"\"\n Turn on internet when generating the figure comparison webpage.\n \"\"\"\n if HAVE_REMOTEDATA:\n from 
pytest_remotedata.disable_internet import turn_on_internet, turn_off_internet\n else:\n def turn_on_internet(): pass\n def turn_off_internet(): pass\n\n turn_on_internet()\n generate_figure_webpage(new_hash_library)\n turn_off_internet()\n\n print('All images from image tests can be found in {}'.format(figure_base_dir.resolve()))\n print(\"The corresponding hash library is {}\".format(hashfile.resolve()))\n\n\ndef pytest_sessionstart(session):\n warnings.simplefilter(\"error\", SunpyDeprecationWarning)\n", "path": "sunpy/conftest.py"}], "after_files": [{"content": "import os\nimport json\nimport pathlib\nimport warnings\nimport importlib\nimport tempfile\n\nimport pytest\n\nimport astropy\nimport astropy.config.paths\n\nimport sunpy.tests.helpers\nfrom sunpy.tests.hash import HASH_LIBRARY_NAME\nfrom sunpy.tests.helpers import new_hash_library, generate_figure_webpage\nfrom sunpy.util.exceptions import SunpyDeprecationWarning\n\n# Force MPL to use non-gui backends for testing.\ntry:\n import matplotlib\nexcept ImportError:\n pass\nelse:\n matplotlib.use('Agg')\n\n# Don't actually import pytest_remotedata because that can do things to the\n# entrypoints code in pytest.\nremotedata_spec = importlib.util.find_spec(\"pytest_remotedata\")\nHAVE_REMOTEDATA = remotedata_spec is not None\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--figure_dir\", action=\"store\", default=\"./figure_test_images\")\n\n\[email protected](scope='session', autouse=True)\ndef figure_base_dir(request):\n sunpy.tests.helpers.figure_base_dir = pathlib.Path(\n request.config.getoption(\"--figure_dir\"))\n\n\[email protected](scope='session', autouse=True)\ndef tmp_config_dir(request):\n \"\"\"\n Globally set the default config for all tests.\n \"\"\"\n tmpdir = tempfile.TemporaryDirectory()\n\n os.environ[\"SUNPY_CONFIGDIR\"] = str(tmpdir.name)\n astropy.config.paths.set_temp_config._temp_path = str(tmpdir.name)\n astropy.config.paths.set_temp_cache._temp_path = str(tmpdir.name)\n\n yield\n\n del os.environ[\"SUNPY_CONFIGDIR\"]\n astropy.config.paths.set_temp_config._temp_path = None\n astropy.config.paths.set_temp_cache._temp_path = None\n\n\[email protected]()\ndef undo_config_dir_patch():\n \"\"\"\n Provide a way for certain tests to not have the config dir.\n \"\"\"\n oridir = os.environ[\"SUNPY_CONFIGDIR\"]\n del os.environ[\"SUNPY_CONFIGDIR\"]\n yield\n os.environ[\"SUNPY_CONFIGDIR\"] = oridir\n\n\[email protected](scope='session', autouse=True)\ndef tmp_dl_dir(request):\n \"\"\"\n Globally set the default download directory for the test run to a tmp dir.\n \"\"\"\n with tempfile.TemporaryDirectory() as tmpdir:\n os.environ[\"SUNPY_DOWNLOADDIR\"] = tmpdir\n yield tmpdir\n del os.environ[\"SUNPY_DOWNLOADDIR\"]\n\n\[email protected]()\ndef undo_download_dir_patch():\n \"\"\"\n Provide a way for certain tests to not have tmp download dir.\n \"\"\"\n oridir = os.environ[\"SUNPY_DOWNLOADDIR\"]\n del os.environ[\"SUNPY_DOWNLOADDIR\"]\n yield\n os.environ[\"SUNPY_DOWNLOADDIR\"] = oridir\n\n\ndef pytest_runtest_setup(item):\n \"\"\"\n pytest hook to skip all tests that have the mark 'remotedata' if the\n pytest_remotedata plugin is not installed.\n \"\"\"\n if isinstance(item, pytest.Function):\n if 'remote_data' in item.keywords and not HAVE_REMOTEDATA:\n pytest.skip(\"skipping remotedata tests as pytest-remotedata is not installed\")\n\n\ndef pytest_unconfigure(config):\n\n # If at least one figure test has been run, print result image directory\n if len(new_hash_library) > 0:\n # Write the new hash library in 
JSON\n figure_base_dir = pathlib.Path(config.getoption(\"--figure_dir\"))\n hashfile = figure_base_dir / HASH_LIBRARY_NAME\n with open(hashfile, 'w') as outfile:\n json.dump(new_hash_library, outfile, sort_keys=True, indent=4, separators=(',', ': '))\n\n \"\"\"\n Turn on internet when generating the figure comparison webpage.\n \"\"\"\n if HAVE_REMOTEDATA:\n from pytest_remotedata.disable_internet import turn_on_internet, turn_off_internet\n else:\n def turn_on_internet(): pass\n def turn_off_internet(): pass\n\n turn_on_internet()\n generate_figure_webpage(new_hash_library)\n turn_off_internet()\n\n print('All images from image tests can be found in {}'.format(figure_base_dir.resolve()))\n print(\"The corresponding hash library is {}\".format(hashfile.resolve()))\n\n\ndef pytest_sessionstart(session):\n warnings.simplefilter(\"error\", SunpyDeprecationWarning)\n", "path": "sunpy/conftest.py"}]} | 2,268 | 139 |
gh_patches_debug_24355 | rasdani/github-patches | git_diff | opendatacube__datacube-core-940 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Leaking DB connections in integration tests
Integration tests started occasionally failing with the following error:
```
psycopg2.OperationalError: FATAL: sorry, too many clients already
```
I have not been able to reproduce this in the local dev environment, but it happens on GitHub Actions from time to time, typically when running the `integration_tests/test_dataset_add.py` suite.
Integration tests run the `datacube` CLI app, but in a fancy way that avoids launching a separate process. This is needed for accurate coverage tracking, but I feel it leads to resource leakage: the apps are designed for one-off runs, so they might not care about closing DB connections in a "proper way".
Acceptable solutions:
- Bump connection pool to avoid problems
- Find a way for test fixtures to cleanup properly
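
For the second option, a hedged sketch of what a self-cleaning fixture could look like — assuming the `Index` returned by `index_connect()` exposes a `close()` method that releases its pooled connections, and where `local_config` is a hypothetical fixture providing the test DB configuration:

```python
# Sketch, not the actual fix: hand tests an Index that is always closed
# again, so each CLI-style invocation returns its connections to Postgres.
import pytest

from datacube.index import index_connect


@pytest.fixture
def index(local_config):  # local_config: hypothetical config fixture
    idx = index_connect(local_config, application_name='integration-test')
    try:
        yield idx
    finally:
        idx.close()  # assumed API: releases the underlying connection pool
```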
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datacube/ui/click.py`
Content:
```
1 """
2 Common functions for click-based cli scripts.
3 """
4 import functools
5 import logging
6 import os
7 import copy
8 import sys
9
10 import click
11
12 from datacube import config, __version__
13 from datacube.api.core import Datacube
14
15 from datacube.executor import get_executor, mk_celery_executor
16 from datacube.index import index_connect
17 from pathlib import Path
18
19 from datacube.ui.expression import parse_expressions
20 from sqlalchemy.exc import OperationalError, ProgrammingError
21
22 _LOG_FORMAT_STRING = '%(asctime)s %(process)d %(name)s %(levelname)s %(message)s'
23 CLICK_SETTINGS = dict(help_option_names=['-h', '--help'])
24 _LOG = logging.getLogger(__name__)
25
26
27 def _print_version(ctx, param, value):
28 if not value or ctx.resilient_parsing:
29 return
30
31 click.echo(
32 '{prog}, version {version}'.format(
33 prog='Open Data Cube core',
34 version=__version__
35 )
36 )
37 ctx.exit()
38
39
40 def compose(*functions):
41 """
42 >>> compose(
43 ... lambda x: x+1,
44 ... lambda y: y+2
45 ... )(1)
46 4
47 """
48
49 def compose2(f, g):
50 return lambda x: f(g(x))
51
52 return functools.reduce(compose2, functions, lambda x: x)
53
54
55 class ColorFormatter(logging.Formatter):
56 colors = {
57 'info': dict(fg='white'),
58 'error': dict(fg='red'),
59 'exception': dict(fg='red'),
60 'critical': dict(fg='red'),
61 'debug': dict(fg='blue'),
62 'warning': dict(fg='yellow')
63 }
64
65 def format(self, record):
66 if not record.exc_info:
67 record = copy.copy(record)
68 record.levelname = click.style(record.levelname, **self.colors.get(record.levelname.lower(), {}))
69 return logging.Formatter.format(self, record)
70
71
72 class ClickHandler(logging.Handler):
73 def emit(self, record):
74 try:
75 msg = self.format(record)
76 click.echo(msg, err=True)
77 except: # pylint: disable=bare-except
78 self.handleError(record)
79
80
81 def remove_handlers_of_type(logger, handler_type):
82 for handler in logger.handlers:
83 if isinstance(handler, handler_type):
84 logger.removeHandler(handler)
85
86
87 def _init_logging(ctx, param, value):
88 # When running in tests, we don't want to keep adding log handlers. It creates duplicate log messages up the wahoo.
89 remove_handlers_of_type(logging.root, ClickHandler)
90 handler = ClickHandler()
91 handler.formatter = ColorFormatter(_LOG_FORMAT_STRING)
92 logging.root.addHandler(handler)
93
94 logging_level = logging.WARN - 10 * value
95 logging.root.setLevel(logging_level)
96 logging.getLogger('datacube').setLevel(logging_level)
97
98 if logging_level <= logging.INFO:
99 logging.getLogger('rasterio').setLevel(logging.INFO)
100
101 logging.getLogger('datacube').info('Running datacube command: %s', ' '.join(sys.argv))
102
103 if not ctx.obj:
104 ctx.obj = {}
105
106 ctx.obj['verbosity'] = value
107
108
109 def _add_logfile(ctx, param, value):
110 formatter = logging.Formatter(_LOG_FORMAT_STRING)
111 for logfile in value:
112 handler = logging.FileHandler(logfile)
113 handler.formatter = formatter
114 logging.root.addHandler(handler)
115
116
117 def _log_queries(ctx, param, value):
118 if value:
119 logging.getLogger('sqlalchemy.engine').setLevel('INFO')
120
121
122 def _set_config(ctx, param, value):
123 if value:
124 if not any(os.path.exists(p) for p in value):
125 raise ValueError('No specified config paths exist: {}'.format(value))
126
127 if not ctx.obj:
128 ctx.obj = {}
129 paths = value
130 ctx.obj['config_files'] = paths
131 return value
132
133
134 def _set_environment(ctx, param, value):
135 if not ctx.obj:
136 ctx.obj = {}
137 ctx.obj['config_environment'] = value
138
139
140 #: pylint: disable=invalid-name
141 version_option = click.option('--version', is_flag=True, callback=_print_version,
142 expose_value=False, is_eager=True)
143 #: pylint: disable=invalid-name
144 verbose_option = click.option('--verbose', '-v', count=True, callback=_init_logging,
145 is_eager=True, expose_value=False, help="Use multiple times for more verbosity")
146 #: pylint: disable=invalid-name
147 logfile_option = click.option('--log-file', multiple=True, callback=_add_logfile,
148 is_eager=True, expose_value=False, help="Specify log file")
149 #: pylint: disable=invalid-name
150 config_option = click.option('--config', '--config_file', '-C', multiple=True, default='', callback=_set_config,
151 expose_value=False)
152 config_option_exposed = click.option('--config', '--config_file', '-C', multiple=True, default='', callback=_set_config)
153
154 environment_option = click.option('--env', '-E', callback=_set_environment,
155 expose_value=False)
156 #: pylint: disable=invalid-name
157 log_queries_option = click.option('--log-queries', is_flag=True, callback=_log_queries,
158 expose_value=False, help="Print database queries.")
159
160 # This is a function, so it's valid to be lowercase.
161 #: pylint: disable=invalid-name
162 global_cli_options = compose(
163 version_option,
164 verbose_option,
165 logfile_option,
166 environment_option,
167 config_option,
168 log_queries_option
169 )
170
171
172 @click.group(help="Data Cube command-line interface", context_settings=CLICK_SETTINGS)
173 @global_cli_options
174 def cli():
175 pass
176
177
178 def pass_config(f):
179 """Get a datacube config as the first argument. """
180
181 def new_func(*args, **kwargs):
182 obj = click.get_current_context().obj
183
184 paths = obj.get('config_files', None)
185 # If the user is overriding the defaults
186 specific_environment = obj.get('config_environment')
187
188 try:
189 parsed_config = config.LocalConfig.find(paths=paths, env=specific_environment)
190 except ValueError:
191 if specific_environment:
192 raise click.ClickException("No datacube config found for '{}'".format(specific_environment))
193 else:
194 raise click.ClickException("No datacube config found")
195
196 _LOG.debug("Loaded datacube config: %r", parsed_config)
197 return f(parsed_config, *args, **kwargs)
198
199 return functools.update_wrapper(new_func, f)
200
201
202 def pass_index(app_name=None, expect_initialised=True):
203 """Get a connection to the index as the first argument.
204
205 :param str app_name:
206 A short name of the application for logging purposes.
207 :param bool expect_initialised:
208 Whether to connect immediately on startup. Useful to catch connection config issues immediately,
209 but if you're planning to fork before any usage (such as in the case of some web servers),
210 you may not want this. For more information on thread/process usage, see datacube.index.Index
211 """
212
213 def decorate(f):
214 @pass_config
215 def with_index(local_config: config.LocalConfig,
216 *args,
217 **kwargs):
218 ctx = click.get_current_context()
219 try:
220 index = index_connect(local_config,
221 application_name=app_name or ctx.command_path,
222 validate_connection=expect_initialised)
223 ctx.obj['index'] = index
224 _LOG.debug("Connected to datacube index: %s", index)
225 return f(index, *args, **kwargs)
226 except (OperationalError, ProgrammingError) as e:
227 handle_exception('Error Connecting to database: %s', e)
228
229 return functools.update_wrapper(with_index, f)
230
231 return decorate
232
233
234 def pass_datacube(app_name=None, expect_initialised=True):
235 """
236 Get a DataCube from the current or default local settings.
237
238 :param str app_name:
239 A short name of the application for logging purposes.
240 :param bool expect_initialised:
241 Whether to connect immediately on startup. Useful to catch connection config issues immediately,
242 but if you're planning to fork before any usage (such as in the case of some web servers),
243 you may not want this. For More information on thread/process usage, see datacube.index.Index
244 """
245
246 def decorate(f):
247 @pass_index(app_name=app_name, expect_initialised=expect_initialised)
248 def with_datacube(index, *args, **kwargs):
249 return f(Datacube(index=index), *args, **kwargs)
250
251 return functools.update_wrapper(with_datacube, f)
252
253 return decorate
254
255
256 def parse_endpoint(value):
257 ip, port = tuple(value.split(':'))
258 return ip, int(port)
259
260
261 EXECUTOR_TYPES = {
262 'serial': lambda _: get_executor(None, None),
263 'multiproc': lambda workers: get_executor(None, int(workers)),
264 'distributed': lambda addr: get_executor(parse_endpoint(addr), True),
265 'celery': lambda addr: mk_celery_executor(*parse_endpoint(addr))
266 }
267
268 EXECUTOR_TYPES['dask'] = EXECUTOR_TYPES['distributed'] # Add alias "dask" for distributed
269
270
271 def _setup_executor(ctx, param, value):
272 try:
273 return EXECUTOR_TYPES[value[0]](value[1])
274 except ValueError:
275 ctx.fail("Failed to create '%s' executor with '%s'" % value)
276
277
278 executor_cli_options = click.option('--executor', # type: ignore
279 type=(click.Choice(list(EXECUTOR_TYPES)), str),
280 default=('serial', None),
281 help="Run parallelized, either locally or distributed. eg:\n"
282 "--executor multiproc 4 (OR)\n"
283 "--executor distributed 10.0.0.8:8888",
284 callback=_setup_executor)
285
286
287 def handle_exception(msg, e):
288 """
289 Exit following an exception in a CLI app
290
291 If verbosity (-v flag) specified, dump out a stack trace. Otherwise,
292 simply print the given error message.
293
294 Include a '%s' in the message to print the single line message from the
295 exception.
296
297 :param e: caught Exception
298 :param msg: Message to User with optional %s
299 """
300 ctx = click.get_current_context()
301 if ctx.obj['verbosity'] >= 1:
302 raise e
303 else:
304 if '%s' in msg:
305 click.echo(msg % e)
306 else:
307 click.echo(msg)
308 ctx.exit(1)
309
310
311 def parsed_search_expressions(f):
312 """
313 Add [EXPRESSIONs] arguments to a click application
314
315 Passes a parsed dict of search expressions to the `expressions` argument
316 of the command.
317
318 Also appends documentation on using search expressions to the command.
319
320 WARNING: This wrapped expects an unlimited number of search expressions
321 as click arguments, which means your command must take only click options
322 or a specified number of arguments.
323 """
324 if not f.__doc__:
325 f.__doc__ = ""
326 f.__doc__ += """
327 EXPRESSIONS
328
329 Select datasets using [EXPRESSIONS] to filter by date, product type,
330 spatial extents or other searchable fields.
331
332 \b
333 FIELD = VALUE
334 FIELD in DATE-RANGE
335 FIELD in [START, END]
336
337 \b
338 DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD
339 START and END can be either numbers or dates
340
341 FIELD: x, y, lat, lon, time, product, ...
342
343 \b
344 eg. 'time in [1996-01-01, 1996-12-31]'
345 'time in 1996'
346 'lon in [130, 140]' 'lat in [-40, -30]'
347 product=ls5_nbar_albers
348
349 """
350
351 def my_parse(ctx, param, value):
352 return parse_expressions(*list(value))
353
354 f = click.argument('expressions', callback=my_parse, nargs=-1)(f)
355 return f
356
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datacube/ui/click.py b/datacube/ui/click.py
--- a/datacube/ui/click.py
+++ b/datacube/ui/click.py
@@ -215,16 +215,21 @@
def with_index(local_config: config.LocalConfig,
*args,
**kwargs):
- ctx = click.get_current_context()
+ command_path = click.get_current_context().command_path
try:
index = index_connect(local_config,
- application_name=app_name or ctx.command_path,
+ application_name=app_name or command_path,
validate_connection=expect_initialised)
- ctx.obj['index'] = index
_LOG.debug("Connected to datacube index: %s", index)
- return f(index, *args, **kwargs)
except (OperationalError, ProgrammingError) as e:
handle_exception('Error Connecting to database: %s', e)
+ return
+
+ try:
+ return f(index, *args, **kwargs)
+ finally:
+ index.close()
+ del index
return functools.update_wrapper(with_index, f)
| {"golden_diff": "diff --git a/datacube/ui/click.py b/datacube/ui/click.py\n--- a/datacube/ui/click.py\n+++ b/datacube/ui/click.py\n@@ -215,16 +215,21 @@\n def with_index(local_config: config.LocalConfig,\n *args,\n **kwargs):\n- ctx = click.get_current_context()\n+ command_path = click.get_current_context().command_path\n try:\n index = index_connect(local_config,\n- application_name=app_name or ctx.command_path,\n+ application_name=app_name or command_path,\n validate_connection=expect_initialised)\n- ctx.obj['index'] = index\n _LOG.debug(\"Connected to datacube index: %s\", index)\n- return f(index, *args, **kwargs)\n except (OperationalError, ProgrammingError) as e:\n handle_exception('Error Connecting to database: %s', e)\n+ return\n+\n+ try:\n+ return f(index, *args, **kwargs)\n+ finally:\n+ index.close()\n+ del index\n \n return functools.update_wrapper(with_index, f)\n", "issue": "Leaking DB connections in intergration tests\nIntegration tests started occasionally failing with the following error:\r\n\r\n```\r\npsycopg2.OperationalError: FATAL: sorry, too many clients already\r\n```\r\n\r\nI have not been able to reproduce this in the local dev environment, but it happens on Github Actions from time to time, typically when running ` integration_tests/test_dataset_add.py` suite.\r\n\r\nIntegration tests run `datacube` CLI app, but in a fancy way, avoiding launching a separate process. This is needed for accurate coverage tracking, but I feel like this leads to resource leakage. Apps are designed for once off running, so they might not care about closing db connections in a \"proper way\".\r\n\r\nAcceptable solutions:\r\n\r\n- Bump connection pool to avoid problems\r\n- Find a way for test fixtures to cleanup properly\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCommon functions for click-based cli scripts.\n\"\"\"\nimport functools\nimport logging\nimport os\nimport copy\nimport sys\n\nimport click\n\nfrom datacube import config, __version__\nfrom datacube.api.core import Datacube\n\nfrom datacube.executor import get_executor, mk_celery_executor\nfrom datacube.index import index_connect\nfrom pathlib import Path\n\nfrom datacube.ui.expression import parse_expressions\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\n\n_LOG_FORMAT_STRING = '%(asctime)s %(process)d %(name)s %(levelname)s %(message)s'\nCLICK_SETTINGS = dict(help_option_names=['-h', '--help'])\n_LOG = logging.getLogger(__name__)\n\n\ndef _print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(\n '{prog}, version {version}'.format(\n prog='Open Data Cube core',\n version=__version__\n )\n )\n ctx.exit()\n\n\ndef compose(*functions):\n \"\"\"\n >>> compose(\n ... lambda x: x+1,\n ... lambda y: y+2\n ... 
)(1)\n 4\n \"\"\"\n\n def compose2(f, g):\n return lambda x: f(g(x))\n\n return functools.reduce(compose2, functions, lambda x: x)\n\n\nclass ColorFormatter(logging.Formatter):\n colors = {\n 'info': dict(fg='white'),\n 'error': dict(fg='red'),\n 'exception': dict(fg='red'),\n 'critical': dict(fg='red'),\n 'debug': dict(fg='blue'),\n 'warning': dict(fg='yellow')\n }\n\n def format(self, record):\n if not record.exc_info:\n record = copy.copy(record)\n record.levelname = click.style(record.levelname, **self.colors.get(record.levelname.lower(), {}))\n return logging.Formatter.format(self, record)\n\n\nclass ClickHandler(logging.Handler):\n def emit(self, record):\n try:\n msg = self.format(record)\n click.echo(msg, err=True)\n except: # pylint: disable=bare-except\n self.handleError(record)\n\n\ndef remove_handlers_of_type(logger, handler_type):\n for handler in logger.handlers:\n if isinstance(handler, handler_type):\n logger.removeHandler(handler)\n\n\ndef _init_logging(ctx, param, value):\n # When running in tests, we don't want to keep adding log handlers. It creates duplicate log messages up the wahoo.\n remove_handlers_of_type(logging.root, ClickHandler)\n handler = ClickHandler()\n handler.formatter = ColorFormatter(_LOG_FORMAT_STRING)\n logging.root.addHandler(handler)\n\n logging_level = logging.WARN - 10 * value\n logging.root.setLevel(logging_level)\n logging.getLogger('datacube').setLevel(logging_level)\n\n if logging_level <= logging.INFO:\n logging.getLogger('rasterio').setLevel(logging.INFO)\n\n logging.getLogger('datacube').info('Running datacube command: %s', ' '.join(sys.argv))\n\n if not ctx.obj:\n ctx.obj = {}\n\n ctx.obj['verbosity'] = value\n\n\ndef _add_logfile(ctx, param, value):\n formatter = logging.Formatter(_LOG_FORMAT_STRING)\n for logfile in value:\n handler = logging.FileHandler(logfile)\n handler.formatter = formatter\n logging.root.addHandler(handler)\n\n\ndef _log_queries(ctx, param, value):\n if value:\n logging.getLogger('sqlalchemy.engine').setLevel('INFO')\n\n\ndef _set_config(ctx, param, value):\n if value:\n if not any(os.path.exists(p) for p in value):\n raise ValueError('No specified config paths exist: {}'.format(value))\n\n if not ctx.obj:\n ctx.obj = {}\n paths = value\n ctx.obj['config_files'] = paths\n return value\n\n\ndef _set_environment(ctx, param, value):\n if not ctx.obj:\n ctx.obj = {}\n ctx.obj['config_environment'] = value\n\n\n#: pylint: disable=invalid-name\nversion_option = click.option('--version', is_flag=True, callback=_print_version,\n expose_value=False, is_eager=True)\n#: pylint: disable=invalid-name\nverbose_option = click.option('--verbose', '-v', count=True, callback=_init_logging,\n is_eager=True, expose_value=False, help=\"Use multiple times for more verbosity\")\n#: pylint: disable=invalid-name\nlogfile_option = click.option('--log-file', multiple=True, callback=_add_logfile,\n is_eager=True, expose_value=False, help=\"Specify log file\")\n#: pylint: disable=invalid-name\nconfig_option = click.option('--config', '--config_file', '-C', multiple=True, default='', callback=_set_config,\n expose_value=False)\nconfig_option_exposed = click.option('--config', '--config_file', '-C', multiple=True, default='', callback=_set_config)\n\nenvironment_option = click.option('--env', '-E', callback=_set_environment,\n expose_value=False)\n#: pylint: disable=invalid-name\nlog_queries_option = click.option('--log-queries', is_flag=True, callback=_log_queries,\n expose_value=False, help=\"Print database queries.\")\n\n# This is a function, 
so it's valid to be lowercase.\n#: pylint: disable=invalid-name\nglobal_cli_options = compose(\n version_option,\n verbose_option,\n logfile_option,\n environment_option,\n config_option,\n log_queries_option\n)\n\n\[email protected](help=\"Data Cube command-line interface\", context_settings=CLICK_SETTINGS)\n@global_cli_options\ndef cli():\n pass\n\n\ndef pass_config(f):\n \"\"\"Get a datacube config as the first argument. \"\"\"\n\n def new_func(*args, **kwargs):\n obj = click.get_current_context().obj\n\n paths = obj.get('config_files', None)\n # If the user is overriding the defaults\n specific_environment = obj.get('config_environment')\n\n try:\n parsed_config = config.LocalConfig.find(paths=paths, env=specific_environment)\n except ValueError:\n if specific_environment:\n raise click.ClickException(\"No datacube config found for '{}'\".format(specific_environment))\n else:\n raise click.ClickException(\"No datacube config found\")\n\n _LOG.debug(\"Loaded datacube config: %r\", parsed_config)\n return f(parsed_config, *args, **kwargs)\n\n return functools.update_wrapper(new_func, f)\n\n\ndef pass_index(app_name=None, expect_initialised=True):\n \"\"\"Get a connection to the index as the first argument.\n\n :param str app_name:\n A short name of the application for logging purposes.\n :param bool expect_initialised:\n Whether to connect immediately on startup. Useful to catch connection config issues immediately,\n but if you're planning to fork before any usage (such as in the case of some web servers),\n you may not want this. For more information on thread/process usage, see datacube.index.Index\n \"\"\"\n\n def decorate(f):\n @pass_config\n def with_index(local_config: config.LocalConfig,\n *args,\n **kwargs):\n ctx = click.get_current_context()\n try:\n index = index_connect(local_config,\n application_name=app_name or ctx.command_path,\n validate_connection=expect_initialised)\n ctx.obj['index'] = index\n _LOG.debug(\"Connected to datacube index: %s\", index)\n return f(index, *args, **kwargs)\n except (OperationalError, ProgrammingError) as e:\n handle_exception('Error Connecting to database: %s', e)\n\n return functools.update_wrapper(with_index, f)\n\n return decorate\n\n\ndef pass_datacube(app_name=None, expect_initialised=True):\n \"\"\"\n Get a DataCube from the current or default local settings.\n\n :param str app_name:\n A short name of the application for logging purposes.\n :param bool expect_initialised:\n Whether to connect immediately on startup. Useful to catch connection config issues immediately,\n but if you're planning to fork before any usage (such as in the case of some web servers),\n you may not want this. 
For More information on thread/process usage, see datacube.index.Index\n \"\"\"\n\n def decorate(f):\n @pass_index(app_name=app_name, expect_initialised=expect_initialised)\n def with_datacube(index, *args, **kwargs):\n return f(Datacube(index=index), *args, **kwargs)\n\n return functools.update_wrapper(with_datacube, f)\n\n return decorate\n\n\ndef parse_endpoint(value):\n ip, port = tuple(value.split(':'))\n return ip, int(port)\n\n\nEXECUTOR_TYPES = {\n 'serial': lambda _: get_executor(None, None),\n 'multiproc': lambda workers: get_executor(None, int(workers)),\n 'distributed': lambda addr: get_executor(parse_endpoint(addr), True),\n 'celery': lambda addr: mk_celery_executor(*parse_endpoint(addr))\n}\n\nEXECUTOR_TYPES['dask'] = EXECUTOR_TYPES['distributed'] # Add alias \"dask\" for distributed\n\n\ndef _setup_executor(ctx, param, value):\n try:\n return EXECUTOR_TYPES[value[0]](value[1])\n except ValueError:\n ctx.fail(\"Failed to create '%s' executor with '%s'\" % value)\n\n\nexecutor_cli_options = click.option('--executor', # type: ignore\n type=(click.Choice(list(EXECUTOR_TYPES)), str),\n default=('serial', None),\n help=\"Run parallelized, either locally or distributed. eg:\\n\"\n \"--executor multiproc 4 (OR)\\n\"\n \"--executor distributed 10.0.0.8:8888\",\n callback=_setup_executor)\n\n\ndef handle_exception(msg, e):\n \"\"\"\n Exit following an exception in a CLI app\n\n If verbosity (-v flag) specified, dump out a stack trace. Otherwise,\n simply print the given error message.\n\n Include a '%s' in the message to print the single line message from the\n exception.\n\n :param e: caught Exception\n :param msg: Message to User with optional %s\n \"\"\"\n ctx = click.get_current_context()\n if ctx.obj['verbosity'] >= 1:\n raise e\n else:\n if '%s' in msg:\n click.echo(msg % e)\n else:\n click.echo(msg)\n ctx.exit(1)\n\n\ndef parsed_search_expressions(f):\n \"\"\"\n Add [EXPRESSIONs] arguments to a click application\n\n Passes a parsed dict of search expressions to the `expressions` argument\n of the command.\n\n Also appends documentation on using search expressions to the command.\n\n WARNING: This wrapped expects an unlimited number of search expressions\n as click arguments, which means your command must take only click options\n or a specified number of arguments.\n \"\"\"\n if not f.__doc__:\n f.__doc__ = \"\"\n f.__doc__ += \"\"\"\n EXPRESSIONS\n \n Select datasets using [EXPRESSIONS] to filter by date, product type,\n spatial extents or other searchable fields.\n\n \\b\n FIELD = VALUE\n FIELD in DATE-RANGE\n FIELD in [START, END]\n\n \\b\n DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD\n START and END can be either numbers or dates\n\n FIELD: x, y, lat, lon, time, product, ... \n\n \\b\n eg. 
'time in [1996-01-01, 1996-12-31]'\n 'time in 1996'\n 'lon in [130, 140]' 'lat in [-40, -30]'\n product=ls5_nbar_albers\n\n \"\"\"\n\n def my_parse(ctx, param, value):\n return parse_expressions(*list(value))\n\n f = click.argument('expressions', callback=my_parse, nargs=-1)(f)\n return f\n", "path": "datacube/ui/click.py"}], "after_files": [{"content": "\"\"\"\nCommon functions for click-based cli scripts.\n\"\"\"\nimport functools\nimport logging\nimport os\nimport copy\nimport sys\n\nimport click\n\nfrom datacube import config, __version__\nfrom datacube.api.core import Datacube\n\nfrom datacube.executor import get_executor, mk_celery_executor\nfrom datacube.index import index_connect\nfrom pathlib import Path\n\nfrom datacube.ui.expression import parse_expressions\nfrom sqlalchemy.exc import OperationalError, ProgrammingError\n\n_LOG_FORMAT_STRING = '%(asctime)s %(process)d %(name)s %(levelname)s %(message)s'\nCLICK_SETTINGS = dict(help_option_names=['-h', '--help'])\n_LOG = logging.getLogger(__name__)\n\n\ndef _print_version(ctx, param, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(\n '{prog}, version {version}'.format(\n prog='Open Data Cube core',\n version=__version__\n )\n )\n ctx.exit()\n\n\ndef compose(*functions):\n \"\"\"\n >>> compose(\n ... lambda x: x+1,\n ... lambda y: y+2\n ... )(1)\n 4\n \"\"\"\n\n def compose2(f, g):\n return lambda x: f(g(x))\n\n return functools.reduce(compose2, functions, lambda x: x)\n\n\nclass ColorFormatter(logging.Formatter):\n colors = {\n 'info': dict(fg='white'),\n 'error': dict(fg='red'),\n 'exception': dict(fg='red'),\n 'critical': dict(fg='red'),\n 'debug': dict(fg='blue'),\n 'warning': dict(fg='yellow')\n }\n\n def format(self, record):\n if not record.exc_info:\n record = copy.copy(record)\n record.levelname = click.style(record.levelname, **self.colors.get(record.levelname.lower(), {}))\n return logging.Formatter.format(self, record)\n\n\nclass ClickHandler(logging.Handler):\n def emit(self, record):\n try:\n msg = self.format(record)\n click.echo(msg, err=True)\n except: # pylint: disable=bare-except\n self.handleError(record)\n\n\ndef remove_handlers_of_type(logger, handler_type):\n for handler in logger.handlers:\n if isinstance(handler, handler_type):\n logger.removeHandler(handler)\n\n\ndef _init_logging(ctx, param, value):\n # When running in tests, we don't want to keep adding log handlers. 
It creates duplicate log messages up the wahoo.\n remove_handlers_of_type(logging.root, ClickHandler)\n handler = ClickHandler()\n handler.formatter = ColorFormatter(_LOG_FORMAT_STRING)\n logging.root.addHandler(handler)\n\n logging_level = logging.WARN - 10 * value\n logging.root.setLevel(logging_level)\n logging.getLogger('datacube').setLevel(logging_level)\n\n if logging_level <= logging.INFO:\n logging.getLogger('rasterio').setLevel(logging.INFO)\n\n logging.getLogger('datacube').info('Running datacube command: %s', ' '.join(sys.argv))\n\n if not ctx.obj:\n ctx.obj = {}\n\n ctx.obj['verbosity'] = value\n\n\ndef _add_logfile(ctx, param, value):\n formatter = logging.Formatter(_LOG_FORMAT_STRING)\n for logfile in value:\n handler = logging.FileHandler(logfile)\n handler.formatter = formatter\n logging.root.addHandler(handler)\n\n\ndef _log_queries(ctx, param, value):\n if value:\n logging.getLogger('sqlalchemy.engine').setLevel('INFO')\n\n\ndef _set_config(ctx, param, value):\n if value:\n if not any(os.path.exists(p) for p in value):\n raise ValueError('No specified config paths exist: {}'.format(value))\n\n if not ctx.obj:\n ctx.obj = {}\n paths = value\n ctx.obj['config_files'] = paths\n return value\n\n\ndef _set_environment(ctx, param, value):\n if not ctx.obj:\n ctx.obj = {}\n ctx.obj['config_environment'] = value\n\n\n#: pylint: disable=invalid-name\nversion_option = click.option('--version', is_flag=True, callback=_print_version,\n expose_value=False, is_eager=True)\n#: pylint: disable=invalid-name\nverbose_option = click.option('--verbose', '-v', count=True, callback=_init_logging,\n is_eager=True, expose_value=False, help=\"Use multiple times for more verbosity\")\n#: pylint: disable=invalid-name\nlogfile_option = click.option('--log-file', multiple=True, callback=_add_logfile,\n is_eager=True, expose_value=False, help=\"Specify log file\")\n#: pylint: disable=invalid-name\nconfig_option = click.option('--config', '--config_file', '-C', multiple=True, default='', callback=_set_config,\n expose_value=False)\nconfig_option_exposed = click.option('--config', '--config_file', '-C', multiple=True, default='', callback=_set_config)\n\nenvironment_option = click.option('--env', '-E', callback=_set_environment,\n expose_value=False)\n#: pylint: disable=invalid-name\nlog_queries_option = click.option('--log-queries', is_flag=True, callback=_log_queries,\n expose_value=False, help=\"Print database queries.\")\n\n# This is a function, so it's valid to be lowercase.\n#: pylint: disable=invalid-name\nglobal_cli_options = compose(\n version_option,\n verbose_option,\n logfile_option,\n environment_option,\n config_option,\n log_queries_option\n)\n\n\[email protected](help=\"Data Cube command-line interface\", context_settings=CLICK_SETTINGS)\n@global_cli_options\ndef cli():\n pass\n\n\ndef pass_config(f):\n \"\"\"Get a datacube config as the first argument. 
\"\"\"\n\n def new_func(*args, **kwargs):\n obj = click.get_current_context().obj\n\n paths = obj.get('config_files', None)\n # If the user is overriding the defaults\n specific_environment = obj.get('config_environment')\n\n try:\n parsed_config = config.LocalConfig.find(paths=paths, env=specific_environment)\n except ValueError:\n if specific_environment:\n raise click.ClickException(\"No datacube config found for '{}'\".format(specific_environment))\n else:\n raise click.ClickException(\"No datacube config found\")\n\n _LOG.debug(\"Loaded datacube config: %r\", parsed_config)\n return f(parsed_config, *args, **kwargs)\n\n return functools.update_wrapper(new_func, f)\n\n\ndef pass_index(app_name=None, expect_initialised=True):\n \"\"\"Get a connection to the index as the first argument.\n\n :param str app_name:\n A short name of the application for logging purposes.\n :param bool expect_initialised:\n Whether to connect immediately on startup. Useful to catch connection config issues immediately,\n but if you're planning to fork before any usage (such as in the case of some web servers),\n you may not want this. For more information on thread/process usage, see datacube.index.Index\n \"\"\"\n\n def decorate(f):\n @pass_config\n def with_index(local_config: config.LocalConfig,\n *args,\n **kwargs):\n command_path = click.get_current_context().command_path\n try:\n index = index_connect(local_config,\n application_name=app_name or command_path,\n validate_connection=expect_initialised)\n _LOG.debug(\"Connected to datacube index: %s\", index)\n except (OperationalError, ProgrammingError) as e:\n handle_exception('Error Connecting to database: %s', e)\n return\n\n try:\n return f(index, *args, **kwargs)\n finally:\n index.close()\n del index\n\n return functools.update_wrapper(with_index, f)\n\n return decorate\n\n\ndef pass_datacube(app_name=None, expect_initialised=True):\n \"\"\"\n Get a DataCube from the current or default local settings.\n\n :param str app_name:\n A short name of the application for logging purposes.\n :param bool expect_initialised:\n Whether to connect immediately on startup. Useful to catch connection config issues immediately,\n but if you're planning to fork before any usage (such as in the case of some web servers),\n you may not want this. For More information on thread/process usage, see datacube.index.Index\n \"\"\"\n\n def decorate(f):\n @pass_index(app_name=app_name, expect_initialised=expect_initialised)\n def with_datacube(index, *args, **kwargs):\n return f(Datacube(index=index), *args, **kwargs)\n\n return functools.update_wrapper(with_datacube, f)\n\n return decorate\n\n\ndef parse_endpoint(value):\n ip, port = tuple(value.split(':'))\n return ip, int(port)\n\n\nEXECUTOR_TYPES = {\n 'serial': lambda _: get_executor(None, None),\n 'multiproc': lambda workers: get_executor(None, int(workers)),\n 'distributed': lambda addr: get_executor(parse_endpoint(addr), True),\n 'celery': lambda addr: mk_celery_executor(*parse_endpoint(addr))\n}\n\nEXECUTOR_TYPES['dask'] = EXECUTOR_TYPES['distributed'] # Add alias \"dask\" for distributed\n\n\ndef _setup_executor(ctx, param, value):\n try:\n return EXECUTOR_TYPES[value[0]](value[1])\n except ValueError:\n ctx.fail(\"Failed to create '%s' executor with '%s'\" % value)\n\n\nexecutor_cli_options = click.option('--executor', # type: ignore\n type=(click.Choice(list(EXECUTOR_TYPES)), str),\n default=('serial', None),\n help=\"Run parallelized, either locally or distributed. 
eg:\\n\"\n \"--executor multiproc 4 (OR)\\n\"\n \"--executor distributed 10.0.0.8:8888\",\n callback=_setup_executor)\n\n\ndef handle_exception(msg, e):\n \"\"\"\n Exit following an exception in a CLI app\n\n If verbosity (-v flag) specified, dump out a stack trace. Otherwise,\n simply print the given error message.\n\n Include a '%s' in the message to print the single line message from the\n exception.\n\n :param e: caught Exception\n :param msg: Message to User with optional %s\n \"\"\"\n ctx = click.get_current_context()\n if ctx.obj['verbosity'] >= 1:\n raise e\n else:\n if '%s' in msg:\n click.echo(msg % e)\n else:\n click.echo(msg)\n ctx.exit(1)\n\n\ndef parsed_search_expressions(f):\n \"\"\"\n Add [EXPRESSIONs] arguments to a click application\n\n Passes a parsed dict of search expressions to the `expressions` argument\n of the command.\n\n Also appends documentation on using search expressions to the command.\n\n WARNING: This wrapped expects an unlimited number of search expressions\n as click arguments, which means your command must take only click options\n or a specified number of arguments.\n \"\"\"\n if not f.__doc__:\n f.__doc__ = \"\"\n f.__doc__ += \"\"\"\n EXPRESSIONS\n \n Select datasets using [EXPRESSIONS] to filter by date, product type,\n spatial extents or other searchable fields.\n\n \\b\n FIELD = VALUE\n FIELD in DATE-RANGE\n FIELD in [START, END]\n\n \\b\n DATE-RANGE is one of YYYY, YYYY-MM or YYYY-MM-DD\n START and END can be either numbers or dates\n\n FIELD: x, y, lat, lon, time, product, ... \n\n \\b\n eg. 'time in [1996-01-01, 1996-12-31]'\n 'time in 1996'\n 'lon in [130, 140]' 'lat in [-40, -30]'\n product=ls5_nbar_albers\n\n \"\"\"\n\n def my_parse(ctx, param, value):\n return parse_expressions(*list(value))\n\n f = click.argument('expressions', callback=my_parse, nargs=-1)(f)\n return f\n", "path": "datacube/ui/click.py"}]} | 3,975 | 245 |
gh_patches_debug_56962 | rasdani/github-patches | git_diff | tensorflow__tensor2tensor-360 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with `.decode` on `str` object when generating `summarize_cnn_dailymail32k` data
## CMD
```
t2t-trainer \
--generate_data \
--data_dir="$data" \
--problems=summarize_cnn_dailymail32k \
--model=transformer \
--hparams_set=transformer_base_single_gpu \
--output_dir="$root"
```
## OUTPUT
```
INFO:tensorflow:Generating data for summarize_cnn_dailymail32k
INFO:tensorflow:Generating vocab file: t2t_data/vocab.cnndailymail.32768
INFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ&confirm=ak1U to /tmp/t2t_datagen/cnn_stories.tgz
INFO:tensorflow:Succesfully downloaded cnn_stories.tgz, 158577824 bytes.
INFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs&confirm=1N53 to /tmp/t2t_datagen/dailymail_stories.tgz
INFO:tensorflow:Succesfully downloaded dailymail_stories.tgz, 375893739 bytes.
Traceback (most recent call last):
File "/home/pltrdy/anaconda3/bin/t2t-trainer", line 96, in <module>
tf.app.run()
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensorflow/python/platform/app.py", line 48, in run
_sys.exit(main(_sys.argv[:1] + flags_passthrough))
File "/home/pltrdy/anaconda3/bin/t2t-trainer", line 83, in main
problem.generate_data(data_dir, tmp_dir)
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/problem.py", line 625, in generate_data
self.generator(data_dir, tmp_dir, True), all_paths)
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py", line 143, in generate_files
for case in generator:
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py", line 132, in generator
story_generator(tmp_dir))
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py", line 328, in get_or_generate_vocab_inner
for item in generator:
File "/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py", line 78, in story_generator
line = unicode(line, "utf-8") if six.PY2 else line.decode("utf-8")
AttributeError: 'str' object has no attribute 'decode'
```
## CONFIG
* Python version: `Python 3.6.1 |Anaconda custom (64-bit)| (default, May 11 2017, 13:09:58)`
* Tensor2Tensor version:
```
$ pip show tensor2tensor
Name: tensor2tensor
Version: 1.2.4
Summary: Tensor2Tensor
Home-page: http://github.com/tensorflow/tensor2tensor
Author: Google Inc.
Author-email: [email protected]
License: Apache 2.0
Location: /home/pltrdy/anaconda3/lib/python3.6/site-packages
Requires: sympy, numpy, requests, six, future, bz2file
```
## QUICKFIX
```
--- a/tensor2tensor/data_generators/cnn_dailymail.py
+++ b/tensor2tensor/data_generators/cnn_dailymail.py
@@ -74,7 +74,7 @@ def story_generator(tmp_dir):
for path in paths:
for story_file in tf.gfile.Glob(path + "*"):
story = u""
- for line in tf.gfile.Open(story_file):
+ for line in tf.gfile.Open(story_file, 'rb'):
```
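
The reason `'rb'` helps: under Python 3, reading a file in text mode yields `str`, which has no `.decode()`; reading in binary mode yields `bytes`, which can be decoded on both Python 2 and 3. A small illustration with the built-in `open()` (used here only for illustration; `tf.gfile.Open` follows the same text/binary convention):

```python
with open("story.txt", "w", encoding="utf-8") as f:
    f.write("(CNN) -- some story text\n")

with open("story.txt") as f:               # text mode -> str
    line = f.readline()
    # line.decode("utf-8")                 # AttributeError: 'str' object has no attribute 'decode'

with open("story.txt", "rb") as f:         # binary mode -> bytes
    line = f.readline().decode("utf-8")    # works on Python 2 and 3
```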
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensor2tensor/data_generators/cnn_dailymail.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2017 The Tensor2Tensor Authors.
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """Data generators for the CNN and Daily Mail datasets."""
17
18 from __future__ import absolute_import
19 from __future__ import division
20 from __future__ import print_function
21
22 import os
23 import tarfile
24
25 # Dependency imports
26
27 import six
28 from tensor2tensor.data_generators import generator_utils
29 from tensor2tensor.data_generators import problem
30 from tensor2tensor.data_generators import text_encoder
31 from tensor2tensor.utils import registry
32
33 import tensorflow as tf
34
35
36 # Links to data from http://cs.nyu.edu/~kcho/DMQA/
37 _CNN_STORIES_DRIVE_URL = "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ"
38
39 _DAILYMAIL_STORIES_DRIVE_URL = "https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs"
40
41
42 # End-of-sentence marker.
43 EOS = text_encoder.EOS_ID
44
45
46 def _maybe_download_corpora(tmp_dir):
47 """Download corpora if necessary and unzip them.
48
49 Args:
50 tmp_dir: directory containing dataset.
51
52 Returns:
53 filepath of the downloaded corpus file.
54 """
55 cnn_filename = "cnn_stories.tgz"
56 cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/")
57 dailymail_filename = "dailymail_stories.tgz"
58 dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/")
59 if not tf.gfile.Exists(cnn_finalpath):
60 cnn_file = generator_utils.maybe_download_from_drive(
61 tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)
62 with tarfile.open(cnn_file, "r:gz") as cnn_tar:
63 cnn_tar.extractall(tmp_dir)
64 if not tf.gfile.Exists(dailymail_finalpath):
65 dailymail_file = generator_utils.maybe_download_from_drive(
66 tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)
67 with tarfile.open(dailymail_file, "r:gz") as dailymail_tar:
68 dailymail_tar.extractall(tmp_dir)
69 return [cnn_finalpath, dailymail_finalpath]
70
71
72 def story_generator(tmp_dir):
73 paths = _maybe_download_corpora(tmp_dir)
74 for path in paths:
75 for story_file in tf.gfile.Glob(path + "*"):
76 story = u""
77 for line in tf.gfile.Open(story_file):
78 line = unicode(line, "utf-8") if six.PY2 else line.decode("utf-8")
79 story += line
80 yield story
81
82
83 def _story_summary_split(story):
84 end_pos = story.find("\n\n") # Upto first empty line.
85 assert end_pos != -1
86 return story[:end_pos], story[end_pos:].strip()
87
88
89 @registry.register_problem
90 class SummarizeCnnDailymail32k(problem.Text2TextProblem):
91 """Summarize CNN and Daily Mail articles to their first paragraph."""
92
93 @property
94 def is_character_level(self):
95 return False
96
97 @property
98 def has_inputs(self):
99 return True
100
101 @property
102 def input_space_id(self):
103 return problem.SpaceID.EN_TOK
104
105 @property
106 def target_space_id(self):
107 return problem.SpaceID.EN_TOK
108
109 @property
110 def num_shards(self):
111 return 100
112
113 @property
114 def vocab_name(self):
115 return "vocab.cnndailymail"
116
117 @property
118 def use_subword_tokenizer(self):
119 return True
120
121 @property
122 def targeted_vocab_size(self):
123 return 2**15 # 32768
124
125 @property
126 def use_train_shards_for_dev(self):
127 return True
128
129 def generator(self, data_dir, tmp_dir, _):
130 encoder = generator_utils.get_or_generate_vocab_inner(
131 data_dir, self.vocab_file, self.targeted_vocab_size,
132 story_generator(tmp_dir))
133 for story in story_generator(tmp_dir):
134 summary, rest = _story_summary_split(story)
135 encoded_summary = encoder.encode(summary) + [EOS]
136 encoded_story = encoder.encode(rest) + [EOS]
137 yield {"inputs": encoded_story, "targets": encoded_summary}
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tensor2tensor/data_generators/cnn_dailymail.py b/tensor2tensor/data_generators/cnn_dailymail.py
--- a/tensor2tensor/data_generators/cnn_dailymail.py
+++ b/tensor2tensor/data_generators/cnn_dailymail.py
@@ -74,7 +74,7 @@
for path in paths:
for story_file in tf.gfile.Glob(path + "*"):
story = u""
- for line in tf.gfile.Open(story_file):
+ for line in tf.gfile.Open(story_file, 'rb'):
line = unicode(line, "utf-8") if six.PY2 else line.decode("utf-8")
story += line
yield story
| {"golden_diff": "diff --git a/tensor2tensor/data_generators/cnn_dailymail.py b/tensor2tensor/data_generators/cnn_dailymail.py\n--- a/tensor2tensor/data_generators/cnn_dailymail.py\n+++ b/tensor2tensor/data_generators/cnn_dailymail.py\n@@ -74,7 +74,7 @@\n for path in paths:\n for story_file in tf.gfile.Glob(path + \"*\"):\n story = u\"\"\n- for line in tf.gfile.Open(story_file):\n+ for line in tf.gfile.Open(story_file, 'rb'):\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\n story += line\n yield story\n", "issue": "Error with `.decode` on `str` object when generating `summarize_cnn_dailymail32k` data\n## CMD\r\n```\r\nt2t-trainer \\\r\n --generate_data \\\r\n --data_dir=\"$data\" \\\r\n --problems=summarize_cnn_dailymail32k \\\r\n --model=transformer \\\r\n --hparams_set=transformer_base_single_gpu \\\r\n --output_dir=\"$root\"\r\n```\r\n\r\n## OUTPUT\r\n```\r\nINFO:tensorflow:Generating data for summarize_cnn_dailymail32k\r\nINFO:tensorflow:Generating vocab file: t2t_data/vocab.cnndailymail.32768\r\nINFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ&confirm=ak1U to /tmp/t2t_datagen/cnn_stories.tgz\r\n\r\nINFO:tensorflow:Succesfully downloaded cnn_stories.tgz, 158577824 bytes.\r\nINFO:tensorflow:Downloading https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs&confirm=1N53 to /tmp/t2t_datagen/dailymail_stories.tgz\r\n\r\nINFO:tensorflow:Succesfully downloaded dailymail_stories.tgz, 375893739 bytes.\r\nTraceback (most recent call last):\r\n File \"/home/pltrdy/anaconda3/bin/t2t-trainer\", line 96, in <module>\r\n tf.app.run()\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensorflow/python/platform/app.py\", line 48, in run\r\n _sys.exit(main(_sys.argv[:1] + flags_passthrough))\r\n File \"/home/pltrdy/anaconda3/bin/t2t-trainer\", line 83, in main\r\n problem.generate_data(data_dir, tmp_dir)\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/problem.py\", line 625, in generate_data\r\n self.generator(data_dir, tmp_dir, True), all_paths)\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py\", line 143, in generate_files\r\n for case in generator:\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py\", line 132, in generator\r\n story_generator(tmp_dir))\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/generator_utils.py\", line 328, in get_or_generate_vocab_inner\r\n for item in generator:\r\n File \"/home/pltrdy/anaconda3/lib/python3.6/site-packages/tensor2tensor/data_generators/cnn_dailymail.py\", line 78, in story_generator\r\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\r\nAttributeError: 'str' object has no attribute 'decode'\r\n```\r\n\r\n## CONFIG\r\n* Python version: `Python 3.6.1 |Anaconda custom (64-bit)| (default, May 11 2017, 13:09:58)`\r\n* Tensor2Tensor version:\r\n```\r\n$ pip show tensor2tensor\r\nName: tensor2tensor\r\nVersion: 1.2.4\r\nSummary: Tensor2Tensor\r\nHome-page: http://github.com/tensorflow/tensor2tensor\r\nAuthor: Google Inc.\r\nAuthor-email: [email protected]\r\nLicense: Apache 2.0\r\nLocation: /home/pltrdy/anaconda3/lib/python3.6/site-packages\r\nRequires: sympy, numpy, requests, six, future, bz2file\r\n```\r\n\r\n## QUICKFIX\r\n```\r\n--- a/tensor2tensor/data_generators/cnn_dailymail.py\r\n+++ 
b/tensor2tensor/data_generators/cnn_dailymail.py\r\n@@ -74,7 +74,7 @@ def story_generator(tmp_dir):\r\n for path in paths:\r\n for story_file in tf.gfile.Glob(path + \"*\"):\r\n story = u\"\"\r\n- for line in tf.gfile.Open(story_file):\r\n+ for line in tf.gfile.Open(story_file, 'rb'):\r\n\r\n```\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for the CNN and Daily Mail datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\n# Links to data from http://cs.nyu.edu/~kcho/DMQA/\n_CNN_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ\"\n\n_DAILYMAIL_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs\"\n\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n\ndef _maybe_download_corpora(tmp_dir):\n \"\"\"Download corpora if necessary and unzip them.\n\n Args:\n tmp_dir: directory containing dataset.\n\n Returns:\n filepath of the downloaded corpus file.\n \"\"\"\n cnn_filename = \"cnn_stories.tgz\"\n cnn_finalpath = os.path.join(tmp_dir, \"cnn/stories/\")\n dailymail_filename = \"dailymail_stories.tgz\"\n dailymail_finalpath = os.path.join(tmp_dir, \"dailymail/stories/\")\n if not tf.gfile.Exists(cnn_finalpath):\n cnn_file = generator_utils.maybe_download_from_drive(\n tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)\n with tarfile.open(cnn_file, \"r:gz\") as cnn_tar:\n cnn_tar.extractall(tmp_dir)\n if not tf.gfile.Exists(dailymail_finalpath):\n dailymail_file = generator_utils.maybe_download_from_drive(\n tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)\n with tarfile.open(dailymail_file, \"r:gz\") as dailymail_tar:\n dailymail_tar.extractall(tmp_dir)\n return [cnn_finalpath, dailymail_finalpath]\n\n\ndef story_generator(tmp_dir):\n paths = _maybe_download_corpora(tmp_dir)\n for path in paths:\n for story_file in tf.gfile.Glob(path + \"*\"):\n story = u\"\"\n for line in tf.gfile.Open(story_file):\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\n story += line\n yield story\n\n\ndef _story_summary_split(story):\n end_pos = story.find(\"\\n\\n\") # Upto first empty line.\n assert end_pos != -1\n return story[:end_pos], story[end_pos:].strip()\n\n\[email protected]_problem\nclass SummarizeCnnDailymail32k(problem.Text2TextProblem):\n \"\"\"Summarize CNN and Daily Mail articles to their first paragraph.\"\"\"\n\n @property\n def is_character_level(self):\n return False\n\n @property\n def has_inputs(self):\n return True\n\n @property\n def input_space_id(self):\n return 
problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def num_shards(self):\n return 100\n\n @property\n def vocab_name(self):\n return \"vocab.cnndailymail\"\n\n @property\n def use_subword_tokenizer(self):\n return True\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def use_train_shards_for_dev(self):\n return True\n\n def generator(self, data_dir, tmp_dir, _):\n encoder = generator_utils.get_or_generate_vocab_inner(\n data_dir, self.vocab_file, self.targeted_vocab_size,\n story_generator(tmp_dir))\n for story in story_generator(tmp_dir):\n summary, rest = _story_summary_split(story)\n encoded_summary = encoder.encode(summary) + [EOS]\n encoded_story = encoder.encode(rest) + [EOS]\n yield {\"inputs\": encoded_story, \"targets\": encoded_summary}\n", "path": "tensor2tensor/data_generators/cnn_dailymail.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2017 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data generators for the CNN and Daily Mail datasets.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\n# Dependency imports\n\nimport six\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_encoder\nfrom tensor2tensor.utils import registry\n\nimport tensorflow as tf\n\n\n# Links to data from http://cs.nyu.edu/~kcho/DMQA/\n_CNN_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ\"\n\n_DAILYMAIL_STORIES_DRIVE_URL = \"https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs\"\n\n\n# End-of-sentence marker.\nEOS = text_encoder.EOS_ID\n\n\ndef _maybe_download_corpora(tmp_dir):\n \"\"\"Download corpora if necessary and unzip them.\n\n Args:\n tmp_dir: directory containing dataset.\n\n Returns:\n filepath of the downloaded corpus file.\n \"\"\"\n cnn_filename = \"cnn_stories.tgz\"\n cnn_finalpath = os.path.join(tmp_dir, \"cnn/stories/\")\n dailymail_filename = \"dailymail_stories.tgz\"\n dailymail_finalpath = os.path.join(tmp_dir, \"dailymail/stories/\")\n if not tf.gfile.Exists(cnn_finalpath):\n cnn_file = generator_utils.maybe_download_from_drive(\n tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL)\n with tarfile.open(cnn_file, \"r:gz\") as cnn_tar:\n cnn_tar.extractall(tmp_dir)\n if not tf.gfile.Exists(dailymail_finalpath):\n dailymail_file = generator_utils.maybe_download_from_drive(\n tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL)\n with tarfile.open(dailymail_file, \"r:gz\") as dailymail_tar:\n dailymail_tar.extractall(tmp_dir)\n return [cnn_finalpath, dailymail_finalpath]\n\n\ndef story_generator(tmp_dir):\n paths = _maybe_download_corpora(tmp_dir)\n for path in paths:\n for story_file in tf.gfile.Glob(path + \"*\"):\n story = u\"\"\n for line in 
tf.gfile.Open(story_file, 'rb'):\n line = unicode(line, \"utf-8\") if six.PY2 else line.decode(\"utf-8\")\n story += line\n yield story\n\n\ndef _story_summary_split(story):\n end_pos = story.find(\"\\n\\n\") # Upto first empty line.\n assert end_pos != -1\n return story[:end_pos], story[end_pos:].strip()\n\n\[email protected]_problem\nclass SummarizeCnnDailymail32k(problem.Text2TextProblem):\n \"\"\"Summarize CNN and Daily Mail articles to their first paragraph.\"\"\"\n\n @property\n def is_character_level(self):\n return False\n\n @property\n def has_inputs(self):\n return True\n\n @property\n def input_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def target_space_id(self):\n return problem.SpaceID.EN_TOK\n\n @property\n def num_shards(self):\n return 100\n\n @property\n def vocab_name(self):\n return \"vocab.cnndailymail\"\n\n @property\n def use_subword_tokenizer(self):\n return True\n\n @property\n def targeted_vocab_size(self):\n return 2**15 # 32768\n\n @property\n def use_train_shards_for_dev(self):\n return True\n\n def generator(self, data_dir, tmp_dir, _):\n encoder = generator_utils.get_or_generate_vocab_inner(\n data_dir, self.vocab_file, self.targeted_vocab_size,\n story_generator(tmp_dir))\n for story in story_generator(tmp_dir):\n summary, rest = _story_summary_split(story)\n encoded_summary = encoder.encode(summary) + [EOS]\n encoded_story = encoder.encode(rest) + [EOS]\n yield {\"inputs\": encoded_story, \"targets\": encoded_summary}\n", "path": "tensor2tensor/data_generators/cnn_dailymail.py"}]} | 2,672 | 167 |
gh_patches_debug_11751 | rasdani/github-patches | git_diff | localstack__localstack-10170 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Invoking a lambda using a function url always returns 200
### Is there an existing issue for this?
- [X] I have searched the existing issues
### Current Behavior
Given the following javascript lambda function
```javascript
exports.main = async (event) => {
console.log('Hello World');
return {
statusCode: '302',
body: {},
headers: {
Location: 'https://example.com'
}
};
}
```
When deployed to LocalStack with a function URL attached, the URL returns the correct `body` and `headers`, but the status code is always 200.
In a real AWS environment, an actual 302 is returned.
### Expected Behavior
The correct status code is returned
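
For reference, a function URL maps a handler result of this shape onto the HTTP response roughly as sketched below (illustrative helper only, not LocalStack's actual code; in the LocalStack source the equivalent conversion appears to live around `lambda_result_to_response` in the URL router):

```python
import json


def to_http_response(payload: dict):
    """Illustrative mapping of a Lambda 'HTTP-style' result onto an HTTP response."""
    status = int(payload.get("statusCode", 200))   # honor the returned status code
    headers = payload.get("headers") or {}
    body = payload.get("body", "")
    if not isinstance(body, str):                  # non-string bodies get JSON-serialized
        body = json.dumps(body)
    return status, headers, body


# The handler above should map to (302, {'Location': 'https://example.com'}, '{}')
print(to_http_response({"statusCode": "302",
                        "headers": {"Location": "https://example.com"},
                        "body": {}}))
```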
### How are you starting LocalStack?
With a docker-compose file
### Steps To Reproduce
My docker compose file
```yaml
version: '3.4'
services:
localstack:
image: localstack/localstack:2.0.2
environment:
- SERVICES=dynamodb,lambda
ports:
- '4566:4566'
expose:
- '4566'
volumes:
- /var/run/docker.sock:/var/run/docker.sock
```
#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)
Deploying using Terraform
```hcl
data "aws_iam_policy_document" "assume_role" {
statement {
effect = "Allow"
principals {
type = "Service"
identifiers = ["lambda.amazonaws.com"]
}
actions = ["sts:AssumeRole"]
}
}
resource "aws_iam_role" "iam_for_lambda" {
name = "iam_for_lambda"
assume_role_policy = data.aws_iam_policy_document.assume_role.json
}
data "archive_file" "lambda" {
type = "zip"
source_file = "../index.js"
output_path = local.archive_file
}
resource "aws_lambda_function" "redirect_lambda" {
filename = local.archive_file
function_name = "redirects"
role = aws_iam_role.iam_for_lambda.arn
handler = "index.main"
source_code_hash = data.archive_file.lambda.output_base64sha256
runtime = "nodejs18.x"
environment {
variables = {
foo = "bar"
}
}
}
resource "aws_lambda_function_url" "lambda_url" {
function_name = aws_lambda_function.redirect_lambda.function_name
authorization_type = "NONE"
}
```
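
Once deployed, the mismatch can be checked directly against the function URL (the URL id below is a placeholder; the host pattern assumes LocalStack's `<url-id>.lambda-url.<region>.localhost.localstack.cloud` convention on the default edge port 4566):

```python
import requests

url = "http://<url-id>.lambda-url.us-east-1.localhost.localstack.cloud:4566/"
resp = requests.get(url, allow_redirects=False)  # keep the redirect instead of following it
print(resp.status_code)                          # real AWS: 302; LocalStack currently: 200
print(resp.headers.get("Location"))
```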
### Environment
```markdown
- OS: docker
- LocalStack: latest
```
### Anything else?
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `localstack/services/lambda_/urlrouter.py`
Content:
```
1 """Routing for Lambda function URLs: https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html"""
2 import base64
3 import json
4 import logging
5 import urllib
6 from datetime import datetime
7 from http import HTTPStatus
8
9 from localstack.aws.api import HttpResponse
10 from localstack.aws.api.lambda_ import InvocationType
11 from localstack.aws.protocol.serializer import gen_amzn_requestid
12 from localstack.http import Request, Router
13 from localstack.http.dispatcher import Handler
14 from localstack.services.lambda_.api_utils import FULL_FN_ARN_PATTERN
15 from localstack.services.lambda_.invocation.lambda_models import InvocationResult
16 from localstack.services.lambda_.invocation.lambda_service import LambdaService
17 from localstack.services.lambda_.invocation.models import lambda_stores
18 from localstack.utils.aws.request_context import AWS_REGION_REGEX
19 from localstack.utils.strings import long_uid, to_bytes, to_str
20 from localstack.utils.time import TIMESTAMP_READABLE_FORMAT, mktime, timestamp
21 from localstack.utils.urls import localstack_host
22
23 LOG = logging.getLogger(__name__)
24
25
26 class FunctionUrlRouter:
27 router: Router[Handler]
28 lambda_service: LambdaService
29
30 def __init__(self, router: Router[Handler], lambda_service: LambdaService):
31 self.router = router
32 self.registered = False
33 self.lambda_service = lambda_service
34
35 def register_routes(self) -> None:
36 if self.registered:
37 LOG.debug("Skipped Lambda URL route registration (routes already registered).")
38 return
39 self.registered = True
40
41 LOG.debug("Registering parameterized Lambda routes.")
42
43 self.router.add(
44 "/",
45 host=f"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>",
46 endpoint=self.handle_lambda_url_invocation,
47 defaults={"path": ""},
48 )
49 self.router.add(
50 "/<path:path>",
51 host=f"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>",
52 endpoint=self.handle_lambda_url_invocation,
53 )
54
55 def handle_lambda_url_invocation(
56 self, request: Request, api_id: str, region: str, **url_params: dict[str, str]
57 ) -> HttpResponse:
58 response = HttpResponse(headers={"Content-type": "application/json"})
59
60 lambda_url_config = None
61 try:
62 for account_id in lambda_stores.keys():
63 store = lambda_stores[account_id][region]
64 for fn in store.functions.values():
65 for url_config in fn.function_url_configs.values():
66 if url_config.url_id == api_id:
67 lambda_url_config = url_config
68 except IndexError as e:
69 LOG.warning(f"Lambda URL ({api_id}) not found: {e}")
70 response.set_json({"Message": None})
71 response.status = "404"
72 return response
73
74 event = event_for_lambda_url(
75 api_id, request.full_path, request.data, request.headers, request.method
76 )
77
78 match = FULL_FN_ARN_PATTERN.search(lambda_url_config.function_arn).groupdict()
79
80 result = self.lambda_service.invoke(
81 function_name=match.get("function_name"),
82 qualifier=match.get("qualifier"),
83 account_id=match.get("account_id"),
84 region=match.get("region_name"),
85 invocation_type=InvocationType.RequestResponse,
86 client_context="{}", # TODO: test
87 payload=to_bytes(json.dumps(event)),
88 request_id=gen_amzn_requestid(),
89 )
90 if result.is_error:
91 response = HttpResponse("Internal Server Error", HTTPStatus.BAD_GATEWAY)
92 else:
93 response = lambda_result_to_response(result)
94 return response
95
96
97 def event_for_lambda_url(api_id: str, path: str, data, headers, method: str) -> dict:
98 raw_path = path.split("?")[0]
99 raw_query_string = path.split("?")[1] if len(path.split("?")) > 1 else ""
100 query_string_parameters = (
101 {} if not raw_query_string else dict(urllib.parse.parse_qsl(raw_query_string))
102 )
103
104 now = datetime.utcnow()
105 readable = timestamp(time=now, format=TIMESTAMP_READABLE_FORMAT)
106 if not any(char in readable for char in ["+", "-"]):
107 readable += "+0000"
108
109 source_ip = headers.get("Remote-Addr", "")
110 request_context = {
111 "accountId": "anonymous",
112 "apiId": api_id,
113 "domainName": headers.get("Host", ""),
114 "domainPrefix": api_id,
115 "http": {
116 "method": method,
117 "path": raw_path,
118 "protocol": "HTTP/1.1",
119 "sourceIp": source_ip,
120 "userAgent": headers.get("User-Agent", ""),
121 },
122 "requestId": long_uid(),
123 "routeKey": "$default",
124 "stage": "$default",
125 "time": readable,
126 "timeEpoch": mktime(ts=now, millis=True),
127 }
128
129 content_type = headers.get("Content-Type", "").lower()
130 content_type_is_text = any(text_type in content_type for text_type in ["text", "json", "xml"])
131
132 is_base64_encoded = not (data.isascii() and content_type_is_text) if data else False
133 body = base64.b64encode(data).decode() if is_base64_encoded else data
134 if isinstance(body, bytes):
135 body = to_str(body)
136
137 ignored_headers = ["connection", "x-localstack-tgt-api", "x-localstack-request-url"]
138 event_headers = {k.lower(): v for k, v in headers.items() if k.lower() not in ignored_headers}
139
140 event_headers.update(
141 {
142 "x-amzn-tls-cipher-suite": "ECDHE-RSA-AES128-GCM-SHA256",
143 "x-amzn-tls-version": "TLSv1.2",
144 "x-forwarded-proto": "http",
145 "x-forwarded-for": source_ip,
146 "x-forwarded-port": str(localstack_host().port),
147 }
148 )
149
150 event = {
151 "version": "2.0",
152 "routeKey": "$default",
153 "rawPath": raw_path,
154 "rawQueryString": raw_query_string,
155 "headers": event_headers,
156 "queryStringParameters": query_string_parameters,
157 "requestContext": request_context,
158 "body": body,
159 "isBase64Encoded": is_base64_encoded,
160 }
161
162 if not data:
163 event.pop("body")
164
165 return event
166
167
168 def lambda_result_to_response(result: InvocationResult):
169 response = HttpResponse()
170
171 # Set default headers
172 response.headers.update(
173 {
174 "Content-Type": "application/json",
175 "Connection": "keep-alive",
176 "x-amzn-requestid": result.request_id,
177 "x-amzn-trace-id": long_uid(), # TODO: get the proper trace id here
178 }
179 )
180
181 original_payload = to_str(result.payload)
182 parsed_result = json.loads(original_payload)
183
184 # patch to fix whitespaces
185 # TODO: check if this is a downstream issue of invocation result serialization
186 original_payload = json.dumps(parsed_result, separators=(",", ":"))
187
188 if isinstance(parsed_result, str):
189 # a string is a special case here and is returned as-is
190 response.data = parsed_result
191 elif isinstance(parsed_result, dict):
192 # if it's a dict it might be a proper response
193 if isinstance(parsed_result.get("headers"), dict):
194 response.headers.update(parsed_result.get("headers"))
195 if "body" not in parsed_result:
196 # TODO: test if providing a status code but no body actually works
197 response.data = original_payload
198 elif isinstance(parsed_result.get("body"), dict):
199 response.data = json.dumps(parsed_result.get("body"))
200 elif parsed_result.get("isBase64Encoded", False):
201 body_bytes = to_bytes(to_str(parsed_result.get("body", "")))
202 decoded_body_bytes = base64.b64decode(body_bytes)
203 response.data = decoded_body_bytes
204 else:
205 response.data = parsed_result.get("body")
206 else:
207 response.data = original_payload
208
209 return response
210
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/localstack/services/lambda_/urlrouter.py b/localstack/services/lambda_/urlrouter.py
--- a/localstack/services/lambda_/urlrouter.py
+++ b/localstack/services/lambda_/urlrouter.py
@@ -192,6 +192,8 @@
# if it's a dict it might be a proper response
if isinstance(parsed_result.get("headers"), dict):
response.headers.update(parsed_result.get("headers"))
+ if "statusCode" in parsed_result:
+ response.status_code = int(parsed_result["statusCode"])
if "body" not in parsed_result:
# TODO: test if providing a status code but no body actually works
response.data = original_payload
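For reference, a rough standalone sketch of the status-code mapping this patch introduces, written against plain Werkzeug rather than LocalStack's actual response classes:
```python
from werkzeug.wrappers import Response

def to_response(parsed_result: dict) -> Response:
    # Mirror the Lambda function-URL response shape: statusCode, headers, body.
    response = Response(content_type="application/json")
    response.headers.extend(parsed_result.get("headers") or {})
    if "statusCode" in parsed_result:
        response.status_code = int(parsed_result["statusCode"])  # e.g. "302" -> 302
    response.set_data(str(parsed_result.get("body", "")))
    return response
```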
| {"golden_diff": "diff --git a/localstack/services/lambda_/urlrouter.py b/localstack/services/lambda_/urlrouter.py\n--- a/localstack/services/lambda_/urlrouter.py\n+++ b/localstack/services/lambda_/urlrouter.py\n@@ -192,6 +192,8 @@\n # if it's a dict it might be a proper response\n if isinstance(parsed_result.get(\"headers\"), dict):\n response.headers.update(parsed_result.get(\"headers\"))\n+ if \"statusCode\" in parsed_result:\n+ response.status_code = int(parsed_result[\"statusCode\"])\n if \"body\" not in parsed_result:\n # TODO: test if providing a status code but no body actually works\n response.data = original_payload\n", "issue": "Invoking a lambda using a function url always returns 200\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Current Behavior\n\nGiven the following javascript lambda function\r\n```javascript\r\nexports.main = async (event) => {\r\n console.log('Hello World');\r\n return {\r\n statusCode: '302',\r\n body: {},\r\n headers: {\r\n Location: 'https://example.com'\r\n }\r\n };\r\n}\r\n```\r\n\r\nWhen deploying to local stack and adding a function url, the url returns the correct `body` and `headers` but it will return a 200 status code.\r\n\r\nIn a real aws environment, an actual 302 is returned.\n\n### Expected Behavior\n\nThe correct status code is returned\n\n### How are you starting LocalStack?\n\nWith a docker-compose file\n\n### Steps To Reproduce\n\nMy docker compose file\r\n\r\n```yaml\r\nversion: '3.4'\r\nservices:\r\n localstack:\r\n image: localstack/localstack:2.0.2\r\n environment:\r\n - SERVICES=dynamodb,lambda\r\n ports:\r\n - '4566:4566'\r\n expose:\r\n - '4566'\r\n volumes:\r\n - /var/run/docker.sock:/var/run/docker.sock\r\n```\r\n\r\n#### How are you starting localstack (e.g., `bin/localstack` command, arguments, or `docker-compose.yml`)\r\n\r\ndeploying using terraform\r\n\r\n```hcl\r\ndata \"aws_iam_policy_document\" \"assume_role\" {\r\n statement {\r\n effect = \"Allow\"\r\n\r\n principals {\r\n type = \"Service\"\r\n identifiers = [\"lambda.amazonaws.com\"]\r\n }\r\n\r\n actions = [\"sts:AssumeRole\"]\r\n }\r\n}\r\n\r\nresource \"aws_iam_role\" \"iam_for_lambda\" {\r\n name = \"iam_for_lambda\"\r\n assume_role_policy = data.aws_iam_policy_document.assume_role.json\r\n}\r\n\r\ndata \"archive_file\" \"lambda\" {\r\n type = \"zip\"\r\n source_file = \"../index.js\"\r\n output_path = local.archive_file\r\n}\r\n\r\nresource \"aws_lambda_function\" \"redirect_lambda\" {\r\n filename = local.archive_file\r\n function_name = \"redirects\"\r\n role = aws_iam_role.iam_for_lambda.arn\r\n handler = \"index.main\"\r\n\r\n source_code_hash = data.archive_file.lambda.output_base64sha256\r\n\r\n runtime = \"nodejs18.x\"\r\n\r\n environment {\r\n variables = {\r\n foo = \"bar\"\r\n }\r\n }\r\n}\r\n\r\nresource \"aws_lambda_function_url\" \"lambda_url\" {\r\n function_name = aws_lambda_function.redirect_lambda.function_name\r\n authorization_type = \"NONE\"\r\n}\r\n```\r\n\n\n### Environment\n\n```markdown\n- OS: docker\r\n- LocalStack: latest\n```\n\n\n### Anything else?\n\n_No response_\n", "before_files": [{"content": "\"\"\"Routing for Lambda function URLs: https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html\"\"\"\nimport base64\nimport json\nimport logging\nimport urllib\nfrom datetime import datetime\nfrom http import HTTPStatus\n\nfrom localstack.aws.api import HttpResponse\nfrom localstack.aws.api.lambda_ import InvocationType\nfrom localstack.aws.protocol.serializer import 
gen_amzn_requestid\nfrom localstack.http import Request, Router\nfrom localstack.http.dispatcher import Handler\nfrom localstack.services.lambda_.api_utils import FULL_FN_ARN_PATTERN\nfrom localstack.services.lambda_.invocation.lambda_models import InvocationResult\nfrom localstack.services.lambda_.invocation.lambda_service import LambdaService\nfrom localstack.services.lambda_.invocation.models import lambda_stores\nfrom localstack.utils.aws.request_context import AWS_REGION_REGEX\nfrom localstack.utils.strings import long_uid, to_bytes, to_str\nfrom localstack.utils.time import TIMESTAMP_READABLE_FORMAT, mktime, timestamp\nfrom localstack.utils.urls import localstack_host\n\nLOG = logging.getLogger(__name__)\n\n\nclass FunctionUrlRouter:\n router: Router[Handler]\n lambda_service: LambdaService\n\n def __init__(self, router: Router[Handler], lambda_service: LambdaService):\n self.router = router\n self.registered = False\n self.lambda_service = lambda_service\n\n def register_routes(self) -> None:\n if self.registered:\n LOG.debug(\"Skipped Lambda URL route registration (routes already registered).\")\n return\n self.registered = True\n\n LOG.debug(\"Registering parameterized Lambda routes.\")\n\n self.router.add(\n \"/\",\n host=f\"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>\",\n endpoint=self.handle_lambda_url_invocation,\n defaults={\"path\": \"\"},\n )\n self.router.add(\n \"/<path:path>\",\n host=f\"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>\",\n endpoint=self.handle_lambda_url_invocation,\n )\n\n def handle_lambda_url_invocation(\n self, request: Request, api_id: str, region: str, **url_params: dict[str, str]\n ) -> HttpResponse:\n response = HttpResponse(headers={\"Content-type\": \"application/json\"})\n\n lambda_url_config = None\n try:\n for account_id in lambda_stores.keys():\n store = lambda_stores[account_id][region]\n for fn in store.functions.values():\n for url_config in fn.function_url_configs.values():\n if url_config.url_id == api_id:\n lambda_url_config = url_config\n except IndexError as e:\n LOG.warning(f\"Lambda URL ({api_id}) not found: {e}\")\n response.set_json({\"Message\": None})\n response.status = \"404\"\n return response\n\n event = event_for_lambda_url(\n api_id, request.full_path, request.data, request.headers, request.method\n )\n\n match = FULL_FN_ARN_PATTERN.search(lambda_url_config.function_arn).groupdict()\n\n result = self.lambda_service.invoke(\n function_name=match.get(\"function_name\"),\n qualifier=match.get(\"qualifier\"),\n account_id=match.get(\"account_id\"),\n region=match.get(\"region_name\"),\n invocation_type=InvocationType.RequestResponse,\n client_context=\"{}\", # TODO: test\n payload=to_bytes(json.dumps(event)),\n request_id=gen_amzn_requestid(),\n )\n if result.is_error:\n response = HttpResponse(\"Internal Server Error\", HTTPStatus.BAD_GATEWAY)\n else:\n response = lambda_result_to_response(result)\n return response\n\n\ndef event_for_lambda_url(api_id: str, path: str, data, headers, method: str) -> dict:\n raw_path = path.split(\"?\")[0]\n raw_query_string = path.split(\"?\")[1] if len(path.split(\"?\")) > 1 else \"\"\n query_string_parameters = (\n {} if not raw_query_string else dict(urllib.parse.parse_qsl(raw_query_string))\n )\n\n now = datetime.utcnow()\n readable = timestamp(time=now, format=TIMESTAMP_READABLE_FORMAT)\n if not any(char in readable for char in [\"+\", \"-\"]):\n readable += \"+0000\"\n\n source_ip = headers.get(\"Remote-Addr\", \"\")\n 
request_context = {\n \"accountId\": \"anonymous\",\n \"apiId\": api_id,\n \"domainName\": headers.get(\"Host\", \"\"),\n \"domainPrefix\": api_id,\n \"http\": {\n \"method\": method,\n \"path\": raw_path,\n \"protocol\": \"HTTP/1.1\",\n \"sourceIp\": source_ip,\n \"userAgent\": headers.get(\"User-Agent\", \"\"),\n },\n \"requestId\": long_uid(),\n \"routeKey\": \"$default\",\n \"stage\": \"$default\",\n \"time\": readable,\n \"timeEpoch\": mktime(ts=now, millis=True),\n }\n\n content_type = headers.get(\"Content-Type\", \"\").lower()\n content_type_is_text = any(text_type in content_type for text_type in [\"text\", \"json\", \"xml\"])\n\n is_base64_encoded = not (data.isascii() and content_type_is_text) if data else False\n body = base64.b64encode(data).decode() if is_base64_encoded else data\n if isinstance(body, bytes):\n body = to_str(body)\n\n ignored_headers = [\"connection\", \"x-localstack-tgt-api\", \"x-localstack-request-url\"]\n event_headers = {k.lower(): v for k, v in headers.items() if k.lower() not in ignored_headers}\n\n event_headers.update(\n {\n \"x-amzn-tls-cipher-suite\": \"ECDHE-RSA-AES128-GCM-SHA256\",\n \"x-amzn-tls-version\": \"TLSv1.2\",\n \"x-forwarded-proto\": \"http\",\n \"x-forwarded-for\": source_ip,\n \"x-forwarded-port\": str(localstack_host().port),\n }\n )\n\n event = {\n \"version\": \"2.0\",\n \"routeKey\": \"$default\",\n \"rawPath\": raw_path,\n \"rawQueryString\": raw_query_string,\n \"headers\": event_headers,\n \"queryStringParameters\": query_string_parameters,\n \"requestContext\": request_context,\n \"body\": body,\n \"isBase64Encoded\": is_base64_encoded,\n }\n\n if not data:\n event.pop(\"body\")\n\n return event\n\n\ndef lambda_result_to_response(result: InvocationResult):\n response = HttpResponse()\n\n # Set default headers\n response.headers.update(\n {\n \"Content-Type\": \"application/json\",\n \"Connection\": \"keep-alive\",\n \"x-amzn-requestid\": result.request_id,\n \"x-amzn-trace-id\": long_uid(), # TODO: get the proper trace id here\n }\n )\n\n original_payload = to_str(result.payload)\n parsed_result = json.loads(original_payload)\n\n # patch to fix whitespaces\n # TODO: check if this is a downstream issue of invocation result serialization\n original_payload = json.dumps(parsed_result, separators=(\",\", \":\"))\n\n if isinstance(parsed_result, str):\n # a string is a special case here and is returned as-is\n response.data = parsed_result\n elif isinstance(parsed_result, dict):\n # if it's a dict it might be a proper response\n if isinstance(parsed_result.get(\"headers\"), dict):\n response.headers.update(parsed_result.get(\"headers\"))\n if \"body\" not in parsed_result:\n # TODO: test if providing a status code but no body actually works\n response.data = original_payload\n elif isinstance(parsed_result.get(\"body\"), dict):\n response.data = json.dumps(parsed_result.get(\"body\"))\n elif parsed_result.get(\"isBase64Encoded\", False):\n body_bytes = to_bytes(to_str(parsed_result.get(\"body\", \"\")))\n decoded_body_bytes = base64.b64decode(body_bytes)\n response.data = decoded_body_bytes\n else:\n response.data = parsed_result.get(\"body\")\n else:\n response.data = original_payload\n\n return response\n", "path": "localstack/services/lambda_/urlrouter.py"}], "after_files": [{"content": "\"\"\"Routing for Lambda function URLs: https://docs.aws.amazon.com/lambda/latest/dg/lambda-urls.html\"\"\"\nimport base64\nimport json\nimport logging\nimport urllib\nfrom datetime import datetime\nfrom http import HTTPStatus\n\nfrom 
localstack.aws.api import HttpResponse\nfrom localstack.aws.api.lambda_ import InvocationType\nfrom localstack.aws.protocol.serializer import gen_amzn_requestid\nfrom localstack.http import Request, Router\nfrom localstack.http.dispatcher import Handler\nfrom localstack.services.lambda_.api_utils import FULL_FN_ARN_PATTERN\nfrom localstack.services.lambda_.invocation.lambda_models import InvocationResult\nfrom localstack.services.lambda_.invocation.lambda_service import LambdaService\nfrom localstack.services.lambda_.invocation.models import lambda_stores\nfrom localstack.utils.aws.request_context import AWS_REGION_REGEX\nfrom localstack.utils.strings import long_uid, to_bytes, to_str\nfrom localstack.utils.time import TIMESTAMP_READABLE_FORMAT, mktime, timestamp\nfrom localstack.utils.urls import localstack_host\n\nLOG = logging.getLogger(__name__)\n\n\nclass FunctionUrlRouter:\n router: Router[Handler]\n lambda_service: LambdaService\n\n def __init__(self, router: Router[Handler], lambda_service: LambdaService):\n self.router = router\n self.registered = False\n self.lambda_service = lambda_service\n\n def register_routes(self) -> None:\n if self.registered:\n LOG.debug(\"Skipped Lambda URL route registration (routes already registered).\")\n return\n self.registered = True\n\n LOG.debug(\"Registering parameterized Lambda routes.\")\n\n self.router.add(\n \"/\",\n host=f\"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>\",\n endpoint=self.handle_lambda_url_invocation,\n defaults={\"path\": \"\"},\n )\n self.router.add(\n \"/<path:path>\",\n host=f\"<api_id>.lambda-url.<regex('{AWS_REGION_REGEX}'):region>.<regex('.*'):server>\",\n endpoint=self.handle_lambda_url_invocation,\n )\n\n def handle_lambda_url_invocation(\n self, request: Request, api_id: str, region: str, **url_params: dict[str, str]\n ) -> HttpResponse:\n response = HttpResponse(headers={\"Content-type\": \"application/json\"})\n\n lambda_url_config = None\n try:\n for account_id in lambda_stores.keys():\n store = lambda_stores[account_id][region]\n for fn in store.functions.values():\n for url_config in fn.function_url_configs.values():\n if url_config.url_id == api_id:\n lambda_url_config = url_config\n except IndexError as e:\n LOG.warning(f\"Lambda URL ({api_id}) not found: {e}\")\n response.set_json({\"Message\": None})\n response.status = \"404\"\n return response\n\n event = event_for_lambda_url(\n api_id, request.full_path, request.data, request.headers, request.method\n )\n\n match = FULL_FN_ARN_PATTERN.search(lambda_url_config.function_arn).groupdict()\n\n result = self.lambda_service.invoke(\n function_name=match.get(\"function_name\"),\n qualifier=match.get(\"qualifier\"),\n account_id=match.get(\"account_id\"),\n region=match.get(\"region_name\"),\n invocation_type=InvocationType.RequestResponse,\n client_context=\"{}\", # TODO: test\n payload=to_bytes(json.dumps(event)),\n request_id=gen_amzn_requestid(),\n )\n if result.is_error:\n response = HttpResponse(\"Internal Server Error\", HTTPStatus.BAD_GATEWAY)\n else:\n response = lambda_result_to_response(result)\n return response\n\n\ndef event_for_lambda_url(api_id: str, path: str, data, headers, method: str) -> dict:\n raw_path = path.split(\"?\")[0]\n raw_query_string = path.split(\"?\")[1] if len(path.split(\"?\")) > 1 else \"\"\n query_string_parameters = (\n {} if not raw_query_string else dict(urllib.parse.parse_qsl(raw_query_string))\n )\n\n now = datetime.utcnow()\n readable = timestamp(time=now, 
format=TIMESTAMP_READABLE_FORMAT)\n if not any(char in readable for char in [\"+\", \"-\"]):\n readable += \"+0000\"\n\n source_ip = headers.get(\"Remote-Addr\", \"\")\n request_context = {\n \"accountId\": \"anonymous\",\n \"apiId\": api_id,\n \"domainName\": headers.get(\"Host\", \"\"),\n \"domainPrefix\": api_id,\n \"http\": {\n \"method\": method,\n \"path\": raw_path,\n \"protocol\": \"HTTP/1.1\",\n \"sourceIp\": source_ip,\n \"userAgent\": headers.get(\"User-Agent\", \"\"),\n },\n \"requestId\": long_uid(),\n \"routeKey\": \"$default\",\n \"stage\": \"$default\",\n \"time\": readable,\n \"timeEpoch\": mktime(ts=now, millis=True),\n }\n\n content_type = headers.get(\"Content-Type\", \"\").lower()\n content_type_is_text = any(text_type in content_type for text_type in [\"text\", \"json\", \"xml\"])\n\n is_base64_encoded = not (data.isascii() and content_type_is_text) if data else False\n body = base64.b64encode(data).decode() if is_base64_encoded else data\n if isinstance(body, bytes):\n body = to_str(body)\n\n ignored_headers = [\"connection\", \"x-localstack-tgt-api\", \"x-localstack-request-url\"]\n event_headers = {k.lower(): v for k, v in headers.items() if k.lower() not in ignored_headers}\n\n event_headers.update(\n {\n \"x-amzn-tls-cipher-suite\": \"ECDHE-RSA-AES128-GCM-SHA256\",\n \"x-amzn-tls-version\": \"TLSv1.2\",\n \"x-forwarded-proto\": \"http\",\n \"x-forwarded-for\": source_ip,\n \"x-forwarded-port\": str(localstack_host().port),\n }\n )\n\n event = {\n \"version\": \"2.0\",\n \"routeKey\": \"$default\",\n \"rawPath\": raw_path,\n \"rawQueryString\": raw_query_string,\n \"headers\": event_headers,\n \"queryStringParameters\": query_string_parameters,\n \"requestContext\": request_context,\n \"body\": body,\n \"isBase64Encoded\": is_base64_encoded,\n }\n\n if not data:\n event.pop(\"body\")\n\n return event\n\n\ndef lambda_result_to_response(result: InvocationResult):\n response = HttpResponse()\n\n # Set default headers\n response.headers.update(\n {\n \"Content-Type\": \"application/json\",\n \"Connection\": \"keep-alive\",\n \"x-amzn-requestid\": result.request_id,\n \"x-amzn-trace-id\": long_uid(), # TODO: get the proper trace id here\n }\n )\n\n original_payload = to_str(result.payload)\n parsed_result = json.loads(original_payload)\n\n # patch to fix whitespaces\n # TODO: check if this is a downstream issue of invocation result serialization\n original_payload = json.dumps(parsed_result, separators=(\",\", \":\"))\n\n if isinstance(parsed_result, str):\n # a string is a special case here and is returned as-is\n response.data = parsed_result\n elif isinstance(parsed_result, dict):\n # if it's a dict it might be a proper response\n if isinstance(parsed_result.get(\"headers\"), dict):\n response.headers.update(parsed_result.get(\"headers\"))\n if \"statusCode\" in parsed_result:\n response.status_code = int(parsed_result[\"statusCode\"])\n if \"body\" not in parsed_result:\n # TODO: test if providing a status code but no body actually works\n response.data = original_payload\n elif isinstance(parsed_result.get(\"body\"), dict):\n response.data = json.dumps(parsed_result.get(\"body\"))\n elif parsed_result.get(\"isBase64Encoded\", False):\n body_bytes = to_bytes(to_str(parsed_result.get(\"body\", \"\")))\n decoded_body_bytes = base64.b64decode(body_bytes)\n response.data = decoded_body_bytes\n else:\n response.data = parsed_result.get(\"body\")\n else:\n response.data = original_payload\n\n return response\n", "path": "localstack/services/lambda_/urlrouter.py"}]} | 
3,134 | 152 |
gh_patches_debug_348 | rasdani/github-patches | git_diff | ranaroussi__yfinance-295 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Deprecation warning due to invalid escape sequences
Deprecation warnings are raised due to invalid escape sequences. This can be fixed by using raw strings or by escaping the backslashes. pyupgrade also helps with automatic conversion: https://github.com/asottile/pyupgrade/ (a minimal illustration follows the warning output below).
```
find . -iname '*.py' | grep -Ev 'test.py' | xargs -P4 -I{} python3.8 -Wall -m py_compile {}
./yfinance/utils.py:67: DeprecationWarning: invalid escape sequence \g
return [_re.sub("([a-z])([A-Z])", "\g<1> \g<2>", i).title() for i in o]
```
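A minimal illustration of the warning's cause and the raw-string fix (the input string is made up for the example):
```python
import re

# "\g" is not a recognised escape in a plain string literal, which is what triggers the
# DeprecationWarning above; a raw string keeps the backslash for re.sub's \g<n> references.
print(re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", "adjClose").title())  # -> "Adj Close"
```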
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yfinance/utils.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 #
4 # Yahoo! Finance market data downloader (+fix for Pandas Datareader)
5 # https://github.com/ranaroussi/yfinance
6 #
7 # Copyright 2017-2019 Ran Aroussi
8 #
9 # Licensed under the Apache License, Version 2.0 (the "License");
10 # you may not use this file except in compliance with the License.
11 # You may obtain a copy of the License at
12 #
13 # http://www.apache.org/licenses/LICENSE-2.0
14 #
15 # Unless required by applicable law or agreed to in writing, software
16 # distributed under the License is distributed on an "AS IS" BASIS,
17 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 # See the License for the specific language governing permissions and
19 # limitations under the License.
20 #
21
22 from __future__ import print_function
23
24 import requests as _requests
25 import re as _re
26 import pandas as _pd
27 import numpy as _np
28 import sys as _sys
29 import re as _re
30
31 try:
32 import ujson as _json
33 except ImportError:
34 import json as _json
35
36
37 def empty_df(index=[]):
38 empty = _pd.DataFrame(index=index, data={
39 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,
40 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})
41 empty.index.name = 'Date'
42 return empty
43
44
45 def get_json(url, proxy=None):
46 html = _requests.get(url=url, proxies=proxy).text
47
48 if "QuoteSummaryStore" not in html:
49 html = _requests.get(url=url, proxies=proxy).text
50 if "QuoteSummaryStore" not in html:
51 return {}
52
53 json_str = html.split('root.App.main =')[1].split(
54 '(this)')[0].split(';\n}')[0].strip()
55 data = _json.loads(json_str)[
56 'context']['dispatcher']['stores']['QuoteSummaryStore']
57
58 # return data
59 new_data = _json.dumps(data).replace('{}', 'null')
60 new_data = _re.sub(
61 r'\{[\'|\"]raw[\'|\"]:(.*?),(.*?)\}', r'\1', new_data)
62
63 return _json.loads(new_data)
64
65
66 def camel2title(o):
67 return [_re.sub("([a-z])([A-Z])", "\g<1> \g<2>", i).title() for i in o]
68
69
70 def auto_adjust(data):
71 df = data.copy()
72 ratio = df["Close"] / df["Adj Close"]
73 df["Adj Open"] = df["Open"] / ratio
74 df["Adj High"] = df["High"] / ratio
75 df["Adj Low"] = df["Low"] / ratio
76
77 df.drop(
78 ["Open", "High", "Low", "Close"],
79 axis=1, inplace=True)
80
81 df.rename(columns={
82 "Adj Open": "Open", "Adj High": "High",
83 "Adj Low": "Low", "Adj Close": "Close"
84 }, inplace=True)
85
86 df = df[["Open", "High", "Low", "Close", "Volume"]]
87 return df[["Open", "High", "Low", "Close", "Volume"]]
88
89
90 def back_adjust(data):
91 """ back-adjusted data to mimic true historical prices """
92
93 df = data.copy()
94 ratio = df["Adj Close"] / df["Close"]
95 df["Adj Open"] = df["Open"] * ratio
96 df["Adj High"] = df["High"] * ratio
97 df["Adj Low"] = df["Low"] * ratio
98
99 df.drop(
100 ["Open", "High", "Low", "Adj Close"],
101 axis=1, inplace=True)
102
103 df.rename(columns={
104 "Adj Open": "Open", "Adj High": "High",
105 "Adj Low": "Low"
106 }, inplace=True)
107
108 return df[["Open", "High", "Low", "Close", "Volume"]]
109
110
111 def parse_quotes(data, tz=None):
112 timestamps = data["timestamp"]
113 ohlc = data["indicators"]["quote"][0]
114 volumes = ohlc["volume"]
115 opens = ohlc["open"]
116 closes = ohlc["close"]
117 lows = ohlc["low"]
118 highs = ohlc["high"]
119
120 adjclose = closes
121 if "adjclose" in data["indicators"]:
122 adjclose = data["indicators"]["adjclose"][0]["adjclose"]
123
124 quotes = _pd.DataFrame({"Open": opens,
125 "High": highs,
126 "Low": lows,
127 "Close": closes,
128 "Adj Close": adjclose,
129 "Volume": volumes})
130
131 quotes.index = _pd.to_datetime(timestamps, unit="s")
132 quotes.sort_index(inplace=True)
133
134 if tz is not None:
135 quotes.index = quotes.index.tz_localize(tz)
136
137 return quotes
138
139
140 def parse_actions(data, tz=None):
141 dividends = _pd.DataFrame(columns=["Dividends"])
142 splits = _pd.DataFrame(columns=["Stock Splits"])
143
144 if "events" in data:
145 if "dividends" in data["events"]:
146 dividends = _pd.DataFrame(
147 data=list(data["events"]["dividends"].values()))
148 dividends.set_index("date", inplace=True)
149 dividends.index = _pd.to_datetime(dividends.index, unit="s")
150 dividends.sort_index(inplace=True)
151 if tz is not None:
152 dividends.index = dividends.index.tz_localize(tz)
153
154 dividends.columns = ["Dividends"]
155
156 if "splits" in data["events"]:
157 splits = _pd.DataFrame(
158 data=list(data["events"]["splits"].values()))
159 splits.set_index("date", inplace=True)
160 splits.index = _pd.to_datetime(splits.index, unit="s")
161 splits.sort_index(inplace=True)
162 if tz is not None:
163 splits.index = splits.index.tz_localize(tz)
164 splits["Stock Splits"] = splits["numerator"] / \
165 splits["denominator"]
166 splits = splits["Stock Splits"]
167
168 return dividends, splits
169
170
171 class ProgressBar:
172 def __init__(self, iterations, text='completed'):
173 self.text = text
174 self.iterations = iterations
175 self.prog_bar = '[]'
176 self.fill_char = '*'
177 self.width = 50
178 self.__update_amount(0)
179 self.elapsed = 1
180
181 def completed(self):
182 if self.elapsed > self.iterations:
183 self.elapsed = self.iterations
184 self.update_iteration(1)
185 print('\r' + str(self), end='')
186 _sys.stdout.flush()
187 print()
188
189 def animate(self, iteration=None):
190 if iteration is None:
191 self.elapsed += 1
192 iteration = self.elapsed
193 else:
194 self.elapsed += iteration
195
196 print('\r' + str(self), end='')
197 _sys.stdout.flush()
198 self.update_iteration()
199
200 def update_iteration(self, val=None):
201 val = val if val is not None else self.elapsed / float(self.iterations)
202 self.__update_amount(val * 100.0)
203 self.prog_bar += ' %s of %s %s' % (
204 self.elapsed, self.iterations, self.text)
205
206 def __update_amount(self, new_amount):
207 percent_done = int(round((new_amount / 100.0) * 100.0))
208 all_full = self.width - 2
209 num_hashes = int(round((percent_done / 100.0) * all_full))
210 self.prog_bar = '[' + self.fill_char * \
211 num_hashes + ' ' * (all_full - num_hashes) + ']'
212 pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
213 pct_string = '%d%%' % percent_done
214 self.prog_bar = self.prog_bar[0:pct_place] + \
215 (pct_string + self.prog_bar[pct_place + len(pct_string):])
216
217 def __str__(self):
218 return str(self.prog_bar)
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yfinance/utils.py b/yfinance/utils.py
--- a/yfinance/utils.py
+++ b/yfinance/utils.py
@@ -64,7 +64,7 @@
def camel2title(o):
- return [_re.sub("([a-z])([A-Z])", "\g<1> \g<2>", i).title() for i in o]
+ return [_re.sub("([a-z])([A-Z])", r"\g<1> \g<2>", i).title() for i in o]
def auto_adjust(data):
| {"golden_diff": "diff --git a/yfinance/utils.py b/yfinance/utils.py\n--- a/yfinance/utils.py\n+++ b/yfinance/utils.py\n@@ -64,7 +64,7 @@\n \n \n def camel2title(o):\n- return [_re.sub(\"([a-z])([A-Z])\", \"\\g<1> \\g<2>\", i).title() for i in o]\n+ return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n \n \n def auto_adjust(data):\n", "issue": "Deprecation warning due to invalid escape sequences\nDeprecation warnings are raised due to invalid escape sequences. This can be fixed by using raw strings or escaping the literals. pyupgrade also helps in automatic conversion : https://github.com/asottile/pyupgrade/\r\n\r\n```\r\nfind . -iname '*.py' | grep -Ev 'test.py' | xargs -P4 -I{} python3.8 -Wall -m py_compile {}\r\n./yfinance/utils.py:67: DeprecationWarning: invalid escape sequence \\g\r\n return [_re.sub(\"([a-z])([A-Z])\", \"\\g<1> \\g<2>\", i).title() for i in o]\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport re as _re\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef get_json(url, proxy=None):\n html = _requests.get(url=url, proxies=proxy).text\n\n if \"QuoteSummaryStore\" not in html:\n html = _requests.get(url=url, proxies=proxy).text\n if \"QuoteSummaryStore\" not in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)[\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", \"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical 
prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data, tz=None):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n if tz is not None:\n quotes.index = quotes.index.tz_localize(tz)\n\n return quotes\n\n\ndef parse_actions(data, tz=None):\n dividends = _pd.DataFrame(columns=[\"Dividends\"])\n splits = _pd.DataFrame(columns=[\"Stock Splits\"])\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n if tz is not None:\n dividends.index = dividends.index.tz_localize(tz)\n\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n if tz is not None:\n splits.index = splits.index.tz_localize(tz)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + 
self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n", "path": "yfinance/utils.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Yahoo! Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport re as _re\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef get_json(url, proxy=None):\n html = _requests.get(url=url, proxies=proxy).text\n\n if \"QuoteSummaryStore\" not in html:\n html = _requests.get(url=url, proxies=proxy).text\n if \"QuoteSummaryStore\" not in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)[\n 'context']['dispatcher']['stores']['QuoteSummaryStore']\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data, tz=None):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n 
adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n if tz is not None:\n quotes.index = quotes.index.tz_localize(tz)\n\n return quotes\n\n\ndef parse_actions(data, tz=None):\n dividends = _pd.DataFrame(columns=[\"Dividends\"])\n splits = _pd.DataFrame(columns=[\"Stock Splits\"])\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n if tz is not None:\n dividends.index = dividends.index.tz_localize(tz)\n\n dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n if tz is not None:\n splits.index = splits.index.tz_localize(tz)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n", "path": "yfinance/utils.py"}]} | 2,739 | 124 |
gh_patches_debug_18416 | rasdani/github-patches | git_diff | kartoza__prj.app-126 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
We should add Rischan to the email recipients list for Projecta errors.
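For context, a minimal sketch of how Django fans error mail out to `ADMINS` (the addresses below are placeholders, not the real ones to add):
```python
# settings.py (sketch)
ADMINS = (
    ("Tim Sutton", "[email protected]"),
    ("Rischan Mafrur", "[email protected]"),
)
MANAGERS = ADMINS  # mail_managers()/broken-link reports reuse the same list

# Anything reported through mail_admins(), including the AdminEmailHandler used for
# HTTP 500 reports, goes to every address listed in ADMINS.
from django.core.mail import mail_admins
mail_admins("Projecta error", "Traceback ...")
```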
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django_project/core/settings/base.py`
Content:
```
1 # coding=utf-8
2 """
3 core.settings.base
4 """
5 # Django settings for projecta project.
6
7 from .utils import absolute_path
8
9 ADMINS = (
10 ('Tim Sutton', '[email protected]'),
11 )
12 SERVER_EMAIL = '[email protected]'
13 EMAIL_HOST = 'localhost'
14 DEFAULT_FROM_EMAIL = '[email protected]'
15
16 MANAGERS = ADMINS
17
18 # Local time zone for this installation. Choices can be found here:
19 # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
20 # although not all choices may be available on all operating systems.
21 # In a Windows environment this must be set to your system time zone.
22 TIME_ZONE = 'America/Chicago'
23
24 # Language code for this installation. All choices can be found here:
25 # http://www.i18nguy.com/unicode/language-identifiers.html
26 LANGUAGE_CODE = 'en-us'
27
28 SITE_ID = 1
29
30 # If you set this to False, Django will make some optimizations so as not
31 # to load the internationalization machinery.
32 USE_I18N = True
33
34 # If you set this to False, Django will not format dates, numbers and
35 # calendars according to the current locale.
36 USE_L10N = True
37
38 # If you set this to False, Django will not use timezone-aware datetimes.
39 USE_TZ = True
40
41 # Absolute filesystem path to the directory that will hold user-uploaded files.
42 # Example: "/var/www/example.com/media/"
43 MEDIA_ROOT = '/home/web/media'
44
45 # URL that handles the media served from MEDIA_ROOT. Make sure to use a
46 # trailing slash.
47 # Examples: "http://example.com/media/", "http://media.example.com/"
48 # MEDIA_URL = '/media/'
49 # setting full MEDIA_URL to be able to use it for the feeds
50 MEDIA_URL = '/media/'
51
52 # Absolute path to the directory static files should be collected to.
53 # Don't put anything in this directory yourself; store your static files
54 # in apps' "static/" subdirectories and in STATICFILES_DIRS.
55 # Example: "/var/www/example.com/static/"
56 STATIC_ROOT = '/home/web/static'
57
58 # URL prefix for static files.
59 # Example: "http://example.com/static/", "http://static.example.com/"
60 STATIC_URL = '/static/'
61
62 # Additional locations of static files
63 STATICFILES_DIRS = (
64 # Put strings here, like "/home/html/static" or "C:/www/django/static".
65 # Always use forward slashes, even on Windows.
66 # Don't forget to use absolute paths, not relative paths.
67 absolute_path('core', 'base_static'),
68 )
69
70 # List of finder classes that know how to find static files in
71 # various locations.
72 STATICFILES_FINDERS = (
73 'django.contrib.staticfiles.finders.FileSystemFinder',
74 'django.contrib.staticfiles.finders.AppDirectoriesFinder',
75 # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
76 )
77
78 # import SECRET_KEY into current namespace
79 # noinspection PyUnresolvedReferences
80 from .secret import SECRET_KEY # noqa
81
82 # List of callables that know how to import templates from various sources.
83 TEMPLATE_LOADERS = (
84 'django.template.loaders.filesystem.Loader',
85 'django.template.loaders.app_directories.Loader',
86 # 'django.template.loaders.eggs.Loader',
87 )
88
89 TEMPLATE_CONTEXT_PROCESSORS = (
90 'django.contrib.auth.context_processors.auth',
91 'django.core.context_processors.request',
92 'django.core.context_processors.media',
93 'core.context_processors.add_intercom_app_id',
94 'django.template.context_processors.i18n'
95 )
96
97 MIDDLEWARE_CLASSES = (
98 'django.middleware.common.CommonMiddleware',
99 'django.contrib.sessions.middleware.SessionMiddleware',
100 'django.middleware.csrf.CsrfViewMiddleware',
101 'django.contrib.auth.middleware.AuthenticationMiddleware',
102 'django.contrib.messages.middleware.MessageMiddleware',
103 # Uncomment the next line for simple clickjacking protection:
104 # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
105 'core.custom_middleware.NavContextMiddleware',
106 'django.middleware.locale.LocaleMiddleware'
107 )
108
109 ROOT_URLCONF = 'core.urls'
110
111 # Python dotted path to the WSGI application used by Django's runserver.
112 WSGI_APPLICATION = 'core.wsgi.application'
113
114 TEMPLATE_DIRS = (
115 # project level templates
116 absolute_path('core', 'base_templates'),
117 absolute_path('vota', 'templates'),
118 absolute_path('changes', 'templates'),
119
120 )
121
122 INSTALLED_APPS = (
123 'django.contrib.auth',
124 'django.contrib.contenttypes',
125 'django.contrib.sessions',
126 'django.contrib.sites',
127 'django.contrib.messages',
128 'django.contrib.staticfiles',
129 'django.contrib.admin',
130 'django.contrib.syndication',
131 )
132
133 # A sample logging configuration. The only tangible logging
134 # performed by this configuration is to send an email to
135 # the site admins on every HTTP 500 error when DEBUG=False.
136 # See http://docs.djangoproject.com/en/dev/topics/logging for
137 # more details on how to customize your logging configuration.
138 LOGGING = {
139 'version': 1,
140 'disable_existing_loggers': False,
141 'filters': {
142 'require_debug_false': {
143 '()': 'django.utils.log.RequireDebugFalse'
144 }
145 },
146 'handlers': {
147 'mail_admins': {
148 'level': 'ERROR',
149 'filters': ['require_debug_false'],
150 'class': 'django.utils.log.AdminEmailHandler'
151 }
152 },
153 'loggers': {
154 'django.request': {
155 'handlers': ['mail_admins'],
156 'level': 'ERROR',
157 'propagate': True,
158 },
159 }
160 }
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/django_project/core/settings/base.py b/django_project/core/settings/base.py
--- a/django_project/core/settings/base.py
+++ b/django_project/core/settings/base.py
@@ -7,7 +7,8 @@
from .utils import absolute_path
ADMINS = (
- ('Tim Sutton', '[email protected]'),
+ ('Tim Sutton', '[email protected]'),
+ ('Rischan Mafrur', '[email protected]')
)
SERVER_EMAIL = '[email protected]'
EMAIL_HOST = 'localhost'
@@ -19,7 +20,7 @@
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
-TIME_ZONE = 'America/Chicago'
+TIME_ZONE = 'Africa/Johannesburg'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
| {"golden_diff": "diff --git a/django_project/core/settings/base.py b/django_project/core/settings/base.py\n--- a/django_project/core/settings/base.py\n+++ b/django_project/core/settings/base.py\n@@ -7,7 +7,8 @@\n from .utils import absolute_path\n \n ADMINS = (\n- ('Tim Sutton', '[email protected]'),\n+ ('Tim Sutton', '[email protected]'),\n+ ('Rischan Mafrur', '[email protected]')\n )\n SERVER_EMAIL = '[email protected]'\n EMAIL_HOST = 'localhost'\n@@ -19,7 +20,7 @@\n # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n # although not all choices may be available on all operating systems.\n # In a Windows environment this must be set to your system time zone.\n-TIME_ZONE = 'America/Chicago'\n+TIME_ZONE = 'Africa/Johannesburg'\n \n # Language code for this installation. All choices can be found here:\n # http://www.i18nguy.com/unicode/language-identifiers.html\n", "issue": "We should add Rischan to the email recipients list for projecta errors\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\ncore.settings.base\n\"\"\"\n# Django settings for projecta project.\n\nfrom .utils import absolute_path\n\nADMINS = (\n ('Tim Sutton', '[email protected]'),\n)\nSERVER_EMAIL = '[email protected]'\nEMAIL_HOST = 'localhost'\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nMANAGERS = ADMINS\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'America/Chicago'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = '/home/web/media'\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\n# MEDIA_URL = '/media/'\n# setting full MEDIA_URL to be able to use it for the feeds\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = '/home/web/static'\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n absolute_path('core', 'base_static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# import SECRET_KEY into current namespace\n# noinspection PyUnresolvedReferences\nfrom .secret import SECRET_KEY # noqa\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.core.context_processors.media',\n 'core.context_processors.add_intercom_app_id',\n 'django.template.context_processors.i18n'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'core.custom_middleware.NavContextMiddleware',\n 'django.middleware.locale.LocaleMiddleware'\n)\n\nROOT_URLCONF = 'core.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'core.wsgi.application'\n\nTEMPLATE_DIRS = (\n # project level templates\n absolute_path('core', 'base_templates'),\n absolute_path('vota', 'templates'),\n absolute_path('changes', 'templates'),\n\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.syndication',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n", "path": "django_project/core/settings/base.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\ncore.settings.base\n\"\"\"\n# Django settings for projecta project.\n\nfrom .utils import absolute_path\n\nADMINS = (\n ('Tim Sutton', '[email protected]'),\n ('Rischan Mafrur', '[email protected]')\n)\nSERVER_EMAIL = '[email protected]'\nEMAIL_HOST = 'localhost'\nDEFAULT_FROM_EMAIL = '[email protected]'\n\nMANAGERS = ADMINS\n\n# Local time zone for this installation. Choices can be found here:\n# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name\n# although not all choices may be available on all operating systems.\n# In a Windows environment this must be set to your system time zone.\nTIME_ZONE = 'Africa/Johannesburg'\n\n# Language code for this installation. All choices can be found here:\n# http://www.i18nguy.com/unicode/language-identifiers.html\nLANGUAGE_CODE = 'en-us'\n\nSITE_ID = 1\n\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = True\n\n# If you set this to False, Django will not format dates, numbers and\n# calendars according to the current locale.\nUSE_L10N = True\n\n# If you set this to False, Django will not use timezone-aware datetimes.\nUSE_TZ = True\n\n# Absolute filesystem path to the directory that will hold user-uploaded files.\n# Example: \"/var/www/example.com/media/\"\nMEDIA_ROOT = '/home/web/media'\n\n# URL that handles the media served from MEDIA_ROOT. 
Make sure to use a\n# trailing slash.\n# Examples: \"http://example.com/media/\", \"http://media.example.com/\"\n# MEDIA_URL = '/media/'\n# setting full MEDIA_URL to be able to use it for the feeds\nMEDIA_URL = '/media/'\n\n# Absolute path to the directory static files should be collected to.\n# Don't put anything in this directory yourself; store your static files\n# in apps' \"static/\" subdirectories and in STATICFILES_DIRS.\n# Example: \"/var/www/example.com/static/\"\nSTATIC_ROOT = '/home/web/static'\n\n# URL prefix for static files.\n# Example: \"http://example.com/static/\", \"http://static.example.com/\"\nSTATIC_URL = '/static/'\n\n# Additional locations of static files\nSTATICFILES_DIRS = (\n # Put strings here, like \"/home/html/static\" or \"C:/www/django/static\".\n # Always use forward slashes, even on Windows.\n # Don't forget to use absolute paths, not relative paths.\n absolute_path('core', 'base_static'),\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n # 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# import SECRET_KEY into current namespace\n# noinspection PyUnresolvedReferences\nfrom .secret import SECRET_KEY # noqa\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n # 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.core.context_processors.media',\n 'core.context_processors.add_intercom_app_id',\n 'django.template.context_processors.i18n'\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n # Uncomment the next line for simple clickjacking protection:\n # 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'core.custom_middleware.NavContextMiddleware',\n 'django.middleware.locale.LocaleMiddleware'\n)\n\nROOT_URLCONF = 'core.urls'\n\n# Python dotted path to the WSGI application used by Django's runserver.\nWSGI_APPLICATION = 'core.wsgi.application'\n\nTEMPLATE_DIRS = (\n # project level templates\n absolute_path('core', 'base_templates'),\n absolute_path('vota', 'templates'),\n absolute_path('changes', 'templates'),\n\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.admin',\n 'django.contrib.syndication',\n)\n\n# A sample logging configuration. 
The only tangible logging\n# performed by this configuration is to send an email to\n# the site admins on every HTTP 500 error when DEBUG=False.\n# See http://docs.djangoproject.com/en/dev/topics/logging for\n# more details on how to customize your logging configuration.\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'filters': {\n 'require_debug_false': {\n '()': 'django.utils.log.RequireDebugFalse'\n }\n },\n 'handlers': {\n 'mail_admins': {\n 'level': 'ERROR',\n 'filters': ['require_debug_false'],\n 'class': 'django.utils.log.AdminEmailHandler'\n }\n },\n 'loggers': {\n 'django.request': {\n 'handlers': ['mail_admins'],\n 'level': 'ERROR',\n 'propagate': True,\n },\n }\n}\n", "path": "django_project/core/settings/base.py"}]} | 1,839 | 236 |
gh_patches_debug_39022 | rasdani/github-patches | git_diff | opsdroid__opsdroid-693 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Google Style Docstrings
We should implement Google Style Docstrings for every function, method, and class in opsdroid. This style will support the existing documentation and will help in the future by letting us generate documentation automatically.
This requires a fair bit of effort, so this issue can be worked on by more than one contributor; just make sure that everyone knows which part you are working on, so that other contributors don't spend time on something you are already handling.
If you are unfamiliar with Google Style Docstrings, I'd recommend that you check these resources:
 - [Sphinx 1.8.0+ - Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)
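For quick reference, here is a minimal sketch of the proposed style (the function below is illustrative only and is not taken from the opsdroid code base):

```python
def divide(numerator, denominator):
    """Divide one number by another.

    Args:
        numerator (float): Value to be divided.
        denominator (float): Value to divide by. Must not be zero.

    Returns:
        float: The result of ``numerator / denominator``.

    Raises:
        ZeroDivisionError: If ``denominator`` is zero.

    """
    return numerator / denominator
```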
Docstrings that need to be updated:
- main.py
- [x] configure_lang
- [ ] configure_log
- [ ] get_logging_level
- [ ] check_dependencies
- [ ] print_version
- [ ] print_example_config
- [ ] edit_files
- [x] welcome_message
- ~~helper.py~~
- [x] get_opsdroid
- [x] del_rw
- [x] move_config_to_appdir
- memory.py
- [x] Memory
- [x] get
- [x] put
- [x] _get_from_database
- [x] _put_to_database
- message.py
- [x] Message
- [x] __init__
- [x] _thinking_delay
  - [x] _typing_delay
- [x] respond
- [x] react
- web.py
- [ ] Web
- [x] get_port
- [x] get_host
- [x] get_ssl_context
- [ ] start
- [ ] build_response
- [ ] web_index_handler
- [ ] web_stats_handler
- matchers.py
- [ ] match_regex
- [ ] match_apiai_action
- [ ] match_apiai_intent
- [ ] match_dialogflow_action
- [ ] match_dialogflow_intent
- [ ] match_luisai_intent
- [ ] match_rasanlu
- [ ] match_recastai
- [ ] match_witai
- [ ] match_crontab
- [ ] match_webhook
- [ ] match_always
- core.py
- [ ] OpsDroid
- [ ] default_connector
- [ ] exit
- [ ] critical
- [ ] call_stop
- [ ] disconnect
- [ ] stop
- [ ] load
- [ ] start_loop
- [x] setup_skills
- [ ] train_parsers
- [ ] start_connector_tasks
- [ ] start_database
- [ ] run_skill
- [ ] get_ranked_skills
- [ ] parse
- loader.py
- [ ] Loader
- [x] import_module_from_spec
- [x] import_module
- [x] check_cache
- [x] build_module_import_path
- [x] build_module_install_path
- [x] git_clone
- [x] git_pull
- [x] pip_install_deps
- [x] create_default_config
- [x] load_config_file
- [ ] envvar_constructor
- [ ] include_constructor
- [x] setup_modules_directory
- [x] load_modules_from_config
- [x] _load_modules
- [x] _install_module
- [x] _update_module
- [ ] _install_git_module
- [x] _install_local_module
---- ORIGINAL POST ----
I've been wondering about this for a while now, and I would like to know if we should replace/update all the docstrings in opsdroid with Google Style docstrings.
I think this could help new and old contributors to contribute and commit to opsdroid, since Google Style docstrings give more information about every method/function and specify clearly what sort of input the function/method expects, what it will return, and what will be raised (if applicable).
The downside of this style is that the length of every .py file will increase due to the docstrings, but since most IDEs allow you to hide those fields it shouldn't be too bad.
Here is a good example of Google Style Docstrings: [Sphinx 1.8.0+ - Google Style Docstrings](http://www.sphinx-doc.org/en/master/ext/example_google.html)
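On the documentation-generation side, a minimal Sphinx configuration that understands this style could look like the sketch below (the file name and option values are assumptions, not taken from the opsdroid repository):

```python
# conf.py -- minimal sketch of a Sphinx setup for Google-style docstrings
extensions = [
    "sphinx.ext.autodoc",   # pull the docstrings out of the Python modules
    "sphinx.ext.napoleon",  # parse the Google-style Args:/Returns:/Raises: sections
]

# Expect Google-style sections only in this sketch.
napoleon_google_docstring = True
napoleon_numpy_docstring = False
```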
I would like to know what you all think about this idea and if it's worth spending time on it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opsdroid/memory.py`
Content:
```
1 """Class for persisting information in opsdroid."""
2
3 import logging
4
5
6 _LOGGER = logging.getLogger(__name__)
7
8
9 class Memory:
10 """An object to store and persist data outside of opsdroid."""
11
12 def __init__(self):
13 """Create memory dictionary."""
14 self.memory = {}
15 self.databases = []
16
17 async def get(self, key):
18 """Get data object for a given key."""
19 _LOGGER.debug(_("Getting %s from memory."), key)
20 database_result = await self._get_from_database(key)
21 if database_result is not None:
22 self.memory[key] = database_result
23 if key in self.memory:
24 return self.memory[key]
25
26 return None
27
28 async def put(self, key, data):
29 """Put a data object to a given key."""
30 _LOGGER.debug(_("Putting %s to memory"), key)
31 self.memory[key] = data
32 await self._put_to_database(key, self.memory[key])
33
34 async def _get_from_database(self, key):
35 """Get updates from databases for a given key."""
36 if not self.databases:
37 return None
38
39 results = []
40 for database in self.databases:
41 results.append(await database.get(key))
42 # TODO: Handle multiple databases
43 return results[0]
44
45 async def _put_to_database(self, key, data):
46 """Put updates into databases for a given key."""
47 if self.databases:
48 for database in self.databases:
49 await database.put(key, data)
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opsdroid/memory.py b/opsdroid/memory.py
--- a/opsdroid/memory.py
+++ b/opsdroid/memory.py
@@ -7,15 +7,33 @@
class Memory:
- """An object to store and persist data outside of opsdroid."""
+ """A Memory object.
+
+ An object to obtain, store and persist data outside of opsdroid.
+
+ Attributes:
+ databases (:obj:`list` of :obj:`Database`): List of database objects.
+ memory (:obj:`dict`): In-memory dictionary to store data.
+
+ """
def __init__(self):
- """Create memory dictionary."""
+ """Create object with minimum properties."""
self.memory = {}
self.databases = []
async def get(self, key):
- """Get data object for a given key."""
+ """Get data object for a given key.
+
+ Gets the key value found in-memory or from the database(s).
+
+ Args:
+ key (str): Key to retrieve data.
+
+ Returns:
+ A data object for the given key, otherwise `None`.
+
+ """
_LOGGER.debug(_("Getting %s from memory."), key)
database_result = await self._get_from_database(key)
if database_result is not None:
@@ -26,24 +44,53 @@
return None
async def put(self, key, data):
- """Put a data object to a given key."""
+ """Put a data object to a given key.
+
+ Stores the key and value in memory and the database(s).
+
+ Args:
+ key (str): Key for the data to store.
+ data (obj): Data object to store.
+
+ """
_LOGGER.debug(_("Putting %s to memory"), key)
self.memory[key] = data
await self._put_to_database(key, self.memory[key])
async def _get_from_database(self, key):
- """Get updates from databases for a given key."""
+ """Get updates from databases for a given key.
+
+ Gets the first key value found from the database(s).
+
+ Args:
+ key (str): Key to retrieve data from a database.
+
+ Returns:
+ The first key value (data object) found from the database(s).
+ Or `None` when no database is defined or no value is found.
+
+ Todo:
+ * Handle multiple databases
+
+ """
if not self.databases:
return None
results = []
for database in self.databases:
results.append(await database.get(key))
- # TODO: Handle multiple databases
return results[0]
async def _put_to_database(self, key, data):
- """Put updates into databases for a given key."""
+ """Put updates into databases for a given key.
+
+ Stores the key and value on each database defined.
+
+ Args:
+ key (str): Key for the data to store.
+ data (obj): Data object to store.
+
+ """
if self.databases:
for database in self.databases:
await database.put(key, data)
| {"golden_diff": "diff --git a/opsdroid/memory.py b/opsdroid/memory.py\n--- a/opsdroid/memory.py\n+++ b/opsdroid/memory.py\n@@ -7,15 +7,33 @@\n \n \n class Memory:\n- \"\"\"An object to store and persist data outside of opsdroid.\"\"\"\n+ \"\"\"A Memory object.\n+\n+ An object to obtain, store and persist data outside of opsdroid.\n+\n+ Attributes:\n+ databases (:obj:`list` of :obj:`Database`): List of database objects.\n+ memory (:obj:`dict`): In-memory dictionary to store data.\n+\n+ \"\"\"\n \n def __init__(self):\n- \"\"\"Create memory dictionary.\"\"\"\n+ \"\"\"Create object with minimum properties.\"\"\"\n self.memory = {}\n self.databases = []\n \n async def get(self, key):\n- \"\"\"Get data object for a given key.\"\"\"\n+ \"\"\"Get data object for a given key.\n+\n+ Gets the key value found in-memory or from the database(s).\n+\n+ Args:\n+ key (str): Key to retrieve data.\n+\n+ Returns:\n+ A data object for the given key, otherwise `None`.\n+\n+ \"\"\"\n _LOGGER.debug(_(\"Getting %s from memory.\"), key)\n database_result = await self._get_from_database(key)\n if database_result is not None:\n@@ -26,24 +44,53 @@\n return None\n \n async def put(self, key, data):\n- \"\"\"Put a data object to a given key.\"\"\"\n+ \"\"\"Put a data object to a given key.\n+\n+ Stores the key and value in memory and the database(s).\n+\n+ Args:\n+ key (str): Key for the data to store.\n+ data (obj): Data object to store.\n+\n+ \"\"\"\n _LOGGER.debug(_(\"Putting %s to memory\"), key)\n self.memory[key] = data\n await self._put_to_database(key, self.memory[key])\n \n async def _get_from_database(self, key):\n- \"\"\"Get updates from databases for a given key.\"\"\"\n+ \"\"\"Get updates from databases for a given key.\n+\n+ Gets the first key value found from the database(s).\n+\n+ Args:\n+ key (str): Key to retrieve data from a database.\n+\n+ Returns:\n+ The first key value (data object) found from the database(s).\n+ Or `None` when no database is defined or no value is found.\n+\n+ Todo:\n+ * Handle multiple databases\n+\n+ \"\"\"\n if not self.databases:\n return None\n \n results = []\n for database in self.databases:\n results.append(await database.get(key))\n- # TODO: Handle multiple databases\n return results[0]\n \n async def _put_to_database(self, key, data):\n- \"\"\"Put updates into databases for a given key.\"\"\"\n+ \"\"\"Put updates into databases for a given key.\n+\n+ Stores the key and value on each database defined.\n+\n+ Args:\n+ key (str): Key for the data to store.\n+ data (obj): Data object to store.\n+\n+ \"\"\"\n if self.databases:\n for database in self.databases:\n await database.put(key, data)\n", "issue": "Add Google Style Docstrings\nWe should implement Google Style Docstrings to every function, method, class in opsdroid. 
This style will support existing documentation and will help in the future by generating documentation automatically.\r\n\r\nThis consists in a bit of effort so this issue can be worked by more than one contributor, just make sure that everyone knows what you are working on in order to avoid other contributors spending time on something that you are working on.\r\n\r\nIf you are unfamiliar with the Google Style Docstrings I'd recommend that you check these resources:\r\n\r\n - [Sphix 1.8.0+ - Google Style Docstrings](https://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html)\r\n\r\n\r\n\r\nDocstrings that need to be updated:\r\n\r\n- main.py\r\n - [x] configure_lang\r\n - [ ] configure_log\r\n - [ ] get_logging_level\r\n - [ ] check_dependencies\r\n - [ ] print_version\r\n - [ ] print_example_config\r\n - [ ] edit_files\r\n - [x] welcome_message\r\n- ~~helper.py~~\r\n - [x] get_opsdroid\r\n - [x] del_rw\r\n - [x] move_config_to_appdir\r\n- memory.py\r\n - [x] Memory\r\n - [x] get\r\n - [x] put\r\n - [x] _get_from_database\r\n - [x] _put_to_database\r\n- message.py\r\n - [x] Message\r\n - [x] __init__\r\n - [x] _thinking_delay\r\n - [x] _typing delay\r\n - [x] respond\r\n - [x] react\r\n- web.py\r\n - [ ] Web\r\n - [x] get_port\r\n - [x] get_host\r\n - [x] get_ssl_context\r\n - [ ] start\r\n - [ ] build_response\r\n - [ ] web_index_handler\r\n - [ ] web_stats_handler\r\n- matchers.py\r\n - [ ] match_regex\r\n - [ ] match_apiai_action\r\n - [ ] match_apiai_intent\r\n - [ ] match_dialogflow_action\r\n - [ ] match_dialogflow_intent\r\n - [ ] match_luisai_intent\r\n - [ ] match_rasanlu\r\n - [ ] match_recastai\r\n - [ ] match_witai\r\n - [ ] match_crontab\r\n - [ ] match_webhook\r\n - [ ] match_always\r\n- core.py\r\n - [ ] OpsDroid\r\n - [ ] default_connector\r\n - [ ] exit\r\n - [ ] critical\r\n - [ ] call_stop\r\n - [ ] disconnect\r\n - [ ] stop\r\n - [ ] load\r\n - [ ] start_loop\r\n - [x] setup_skills\r\n - [ ] train_parsers\r\n - [ ] start_connector_tasks\r\n - [ ] start_database\r\n - [ ] run_skill\r\n - [ ] get_ranked_skills\r\n - [ ] parse\r\n- loader.py\r\n - [ ] Loader\r\n - [x] import_module_from_spec\r\n - [x] import_module\r\n - [x] check_cache\r\n - [x] build_module_import_path\r\n - [x] build_module_install_path\r\n - [x] git_clone\r\n - [x] git_pull\r\n - [x] pip_install_deps\r\n - [x] create_default_config\r\n - [x] load_config_file\r\n - [ ] envvar_constructor\r\n - [ ] include_constructor\r\n - [x] setup_modules_directory\r\n - [x] load_modules_from_config\r\n - [x] _load_modules\r\n - [x] _install_module\r\n - [x] _update_module\r\n - [ ] _install_git_module\r\n - [x] _install_local_module\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n---- ORIGINAL POST ---- \r\nI've been wondering about this for a while now and I would like to know if we should replace/update all the docstrings in opsdroid with the Google Style doc strings. 
\r\n\r\nI think this could help new and old contributors to contribute and commit to opsdroid since the Google Style docstrings give more information about every method/function and specifies clearly what sort of input the function/method expects, what will it return and what will be raised (if applicable).\r\n\r\nThe downsize of this style is that the length of every .py file will increase due to the doc strings, but since most IDE's allow you to hide those fields it shouldn't be too bad.\r\n\r\nHere is a good example of Google Style Doc strings: [Sphix 1.8.0+ - Google Style Docstrings](http://www.sphinx-doc.org/en/master/ext/example_google.html)\r\n\r\nI would like to know what you all think about this idea and if its worth spending time on it.\n", "before_files": [{"content": "\"\"\"Class for persisting information in opsdroid.\"\"\"\n\nimport logging\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Memory:\n \"\"\"An object to store and persist data outside of opsdroid.\"\"\"\n\n def __init__(self):\n \"\"\"Create memory dictionary.\"\"\"\n self.memory = {}\n self.databases = []\n\n async def get(self, key):\n \"\"\"Get data object for a given key.\"\"\"\n _LOGGER.debug(_(\"Getting %s from memory.\"), key)\n database_result = await self._get_from_database(key)\n if database_result is not None:\n self.memory[key] = database_result\n if key in self.memory:\n return self.memory[key]\n\n return None\n\n async def put(self, key, data):\n \"\"\"Put a data object to a given key.\"\"\"\n _LOGGER.debug(_(\"Putting %s to memory\"), key)\n self.memory[key] = data\n await self._put_to_database(key, self.memory[key])\n\n async def _get_from_database(self, key):\n \"\"\"Get updates from databases for a given key.\"\"\"\n if not self.databases:\n return None\n\n results = []\n for database in self.databases:\n results.append(await database.get(key))\n # TODO: Handle multiple databases\n return results[0]\n\n async def _put_to_database(self, key, data):\n \"\"\"Put updates into databases for a given key.\"\"\"\n if self.databases:\n for database in self.databases:\n await database.put(key, data)\n", "path": "opsdroid/memory.py"}], "after_files": [{"content": "\"\"\"Class for persisting information in opsdroid.\"\"\"\n\nimport logging\n\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Memory:\n \"\"\"A Memory object.\n\n An object to obtain, store and persist data outside of opsdroid.\n\n Attributes:\n databases (:obj:`list` of :obj:`Database`): List of database objects.\n memory (:obj:`dict`): In-memory dictionary to store data.\n\n \"\"\"\n\n def __init__(self):\n \"\"\"Create object with minimum properties.\"\"\"\n self.memory = {}\n self.databases = []\n\n async def get(self, key):\n \"\"\"Get data object for a given key.\n\n Gets the key value found in-memory or from the database(s).\n\n Args:\n key (str): Key to retrieve data.\n\n Returns:\n A data object for the given key, otherwise `None`.\n\n \"\"\"\n _LOGGER.debug(_(\"Getting %s from memory.\"), key)\n database_result = await self._get_from_database(key)\n if database_result is not None:\n self.memory[key] = database_result\n if key in self.memory:\n return self.memory[key]\n\n return None\n\n async def put(self, key, data):\n \"\"\"Put a data object to a given key.\n\n Stores the key and value in memory and the database(s).\n\n Args:\n key (str): Key for the data to store.\n data (obj): Data object to store.\n\n \"\"\"\n _LOGGER.debug(_(\"Putting %s to memory\"), key)\n self.memory[key] = data\n await self._put_to_database(key, 
self.memory[key])\n\n async def _get_from_database(self, key):\n \"\"\"Get updates from databases for a given key.\n\n Gets the first key value found from the database(s).\n\n Args:\n key (str): Key to retrieve data from a database.\n\n Returns:\n The first key value (data object) found from the database(s).\n Or `None` when no database is defined or no value is found.\n\n Todo:\n * Handle multiple databases\n\n \"\"\"\n if not self.databases:\n return None\n\n results = []\n for database in self.databases:\n results.append(await database.get(key))\n return results[0]\n\n async def _put_to_database(self, key, data):\n \"\"\"Put updates into databases for a given key.\n\n Stores the key and value on each database defined.\n\n Args:\n key (str): Key for the data to store.\n data (obj): Data object to store.\n\n \"\"\"\n if self.databases:\n for database in self.databases:\n await database.put(key, data)\n", "path": "opsdroid/memory.py"}]} | 1,707 | 701 |
gh_patches_debug_10081 | rasdani/github-patches | git_diff | qtile__qtile-1659 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
qtile-top crash 'Client' object has no attribute 'tracemalloc_dump'
<!--
Please do not ask general questions here! There are [community
contact](https://github.com/qtile/qtile#community) options for that.
-->
# Issue description
<!--
A brief discussion of what failed and how it failed. A description of
what you tried is helpful, i.e. "When I use lazy.kill() on a window I get
the following stack trace" instead of "Closing windows doesn't work".
-->
Tried to run `qtile-top` and it crashed. I'm not sure what should be done before running that script - there is no documentation.
Tried
```
arnas@zordmachine ~ qtile-cmd -o cmd -f tracemalloc_dump
[False, 'Trace not started']
arnas@zordmachine ~ qtile-cmd -o cmd -f tracemalloc_toggle
arnas@zordmachine ~ qtile-cmd -o cmd -f tracemalloc_dump
[True, '/home/arnas/.cache/qtile/qtile_tracemalloc.dump']
```
but this didn't help
# Qtile version
<!--
Please include the exact commit hash of the version of Qtile that failed.
-->
5bd8e57f78b72ed9de4d10bf51af6db51561d794 (latest git version)
# Stack traces
<!--
Please attach any stack traces found in:
* `~/.xsession-errors`
* `~/.local/share/qtile/qtile.log`
-->
Traceback (most recent call last):
File "/usr/bin/qtile-top", line 11, in <module>
load_entry_point('qtile==0.15.0', 'console_scripts', 'qtile-top')()
File "/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py", line 164, in main
curses.wrapper(get_stats, client, limit=lines, seconds=seconds,
File "/usr/lib/python3.8/curses/__init__.py", line 105, in wrapper
return func(stdscr, *args, **kwds)
File "/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py", line 91, in get_stats
snapshot = get_trace(client, force_start)
File "/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py", line 63, in get_trace
(started, path) = client.tracemalloc_dump()
AttributeError: 'Client' object has no attribute 'tracemalloc_dump'
# Configuration
<!--
Please include a link or attach your configuration to the issue.
-->
https://github.com/zordsdavini/qtile-config
Arch
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libqtile/scripts/qtile_top.py`
Content:
```
1 # Copyright (c) 2015, Roger Duran
2 #
3 # Permission is hereby granted, free of charge, to any person obtaining a copy
4 # of this software and associated documentation files (the "Software"), to deal
5 # in the Software without restriction, including without limitation the rights
6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7 # copies of the Software, and to permit persons to whom the Software is
8 # furnished to do so, subject to the following conditions:
9 #
10 # The above copyright notice and this permission notice shall be included in
11 # all copies or substantial portions of the Software.
12 #
13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
19 # SOFTWARE.
20
21 """
22 Command-line top like for qtile
23 """
24
25 import argparse
26 import curses
27 import linecache
28 import os
29 import time
30 import tracemalloc
31 from tracemalloc import Snapshot
32
33 from libqtile import ipc
34
35
36 class TraceNotStarted(Exception):
37 pass
38
39
40 class TraceCantStart(Exception):
41 pass
42
43
44 def parse_args():
45 parser = argparse.ArgumentParser(description="Top like for qtile")
46 parser.add_argument('-l', '--lines', type=int, dest="lines", default=10,
47 help='Number of lines.')
48 parser.add_argument('-r', '--raw', dest="raw", action="store_true",
49 default=False, help='Output raw without curses')
50 parser.add_argument('-t', '--time', type=float, dest="seconds",
51 default=1.5, help='Number of seconds to refresh')
52 parser.add_argument('--force-start', dest="force_start",
53 action="store_true", default=False,
54 help='Force start tracemalloc on qtile')
55 parser.add_argument('-s', '--socket', type=str, dest="socket",
56 help='Use specified communication socket.')
57
58 opts = parser.parse_args()
59 return opts
60
61
62 def get_trace(client, force_start):
63 (started, path) = client.tracemalloc_dump()
64 if force_start and not started:
65 client.tracemalloc_toggle()
66 (started, path) = client.tracemalloc_dump()
67 if not started:
68 raise TraceCantStart
69 elif not started:
70 raise TraceNotStarted
71
72 return Snapshot.load(path)
73
74
75 def filter_snapshot(snapshot):
76 return snapshot.filter_traces((
77 tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
78 tracemalloc.Filter(False, "<unknown>"),
79 ))
80
81
82 def get_stats(scr, client, group_by='lineno', limit=10, seconds=1.5,
83 force_start=False):
84 (max_y, max_x) = scr.getmaxyx()
85 curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
86 while True:
87 scr.addstr(0, 0, "Qtile - Top {} lines".format(limit))
88 scr.addstr(1, 0, '{0:<3s} {1:<40s} {2:<30s} {3:<16s}'.format('#', 'Line', 'Memory', ' ' * (max_x - 71)),
89 curses.A_BOLD | curses.A_REVERSE)
90
91 snapshot = get_trace(client, force_start)
92 snapshot = filter_snapshot(snapshot)
93 top_stats = snapshot.statistics(group_by)
94 cnt = 1
95 for index, stat in enumerate(top_stats[:limit], 1):
96 frame = stat.traceback[0]
97 # replace "/path/to/module/file.py" with "module/file.py"
98 filename = os.sep.join(frame.filename.split(os.sep)[-2:])
99 code = ""
100 line = linecache.getline(frame.filename, frame.lineno).strip()
101 if line:
102 code = line
103 mem = "{:.1f} KiB".format(stat.size / 1024.0)
104 filename = "{}:{}".format(filename, frame.lineno)
105 scr.addstr(cnt + 1, 0, '{:<3} {:<40} {:<30}'.format(index, filename, mem))
106 scr.addstr(cnt + 2, 4, code, curses.color_pair(1))
107 cnt += 2
108
109 other = top_stats[limit:]
110 cnt += 2
111 if other:
112 size = sum(stat.size for stat in other)
113 other_size = ("{:d} other: {:.1f} KiB".format(len(other), size / 1024.0))
114 scr.addstr(cnt, 0, other_size, curses.A_BOLD)
115 cnt += 1
116
117 total = sum(stat.size for stat in top_stats)
118 total_size = "Total allocated size: {0:.1f} KiB".format(total / 1024.0)
119 scr.addstr(cnt, 0, total_size, curses.A_BOLD)
120
121 scr.move(max_y - 2, max_y - 2)
122 scr.refresh()
123 time.sleep(seconds)
124 scr.erase()
125
126
127 def raw_stats(client, group_by='lineno', limit=10, force_start=False):
128 snapshot = get_trace(client, force_start)
129 snapshot = filter_snapshot(snapshot)
130 top_stats = snapshot.statistics(group_by)
131
132 print("Qtile - Top {} lines".format(limit))
133 for index, stat in enumerate(top_stats[:limit], 1):
134 frame = stat.traceback[0]
135 # replace "/path/to/module/file.py" with "module/file.py"
136 filename = os.sep.join(frame.filename.split(os.sep)[-2:])
137 print("#{}: {}:{}: {:.1f} KiB"
138 .format(index, filename, frame.lineno, stat.size / 1024.0))
139 line = linecache.getline(frame.filename, frame.lineno).strip()
140 if line:
141 print(' {}'.format(line))
142
143 other = top_stats[limit:]
144 if other:
145 size = sum(stat.size for stat in other)
146 print("{:d} other: {:.1f} KiB".format(len(other), size / 1024.0))
147 total = sum(stat.size for stat in top_stats)
148 print("Total allocated size: {0:.1f} KiB".format(total / 1024.0))
149
150
151 def main():
152 opts = parse_args()
153 lines = opts.lines
154 seconds = opts.seconds
155 force_start = opts.force_start
156 if opts.socket is None:
157 socket = ipc.find_sockfile()
158 else:
159 socket = opts.socket
160 client = ipc.Client(socket)
161
162 try:
163 if not opts.raw:
164 curses.wrapper(get_stats, client, limit=lines, seconds=seconds,
165 force_start=force_start)
166 else:
167 raw_stats(client, limit=lines, force_start=force_start)
168 except TraceNotStarted:
169 print("tracemalloc not started on qtile, start by setting "
170 "PYTHONTRACEMALLOC=1 before starting qtile")
171 print("or force start tracemalloc now, but you'll lose early traces")
172 exit(1)
173 except TraceCantStart:
174 print("Can't start tracemalloc on qtile, check the logs")
175 except KeyboardInterrupt:
176 exit(-1)
177
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libqtile/scripts/qtile_top.py b/libqtile/scripts/qtile_top.py
--- a/libqtile/scripts/qtile_top.py
+++ b/libqtile/scripts/qtile_top.py
@@ -30,7 +30,7 @@
import tracemalloc
from tracemalloc import Snapshot
-from libqtile import ipc
+from libqtile import command_client, command_interface, ipc
class TraceNotStarted(Exception):
@@ -158,6 +158,8 @@
else:
socket = opts.socket
client = ipc.Client(socket)
+ client = command_interface.IPCCommandInterface(client)
+ client = command_client.InteractiveCommandClient(client)
try:
if not opts.raw:
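For context, a minimal sketch of how the patched script ends up constructing the client, using only names that appear in this diff and in the original `main()`:

```python
from libqtile import command_client, command_interface, ipc

socket = ipc.find_sockfile()
raw_client = ipc.Client(socket)
# Wrap the raw IPC client so that command calls such as tracemalloc_dump()
# are resolved through the command interface instead of raising AttributeError.
interface = command_interface.IPCCommandInterface(raw_client)
client = command_client.InteractiveCommandClient(interface)

started, path = client.tracemalloc_dump()
```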
| {"golden_diff": "diff --git a/libqtile/scripts/qtile_top.py b/libqtile/scripts/qtile_top.py\n--- a/libqtile/scripts/qtile_top.py\n+++ b/libqtile/scripts/qtile_top.py\n@@ -30,7 +30,7 @@\n import tracemalloc\n from tracemalloc import Snapshot\n \n-from libqtile import ipc\n+from libqtile import command_client, command_interface, ipc\n \n \n class TraceNotStarted(Exception):\n@@ -158,6 +158,8 @@\n else:\n socket = opts.socket\n client = ipc.Client(socket)\n+ client = command_interface.IPCCommandInterface(client)\n+ client = command_client.InteractiveCommandClient(client)\n \n try:\n if not opts.raw:\n", "issue": "qtile-top crash 'Client' object has no attribute 'tracemalloc_dump'\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. \"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\nTried to run `qtile-top` and it crashed. I'm not sure should be done before that script - no documentation.\r\nTried \r\n```\r\n arnas@zordmachine \ue0b0 ~ \ue0b0 qtile-cmd -o cmd -f tracemalloc_dump\r\n[False, 'Trace not started']\r\n arnas@zordmachine \ue0b0 ~ \ue0b0 qtile-cmd -o cmd -f tracemalloc_toggle\r\n arnas@zordmachine \ue0b0 ~ \ue0b0 qtile-cmd -o cmd -f tracemalloc_dump\r\n[True, '/home/arnas/.cache/qtile/qtile_tracemalloc.dump']\r\n```\r\nbut this didn't helped\r\n\r\n# Qtile version\r\n\r\n<!--\r\nPlease include the exact commit hash of the version of Qtile that failed.\r\n-->\r\n\r\n5bd8e57f78b72ed9de4d10bf51af6db51561d794 (latest git version)\r\n\r\n# Stack traces\r\n\r\n<!--\r\nPlease attach any stack traces found in:\r\n\r\n* `~/.xsession-errors`\r\n* `~/.local/share/qtile/qtile.log`\r\n-->\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/qtile-top\", line 11, in <module>\r\n load_entry_point('qtile==0.15.0', 'console_scripts', 'qtile-top')()\r\n File \"/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py\", line 164, in main\r\n curses.wrapper(get_stats, client, limit=lines, seconds=seconds,\r\n File \"/usr/lib/python3.8/curses/__init__.py\", line 105, in wrapper\r\n return func(stdscr, *args, **kwds)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py\", line 91, in get_stats\r\n snapshot = get_trace(client, force_start)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py\", line 63, in get_trace\r\n (started, path) = client.tracemalloc_dump()\r\nAttributeError: 'Client' object has no attribute 'tracemalloc_dump'\r\n\r\n\r\n# Configuration\r\n\r\n<!--\r\nPlease include a link or attach your configuration to the issue.\r\n-->\r\nhttps://github.com/zordsdavini/qtile-config\r\nArch\nqtile-top crash 'Client' object has no attribute 'tracemalloc_dump'\n<!--\r\nPlease do not ask general questions here! There are [community\r\ncontact](https://github.com/qtile/qtile#community) options for that.\r\n-->\r\n\r\n# Issue description\r\n\r\n<!--\r\nA brief discussion of what failed and how it failed. A description of\r\nwhat you tried is helpful, i.e. \"When I use lazy.kill() on a window I get\r\nthe following stack trace\" instead of \"Closing windows doesn't work\".\r\n-->\r\nTried to run `qtile-top` and it crashed. 
I'm not sure should be done before that script - no documentation.\r\nTried \r\n```\r\n arnas@zordmachine \ue0b0 ~ \ue0b0 qtile-cmd -o cmd -f tracemalloc_dump\r\n[False, 'Trace not started']\r\n arnas@zordmachine \ue0b0 ~ \ue0b0 qtile-cmd -o cmd -f tracemalloc_toggle\r\n arnas@zordmachine \ue0b0 ~ \ue0b0 qtile-cmd -o cmd -f tracemalloc_dump\r\n[True, '/home/arnas/.cache/qtile/qtile_tracemalloc.dump']\r\n```\r\nbut this didn't helped\r\n\r\n# Qtile version\r\n\r\n<!--\r\nPlease include the exact commit hash of the version of Qtile that failed.\r\n-->\r\n\r\n5bd8e57f78b72ed9de4d10bf51af6db51561d794 (latest git version)\r\n\r\n# Stack traces\r\n\r\n<!--\r\nPlease attach any stack traces found in:\r\n\r\n* `~/.xsession-errors`\r\n* `~/.local/share/qtile/qtile.log`\r\n-->\r\n\r\nTraceback (most recent call last):\r\n File \"/usr/bin/qtile-top\", line 11, in <module>\r\n load_entry_point('qtile==0.15.0', 'console_scripts', 'qtile-top')()\r\n File \"/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py\", line 164, in main\r\n curses.wrapper(get_stats, client, limit=lines, seconds=seconds,\r\n File \"/usr/lib/python3.8/curses/__init__.py\", line 105, in wrapper\r\n return func(stdscr, *args, **kwds)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py\", line 91, in get_stats\r\n snapshot = get_trace(client, force_start)\r\n File \"/usr/lib/python3.8/site-packages/libqtile/scripts/qtile_top.py\", line 63, in get_trace\r\n (started, path) = client.tracemalloc_dump()\r\nAttributeError: 'Client' object has no attribute 'tracemalloc_dump'\r\n\r\n\r\n# Configuration\r\n\r\n<!--\r\nPlease include a link or attach your configuration to the issue.\r\n-->\r\nhttps://github.com/zordsdavini/qtile-config\r\nArch\n", "before_files": [{"content": "# Copyright (c) 2015, Roger Duran\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n Command-line top like for qtile\n\"\"\"\n\nimport argparse\nimport curses\nimport linecache\nimport os\nimport time\nimport tracemalloc\nfrom tracemalloc import Snapshot\n\nfrom libqtile import ipc\n\n\nclass TraceNotStarted(Exception):\n pass\n\n\nclass TraceCantStart(Exception):\n pass\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Top like for qtile\")\n parser.add_argument('-l', '--lines', type=int, dest=\"lines\", default=10,\n help='Number of lines.')\n parser.add_argument('-r', '--raw', dest=\"raw\", action=\"store_true\",\n default=False, help='Output raw without curses')\n parser.add_argument('-t', '--time', type=float, dest=\"seconds\",\n default=1.5, help='Number of seconds to refresh')\n parser.add_argument('--force-start', dest=\"force_start\",\n action=\"store_true\", default=False,\n help='Force start tracemalloc on qtile')\n parser.add_argument('-s', '--socket', type=str, dest=\"socket\",\n help='Use specified communication socket.')\n\n opts = parser.parse_args()\n return opts\n\n\ndef get_trace(client, force_start):\n (started, path) = client.tracemalloc_dump()\n if force_start and not started:\n client.tracemalloc_toggle()\n (started, path) = client.tracemalloc_dump()\n if not started:\n raise TraceCantStart\n elif not started:\n raise TraceNotStarted\n\n return Snapshot.load(path)\n\n\ndef filter_snapshot(snapshot):\n return snapshot.filter_traces((\n tracemalloc.Filter(False, \"<frozen importlib._bootstrap>\"),\n tracemalloc.Filter(False, \"<unknown>\"),\n ))\n\n\ndef get_stats(scr, client, group_by='lineno', limit=10, seconds=1.5,\n force_start=False):\n (max_y, max_x) = scr.getmaxyx()\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n while True:\n scr.addstr(0, 0, \"Qtile - Top {} lines\".format(limit))\n scr.addstr(1, 0, '{0:<3s} {1:<40s} {2:<30s} {3:<16s}'.format('#', 'Line', 'Memory', ' ' * (max_x - 71)),\n curses.A_BOLD | curses.A_REVERSE)\n\n snapshot = get_trace(client, force_start)\n snapshot = filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n cnt = 1\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n code = \"\"\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n code = line\n mem = \"{:.1f} KiB\".format(stat.size / 1024.0)\n filename = \"{}:{}\".format(filename, frame.lineno)\n scr.addstr(cnt + 1, 0, '{:<3} {:<40} {:<30}'.format(index, filename, mem))\n scr.addstr(cnt + 2, 4, code, curses.color_pair(1))\n cnt += 2\n\n other = top_stats[limit:]\n cnt += 2\n if other:\n size = sum(stat.size for stat in other)\n other_size = (\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n scr.addstr(cnt, 0, other_size, curses.A_BOLD)\n cnt += 1\n\n total = sum(stat.size for stat in top_stats)\n total_size = \"Total allocated size: {0:.1f} KiB\".format(total / 1024.0)\n scr.addstr(cnt, 0, total_size, curses.A_BOLD)\n\n scr.move(max_y - 2, max_y - 2)\n scr.refresh()\n time.sleep(seconds)\n scr.erase()\n\n\ndef raw_stats(client, group_by='lineno', limit=10, force_start=False):\n snapshot = get_trace(client, force_start)\n snapshot = 
filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n\n print(\"Qtile - Top {} lines\".format(limit))\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n print(\"#{}: {}:{}: {:.1f} KiB\"\n .format(index, filename, frame.lineno, stat.size / 1024.0))\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n print(' {}'.format(line))\n\n other = top_stats[limit:]\n if other:\n size = sum(stat.size for stat in other)\n print(\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n total = sum(stat.size for stat in top_stats)\n print(\"Total allocated size: {0:.1f} KiB\".format(total / 1024.0))\n\n\ndef main():\n opts = parse_args()\n lines = opts.lines\n seconds = opts.seconds\n force_start = opts.force_start\n if opts.socket is None:\n socket = ipc.find_sockfile()\n else:\n socket = opts.socket\n client = ipc.Client(socket)\n\n try:\n if not opts.raw:\n curses.wrapper(get_stats, client, limit=lines, seconds=seconds,\n force_start=force_start)\n else:\n raw_stats(client, limit=lines, force_start=force_start)\n except TraceNotStarted:\n print(\"tracemalloc not started on qtile, start by setting \"\n \"PYTHONTRACEMALLOC=1 before starting qtile\")\n print(\"or force start tracemalloc now, but you'll lose early traces\")\n exit(1)\n except TraceCantStart:\n print(\"Can't start tracemalloc on qtile, check the logs\")\n except KeyboardInterrupt:\n exit(-1)\n", "path": "libqtile/scripts/qtile_top.py"}], "after_files": [{"content": "# Copyright (c) 2015, Roger Duran\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n\"\"\"\n Command-line top like for qtile\n\"\"\"\n\nimport argparse\nimport curses\nimport linecache\nimport os\nimport time\nimport tracemalloc\nfrom tracemalloc import Snapshot\n\nfrom libqtile import command_client, command_interface, ipc\n\n\nclass TraceNotStarted(Exception):\n pass\n\n\nclass TraceCantStart(Exception):\n pass\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"Top like for qtile\")\n parser.add_argument('-l', '--lines', type=int, dest=\"lines\", default=10,\n help='Number of lines.')\n parser.add_argument('-r', '--raw', dest=\"raw\", action=\"store_true\",\n default=False, help='Output raw without curses')\n parser.add_argument('-t', '--time', type=float, dest=\"seconds\",\n default=1.5, help='Number of seconds to refresh')\n parser.add_argument('--force-start', dest=\"force_start\",\n action=\"store_true\", default=False,\n help='Force start tracemalloc on qtile')\n parser.add_argument('-s', '--socket', type=str, dest=\"socket\",\n help='Use specified communication socket.')\n\n opts = parser.parse_args()\n return opts\n\n\ndef get_trace(client, force_start):\n (started, path) = client.tracemalloc_dump()\n if force_start and not started:\n client.tracemalloc_toggle()\n (started, path) = client.tracemalloc_dump()\n if not started:\n raise TraceCantStart\n elif not started:\n raise TraceNotStarted\n\n return Snapshot.load(path)\n\n\ndef filter_snapshot(snapshot):\n return snapshot.filter_traces((\n tracemalloc.Filter(False, \"<frozen importlib._bootstrap>\"),\n tracemalloc.Filter(False, \"<unknown>\"),\n ))\n\n\ndef get_stats(scr, client, group_by='lineno', limit=10, seconds=1.5,\n force_start=False):\n (max_y, max_x) = scr.getmaxyx()\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n while True:\n scr.addstr(0, 0, \"Qtile - Top {} lines\".format(limit))\n scr.addstr(1, 0, '{0:<3s} {1:<40s} {2:<30s} {3:<16s}'.format('#', 'Line', 'Memory', ' ' * (max_x - 71)),\n curses.A_BOLD | curses.A_REVERSE)\n\n snapshot = get_trace(client, force_start)\n snapshot = filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n cnt = 1\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n code = \"\"\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n code = line\n mem = \"{:.1f} KiB\".format(stat.size / 1024.0)\n filename = \"{}:{}\".format(filename, frame.lineno)\n scr.addstr(cnt + 1, 0, '{:<3} {:<40} {:<30}'.format(index, filename, mem))\n scr.addstr(cnt + 2, 4, code, curses.color_pair(1))\n cnt += 2\n\n other = top_stats[limit:]\n cnt += 2\n if other:\n size = sum(stat.size for stat in other)\n other_size = (\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n scr.addstr(cnt, 0, other_size, curses.A_BOLD)\n cnt += 1\n\n total = sum(stat.size for stat in top_stats)\n total_size = \"Total allocated size: {0:.1f} KiB\".format(total / 1024.0)\n scr.addstr(cnt, 0, total_size, curses.A_BOLD)\n\n scr.move(max_y - 2, max_y - 2)\n scr.refresh()\n time.sleep(seconds)\n scr.erase()\n\n\ndef raw_stats(client, group_by='lineno', limit=10, force_start=False):\n snapshot = get_trace(client, 
force_start)\n snapshot = filter_snapshot(snapshot)\n top_stats = snapshot.statistics(group_by)\n\n print(\"Qtile - Top {} lines\".format(limit))\n for index, stat in enumerate(top_stats[:limit], 1):\n frame = stat.traceback[0]\n # replace \"/path/to/module/file.py\" with \"module/file.py\"\n filename = os.sep.join(frame.filename.split(os.sep)[-2:])\n print(\"#{}: {}:{}: {:.1f} KiB\"\n .format(index, filename, frame.lineno, stat.size / 1024.0))\n line = linecache.getline(frame.filename, frame.lineno).strip()\n if line:\n print(' {}'.format(line))\n\n other = top_stats[limit:]\n if other:\n size = sum(stat.size for stat in other)\n print(\"{:d} other: {:.1f} KiB\".format(len(other), size / 1024.0))\n total = sum(stat.size for stat in top_stats)\n print(\"Total allocated size: {0:.1f} KiB\".format(total / 1024.0))\n\n\ndef main():\n opts = parse_args()\n lines = opts.lines\n seconds = opts.seconds\n force_start = opts.force_start\n if opts.socket is None:\n socket = ipc.find_sockfile()\n else:\n socket = opts.socket\n client = ipc.Client(socket)\n client = command_interface.IPCCommandInterface(client)\n client = command_client.InteractiveCommandClient(client)\n\n try:\n if not opts.raw:\n curses.wrapper(get_stats, client, limit=lines, seconds=seconds,\n force_start=force_start)\n else:\n raw_stats(client, limit=lines, force_start=force_start)\n except TraceNotStarted:\n print(\"tracemalloc not started on qtile, start by setting \"\n \"PYTHONTRACEMALLOC=1 before starting qtile\")\n print(\"or force start tracemalloc now, but you'll lose early traces\")\n exit(1)\n except TraceCantStart:\n print(\"Can't start tracemalloc on qtile, check the logs\")\n except KeyboardInterrupt:\n exit(-1)\n", "path": "libqtile/scripts/qtile_top.py"}]} | 3,590 | 162 |
gh_patches_debug_62336 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-273 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error importing discovery from the client API
Hello, I'm getting an `ImportError` ("cannot import name util") when running a simple script. I've tried running the script in two ways:
```
from apiclient import discovery
import json
from pprint import pprint
from httplib2 import Http
from oauth2client.service_account import ServiceAccountCredentials
scopes = ['https://www.googleapis.com/auth/fusiontables']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'service-account.json', scopes)
ft = discovery.build('fusiontables', 'v2', credentials=credentials)
pprint(ft)
```
Which gives:
> Traceback (most recent call last):
> File "apiTest.py", line 1, in <module>
> from apiclient import discovery
> File "build/bdist.linux-x86_64/egg/apiclient/**init**.py", line 18, in <module>
> File "build/bdist.linux-x86_64/egg/googleapiclient/channel.py", line 64, in <module>
> ImportError: cannot import name util
And:
```
from apiclient.discovery import build
import json
from pprint import pprint
from httplib2 import Http
from oauth2client.service_account import ServiceAccountCredentials
scopes = ['https://www.googleapis.com/auth/fusiontables']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
'service-account.json', scopes)
ft = discovery.build('fusiontables', 'v2', credentials=credentials)
pprint(ft)
#print response
```
Which yields the same exception:
> python apiTest.py
> Traceback (most recent call last):
> File "apiTest.py", line 1, in <module>
> from apiclient.discovery import build
> File "build/bdist.linux-x86_64/egg/apiclient/**init**.py", line 18, in <module>
> File "build/bdist.linux-x86_64/egg/googleapiclient/channel.py", line 64, in <module>
> ImportError: cannot import name util
I'm not sure if I'm missing a prereq or what, but I'm not able to find a way around this.
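As a side note (background knowledge, not something stated in the report above): the `apiclient` package is only a thin compatibility alias for `googleapiclient`, so the same service can be built with the direct import shown below; by itself this does not avoid the `ImportError` raised inside `googleapiclient/channel.py`.

```python
from googleapiclient import discovery  # same module that `apiclient.discovery` re-exports
from oauth2client.service_account import ServiceAccountCredentials

scopes = ['https://www.googleapis.com/auth/fusiontables']
credentials = ServiceAccountCredentials.from_json_keyfile_name(
    'service-account.json', scopes)
ft = discovery.build('fusiontables', 'v2', credentials=credentials)
```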
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `googleapiclient/channel.py`
Content:
```
1 """Channel notifications support.
2
3 Classes and functions to support channel subscriptions and notifications
4 on those channels.
5
6 Notes:
7 - This code is based on experimental APIs and is subject to change.
8 - Notification does not do deduplication of notification ids, that's up to
9 the receiver.
10 - Storing the Channel between calls is up to the caller.
11
12
13 Example setting up a channel:
14
15 # Create a new channel that gets notifications via webhook.
16 channel = new_webhook_channel("https://example.com/my_web_hook")
17
18 # Store the channel, keyed by 'channel.id'. Store it before calling the
19 # watch method because notifications may start arriving before the watch
20 # method returns.
21 ...
22
23 resp = service.objects().watchAll(
24 bucket="some_bucket_id", body=channel.body()).execute()
25 channel.update(resp)
26
27 # Store the channel, keyed by 'channel.id'. Store it after being updated
28 # since the resource_id value will now be correct, and that's needed to
29 # stop a subscription.
30 ...
31
32
33 An example Webhook implementation using webapp2. Note that webapp2 puts
34 headers in a case insensitive dictionary, as headers aren't guaranteed to
35 always be upper case.
36
37 id = self.request.headers[X_GOOG_CHANNEL_ID]
38
39 # Retrieve the channel by id.
40 channel = ...
41
42 # Parse notification from the headers, including validating the id.
43 n = notification_from_headers(channel, self.request.headers)
44
45 # Do app specific stuff with the notification here.
46 if n.resource_state == 'sync':
47 # Code to handle sync state.
48 elif n.resource_state == 'exists':
49 # Code to handle the exists state.
50 elif n.resource_state == 'not_exists':
51 # Code to handle the not exists state.
52
53
54 Example of unsubscribing.
55
56 service.channels().stop(channel.body())
57 """
58 from __future__ import absolute_import
59
60 import datetime
61 import uuid
62
63 from googleapiclient import errors
64 from oauth2client import util
65 import six
66
67 # Oauth2client < 3 has the positional helper in 'util', >= 3 has it
68 # in '_helpers'.
69 try:
70 from oauth2client import util
71 except ImportError:
72 from oauth2client import _helpers as util
73
74
75 # The unix time epoch starts at midnight 1970.
76 EPOCH = datetime.datetime.utcfromtimestamp(0)
77
78 # Map the names of the parameters in the JSON channel description to
79 # the parameter names we use in the Channel class.
80 CHANNEL_PARAMS = {
81 'address': 'address',
82 'id': 'id',
83 'expiration': 'expiration',
84 'params': 'params',
85 'resourceId': 'resource_id',
86 'resourceUri': 'resource_uri',
87 'type': 'type',
88 'token': 'token',
89 }
90
91 X_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'
92 X_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'
93 X_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'
94 X_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'
95 X_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'
96
97
98 def _upper_header_keys(headers):
99 new_headers = {}
100 for k, v in six.iteritems(headers):
101 new_headers[k.upper()] = v
102 return new_headers
103
104
105 class Notification(object):
106 """A Notification from a Channel.
107
108 Notifications are not usually constructed directly, but are returned
109 from functions like notification_from_headers().
110
111 Attributes:
112 message_number: int, The unique id number of this notification.
113 state: str, The state of the resource being monitored.
114 uri: str, The address of the resource being monitored.
115 resource_id: str, The unique identifier of the version of the resource at
116 this event.
117 """
118 @util.positional(5)
119 def __init__(self, message_number, state, resource_uri, resource_id):
120 """Notification constructor.
121
122 Args:
123 message_number: int, The unique id number of this notification.
124 state: str, The state of the resource being monitored. Can be one
125 of "exists", "not_exists", or "sync".
126 resource_uri: str, The address of the resource being monitored.
127 resource_id: str, The identifier of the watched resource.
128 """
129 self.message_number = message_number
130 self.state = state
131 self.resource_uri = resource_uri
132 self.resource_id = resource_id
133
134
135 class Channel(object):
136 """A Channel for notifications.
137
138 Usually not constructed directly, instead it is returned from helper
139 functions like new_webhook_channel().
140
141 Attributes:
142 type: str, The type of delivery mechanism used by this channel. For
143 example, 'web_hook'.
144 id: str, A UUID for the channel.
145 token: str, An arbitrary string associated with the channel that
146 is delivered to the target address with each event delivered
147 over this channel.
148 address: str, The address of the receiving entity where events are
149 delivered. Specific to the channel type.
150 expiration: int, The time, in milliseconds from the epoch, when this
151 channel will expire.
152 params: dict, A dictionary of string to string, with additional parameters
153 controlling delivery channel behavior.
154 resource_id: str, An opaque id that identifies the resource that is
155 being watched. Stable across different API versions.
156 resource_uri: str, The canonicalized ID of the watched resource.
157 """
158
159 @util.positional(5)
160 def __init__(self, type, id, token, address, expiration=None,
161 params=None, resource_id="", resource_uri=""):
162 """Create a new Channel.
163
164 In user code, this Channel constructor will not typically be called
165 manually since there are functions for creating channels for each specific
166 type with a more customized set of arguments to pass.
167
168 Args:
169 type: str, The type of delivery mechanism used by this channel. For
170 example, 'web_hook'.
171 id: str, A UUID for the channel.
172 token: str, An arbitrary string associated with the channel that
173 is delivered to the target address with each event delivered
174 over this channel.
175 address: str, The address of the receiving entity where events are
176 delivered. Specific to the channel type.
177 expiration: int, The time, in milliseconds from the epoch, when this
178 channel will expire.
179 params: dict, A dictionary of string to string, with additional parameters
180 controlling delivery channel behavior.
181 resource_id: str, An opaque id that identifies the resource that is
182 being watched. Stable across different API versions.
183 resource_uri: str, The canonicalized ID of the watched resource.
184 """
185 self.type = type
186 self.id = id
187 self.token = token
188 self.address = address
189 self.expiration = expiration
190 self.params = params
191 self.resource_id = resource_id
192 self.resource_uri = resource_uri
193
194 def body(self):
195 """Build a body from the Channel.
196
197 Constructs a dictionary that's appropriate for passing into watch()
198 methods as the value of body argument.
199
200 Returns:
201 A dictionary representation of the channel.
202 """
203 result = {
204 'id': self.id,
205 'token': self.token,
206 'type': self.type,
207 'address': self.address
208 }
209 if self.params:
210 result['params'] = self.params
211 if self.resource_id:
212 result['resourceId'] = self.resource_id
213 if self.resource_uri:
214 result['resourceUri'] = self.resource_uri
215 if self.expiration:
216 result['expiration'] = self.expiration
217
218 return result
219
220 def update(self, resp):
221 """Update a channel with information from the response of watch().
222
223 When a request is sent to watch() a resource, the response returned
224 from the watch() request is a dictionary with updated channel information,
225 such as the resource_id, which is needed when stopping a subscription.
226
227 Args:
228 resp: dict, The response from a watch() method.
229 """
230 for json_name, param_name in six.iteritems(CHANNEL_PARAMS):
231 value = resp.get(json_name)
232 if value is not None:
233 setattr(self, param_name, value)
234
235
236 def notification_from_headers(channel, headers):
237 """Parse a notification from the webhook request headers, validate
238 the notification, and return a Notification object.
239
240 Args:
241 channel: Channel, The channel that the notification is associated with.
242 headers: dict, A dictionary like object that contains the request headers
243 from the webhook HTTP request.
244
245 Returns:
246 A Notification object.
247
248 Raises:
249 errors.InvalidNotificationError if the notification is invalid.
250 ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.
251 """
252 headers = _upper_header_keys(headers)
253 channel_id = headers[X_GOOG_CHANNEL_ID]
254 if channel.id != channel_id:
255 raise errors.InvalidNotificationError(
256 'Channel id mismatch: %s != %s' % (channel.id, channel_id))
257 else:
258 message_number = int(headers[X_GOOG_MESSAGE_NUMBER])
259 state = headers[X_GOOG_RESOURCE_STATE]
260 resource_uri = headers[X_GOOG_RESOURCE_URI]
261 resource_id = headers[X_GOOG_RESOURCE_ID]
262 return Notification(message_number, state, resource_uri, resource_id)
263
264
265 @util.positional(2)
266 def new_webhook_channel(url, token=None, expiration=None, params=None):
267 """Create a new webhook Channel.
268
269 Args:
270 url: str, URL to post notifications to.
271 token: str, An arbitrary string associated with the channel that
272 is delivered to the target address with each notification delivered
273 over this channel.
274 expiration: datetime.datetime, A time in the future when the channel
275 should expire. Can also be None if the subscription should use the
276 default expiration. Note that different services may have different
277 limits on how long a subscription lasts. Check the response from the
278 watch() method to see the value the service has set for an expiration
279 time.
280 params: dict, Extra parameters to pass on channel creation. Currently
281 not used for webhook channels.
282 """
283 expiration_ms = 0
284 if expiration:
285 delta = expiration - EPOCH
286 expiration_ms = delta.microseconds/1000 + (
287 delta.seconds + delta.days*24*3600)*1000
288 if expiration_ms < 0:
289 expiration_ms = 0
290
291 return Channel('web_hook', str(uuid.uuid4()),
292 token, url, expiration=expiration_ms,
293 params=params)
294
295
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/googleapiclient/channel.py b/googleapiclient/channel.py
--- a/googleapiclient/channel.py
+++ b/googleapiclient/channel.py
@@ -61,7 +61,6 @@
import uuid
from googleapiclient import errors
-from oauth2client import util
import six
# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
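The fix simply drops the unconditional `from oauth2client import util` at module scope; the guarded import a few lines further down (already present in the file above) then becomes the only place `util` is resolved, so `googleapiclient.channel` imports cleanly whichever oauth2client generation is installed. The surviving import logic, for reference:
```python
# Import section of googleapiclient/channel.py after the patch (excerpt).
from googleapiclient import errors
import six

# Oauth2client < 3 has the positional helper in 'util', >= 3 has it
# in '_helpers'.
try:
    from oauth2client import util
except ImportError:
    from oauth2client import _helpers as util
```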
| {"golden_diff": "diff --git a/googleapiclient/channel.py b/googleapiclient/channel.py\n--- a/googleapiclient/channel.py\n+++ b/googleapiclient/channel.py\n@@ -61,7 +61,6 @@\n import uuid\n \n from googleapiclient import errors\n-from oauth2client import util\n import six\n \n # Oauth2client < 3 has the positional helper in 'util', >= 3 has it\n", "issue": "Error importing discovery from the client API\nHello, I'm getting a name exception when running a simple script. I've tried running the script in two ways:\n\n```\nfrom apiclient import discovery\nimport json\n\nfrom pprint import pprint\nfrom httplib2 import Http\n\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nscopes = ['https://www.googleapis.com/auth/fusiontables']\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'service-account.json', scopes)\n\nft = discovery.build('fusiontables', 'v2', credentials=credentials)\npprint(ft)\n```\n\nWhich gives:\n\n> Traceback (most recent call last):\n> File \"apiTest.py\", line 1, in <module>\n> from apiclient import discovery\n> File \"build/bdist.linux-x86_64/egg/apiclient/**init**.py\", line 18, in <module>\n> File \"build/bdist.linux-x86_64/egg/googleapiclient/channel.py\", line 64, in <module>\n> ImportError: cannot import name util\n\nAnd:\n\n```\nfrom apiclient.discovery import build\nimport json\n\nfrom pprint import pprint\nfrom httplib2 import Http\n\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nscopes = ['https://www.googleapis.com/auth/fusiontables']\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name(\n 'service-account.json', scopes)\n\nft = discovery.build('fusiontables', 'v2', credentials=credentials)\npprint(ft)\n#print response\n```\n\nWhich yields the same exception:\n\n> python apiTest.py \n> Traceback (most recent call last):\n> File \"apiTest.py\", line 1, in <module>\n> from apiclient.discovery import build\n> File \"build/bdist.linux-x86_64/egg/apiclient/**init**.py\", line 18, in <module>\n> File \"build/bdist.linux-x86_64/egg/googleapiclient/channel.py\", line 64, in <module>\n> ImportError: cannot import name util\n\nI'm not sure if I'm missing a prereq or what, but I'm not able to find a way around this.\n\n", "before_files": [{"content": "\"\"\"Channel notifications support.\n\nClasses and functions to support channel subscriptions and notifications\non those channels.\n\nNotes:\n - This code is based on experimental APIs and is subject to change.\n - Notification does not do deduplication of notification ids, that's up to\n the receiver.\n - Storing the Channel between calls is up to the caller.\n\n\nExample setting up a channel:\n\n # Create a new channel that gets notifications via webhook.\n channel = new_webhook_channel(\"https://example.com/my_web_hook\")\n\n # Store the channel, keyed by 'channel.id'. Store it before calling the\n # watch method because notifications may start arriving before the watch\n # method returns.\n ...\n\n resp = service.objects().watchAll(\n bucket=\"some_bucket_id\", body=channel.body()).execute()\n channel.update(resp)\n\n # Store the channel, keyed by 'channel.id'. Store it after being updated\n # since the resource_id value will now be correct, and that's needed to\n # stop a subscription.\n ...\n\n\nAn example Webhook implementation using webapp2. 
Note that webapp2 puts\nheaders in a case insensitive dictionary, as headers aren't guaranteed to\nalways be upper case.\n\n id = self.request.headers[X_GOOG_CHANNEL_ID]\n\n # Retrieve the channel by id.\n channel = ...\n\n # Parse notification from the headers, including validating the id.\n n = notification_from_headers(channel, self.request.headers)\n\n # Do app specific stuff with the notification here.\n if n.resource_state == 'sync':\n # Code to handle sync state.\n elif n.resource_state == 'exists':\n # Code to handle the exists state.\n elif n.resource_state == 'not_exists':\n # Code to handle the not exists state.\n\n\nExample of unsubscribing.\n\n service.channels().stop(channel.body())\n\"\"\"\nfrom __future__ import absolute_import\n\nimport datetime\nimport uuid\n\nfrom googleapiclient import errors\nfrom oauth2client import util\nimport six\n\n# Oauth2client < 3 has the positional helper in 'util', >= 3 has it\n# in '_helpers'.\ntry:\n from oauth2client import util\nexcept ImportError:\n from oauth2client import _helpers as util\n\n\n# The unix time epoch starts at midnight 1970.\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\n# Map the names of the parameters in the JSON channel description to\n# the parameter names we use in the Channel class.\nCHANNEL_PARAMS = {\n 'address': 'address',\n 'id': 'id',\n 'expiration': 'expiration',\n 'params': 'params',\n 'resourceId': 'resource_id',\n 'resourceUri': 'resource_uri',\n 'type': 'type',\n 'token': 'token',\n }\n\nX_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'\nX_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'\nX_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'\nX_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'\nX_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'\n\n\ndef _upper_header_keys(headers):\n new_headers = {}\n for k, v in six.iteritems(headers):\n new_headers[k.upper()] = v\n return new_headers\n\n\nclass Notification(object):\n \"\"\"A Notification from a Channel.\n\n Notifications are not usually constructed directly, but are returned\n from functions like notification_from_headers().\n\n Attributes:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored.\n uri: str, The address of the resource being monitored.\n resource_id: str, The unique identifier of the version of the resource at\n this event.\n \"\"\"\n @util.positional(5)\n def __init__(self, message_number, state, resource_uri, resource_id):\n \"\"\"Notification constructor.\n\n Args:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored. Can be one\n of \"exists\", \"not_exists\", or \"sync\".\n resource_uri: str, The address of the resource being monitored.\n resource_id: str, The identifier of the watched resource.\n \"\"\"\n self.message_number = message_number\n self.state = state\n self.resource_uri = resource_uri\n self.resource_id = resource_id\n\n\nclass Channel(object):\n \"\"\"A Channel for notifications.\n\n Usually not constructed directly, instead it is returned from helper\n functions like new_webhook_channel().\n\n Attributes:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. 
Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n\n @util.positional(5)\n def __init__(self, type, id, token, address, expiration=None,\n params=None, resource_id=\"\", resource_uri=\"\"):\n \"\"\"Create a new Channel.\n\n In user code, this Channel constructor will not typically be called\n manually since there are functions for creating channels for each specific\n type with a more customized set of arguments to pass.\n\n Args:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n self.type = type\n self.id = id\n self.token = token\n self.address = address\n self.expiration = expiration\n self.params = params\n self.resource_id = resource_id\n self.resource_uri = resource_uri\n\n def body(self):\n \"\"\"Build a body from the Channel.\n\n Constructs a dictionary that's appropriate for passing into watch()\n methods as the value of body argument.\n\n Returns:\n A dictionary representation of the channel.\n \"\"\"\n result = {\n 'id': self.id,\n 'token': self.token,\n 'type': self.type,\n 'address': self.address\n }\n if self.params:\n result['params'] = self.params\n if self.resource_id:\n result['resourceId'] = self.resource_id\n if self.resource_uri:\n result['resourceUri'] = self.resource_uri\n if self.expiration:\n result['expiration'] = self.expiration\n\n return result\n\n def update(self, resp):\n \"\"\"Update a channel with information from the response of watch().\n\n When a request is sent to watch() a resource, the response returned\n from the watch() request is a dictionary with updated channel information,\n such as the resource_id, which is needed when stopping a subscription.\n\n Args:\n resp: dict, The response from a watch() method.\n \"\"\"\n for json_name, param_name in six.iteritems(CHANNEL_PARAMS):\n value = resp.get(json_name)\n if value is not None:\n setattr(self, param_name, value)\n\n\ndef notification_from_headers(channel, headers):\n \"\"\"Parse a notification from the webhook request headers, validate\n the notification, and return a Notification object.\n\n Args:\n channel: Channel, The channel that the notification is associated with.\n headers: dict, A dictionary like object that contains the request headers\n from the webhook HTTP request.\n\n Returns:\n A Notification object.\n\n Raises:\n errors.InvalidNotificationError if the notification is invalid.\n ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.\n 
\"\"\"\n headers = _upper_header_keys(headers)\n channel_id = headers[X_GOOG_CHANNEL_ID]\n if channel.id != channel_id:\n raise errors.InvalidNotificationError(\n 'Channel id mismatch: %s != %s' % (channel.id, channel_id))\n else:\n message_number = int(headers[X_GOOG_MESSAGE_NUMBER])\n state = headers[X_GOOG_RESOURCE_STATE]\n resource_uri = headers[X_GOOG_RESOURCE_URI]\n resource_id = headers[X_GOOG_RESOURCE_ID]\n return Notification(message_number, state, resource_uri, resource_id)\n\n\[email protected](2)\ndef new_webhook_channel(url, token=None, expiration=None, params=None):\n \"\"\"Create a new webhook Channel.\n\n Args:\n url: str, URL to post notifications to.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each notification delivered\n over this channel.\n expiration: datetime.datetime, A time in the future when the channel\n should expire. Can also be None if the subscription should use the\n default expiration. Note that different services may have different\n limits on how long a subscription lasts. Check the response from the\n watch() method to see the value the service has set for an expiration\n time.\n params: dict, Extra parameters to pass on channel creation. Currently\n not used for webhook channels.\n \"\"\"\n expiration_ms = 0\n if expiration:\n delta = expiration - EPOCH\n expiration_ms = delta.microseconds/1000 + (\n delta.seconds + delta.days*24*3600)*1000\n if expiration_ms < 0:\n expiration_ms = 0\n\n return Channel('web_hook', str(uuid.uuid4()),\n token, url, expiration=expiration_ms,\n params=params)\n\n", "path": "googleapiclient/channel.py"}], "after_files": [{"content": "\"\"\"Channel notifications support.\n\nClasses and functions to support channel subscriptions and notifications\non those channels.\n\nNotes:\n - This code is based on experimental APIs and is subject to change.\n - Notification does not do deduplication of notification ids, that's up to\n the receiver.\n - Storing the Channel between calls is up to the caller.\n\n\nExample setting up a channel:\n\n # Create a new channel that gets notifications via webhook.\n channel = new_webhook_channel(\"https://example.com/my_web_hook\")\n\n # Store the channel, keyed by 'channel.id'. Store it before calling the\n # watch method because notifications may start arriving before the watch\n # method returns.\n ...\n\n resp = service.objects().watchAll(\n bucket=\"some_bucket_id\", body=channel.body()).execute()\n channel.update(resp)\n\n # Store the channel, keyed by 'channel.id'. Store it after being updated\n # since the resource_id value will now be correct, and that's needed to\n # stop a subscription.\n ...\n\n\nAn example Webhook implementation using webapp2. 
Note that webapp2 puts\nheaders in a case insensitive dictionary, as headers aren't guaranteed to\nalways be upper case.\n\n id = self.request.headers[X_GOOG_CHANNEL_ID]\n\n # Retrieve the channel by id.\n channel = ...\n\n # Parse notification from the headers, including validating the id.\n n = notification_from_headers(channel, self.request.headers)\n\n # Do app specific stuff with the notification here.\n if n.resource_state == 'sync':\n # Code to handle sync state.\n elif n.resource_state == 'exists':\n # Code to handle the exists state.\n elif n.resource_state == 'not_exists':\n # Code to handle the not exists state.\n\n\nExample of unsubscribing.\n\n service.channels().stop(channel.body())\n\"\"\"\nfrom __future__ import absolute_import\n\nimport datetime\nimport uuid\n\nfrom googleapiclient import errors\nimport six\n\n# Oauth2client < 3 has the positional helper in 'util', >= 3 has it\n# in '_helpers'.\ntry:\n from oauth2client import util\nexcept ImportError:\n from oauth2client import _helpers as util\n\n\n# The unix time epoch starts at midnight 1970.\nEPOCH = datetime.datetime.utcfromtimestamp(0)\n\n# Map the names of the parameters in the JSON channel description to\n# the parameter names we use in the Channel class.\nCHANNEL_PARAMS = {\n 'address': 'address',\n 'id': 'id',\n 'expiration': 'expiration',\n 'params': 'params',\n 'resourceId': 'resource_id',\n 'resourceUri': 'resource_uri',\n 'type': 'type',\n 'token': 'token',\n }\n\nX_GOOG_CHANNEL_ID = 'X-GOOG-CHANNEL-ID'\nX_GOOG_MESSAGE_NUMBER = 'X-GOOG-MESSAGE-NUMBER'\nX_GOOG_RESOURCE_STATE = 'X-GOOG-RESOURCE-STATE'\nX_GOOG_RESOURCE_URI = 'X-GOOG-RESOURCE-URI'\nX_GOOG_RESOURCE_ID = 'X-GOOG-RESOURCE-ID'\n\n\ndef _upper_header_keys(headers):\n new_headers = {}\n for k, v in six.iteritems(headers):\n new_headers[k.upper()] = v\n return new_headers\n\n\nclass Notification(object):\n \"\"\"A Notification from a Channel.\n\n Notifications are not usually constructed directly, but are returned\n from functions like notification_from_headers().\n\n Attributes:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored.\n uri: str, The address of the resource being monitored.\n resource_id: str, The unique identifier of the version of the resource at\n this event.\n \"\"\"\n @util.positional(5)\n def __init__(self, message_number, state, resource_uri, resource_id):\n \"\"\"Notification constructor.\n\n Args:\n message_number: int, The unique id number of this notification.\n state: str, The state of the resource being monitored. Can be one\n of \"exists\", \"not_exists\", or \"sync\".\n resource_uri: str, The address of the resource being monitored.\n resource_id: str, The identifier of the watched resource.\n \"\"\"\n self.message_number = message_number\n self.state = state\n self.resource_uri = resource_uri\n self.resource_id = resource_id\n\n\nclass Channel(object):\n \"\"\"A Channel for notifications.\n\n Usually not constructed directly, instead it is returned from helper\n functions like new_webhook_channel().\n\n Attributes:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. 
Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n\n @util.positional(5)\n def __init__(self, type, id, token, address, expiration=None,\n params=None, resource_id=\"\", resource_uri=\"\"):\n \"\"\"Create a new Channel.\n\n In user code, this Channel constructor will not typically be called\n manually since there are functions for creating channels for each specific\n type with a more customized set of arguments to pass.\n\n Args:\n type: str, The type of delivery mechanism used by this channel. For\n example, 'web_hook'.\n id: str, A UUID for the channel.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each event delivered\n over this channel.\n address: str, The address of the receiving entity where events are\n delivered. Specific to the channel type.\n expiration: int, The time, in milliseconds from the epoch, when this\n channel will expire.\n params: dict, A dictionary of string to string, with additional parameters\n controlling delivery channel behavior.\n resource_id: str, An opaque id that identifies the resource that is\n being watched. Stable across different API versions.\n resource_uri: str, The canonicalized ID of the watched resource.\n \"\"\"\n self.type = type\n self.id = id\n self.token = token\n self.address = address\n self.expiration = expiration\n self.params = params\n self.resource_id = resource_id\n self.resource_uri = resource_uri\n\n def body(self):\n \"\"\"Build a body from the Channel.\n\n Constructs a dictionary that's appropriate for passing into watch()\n methods as the value of body argument.\n\n Returns:\n A dictionary representation of the channel.\n \"\"\"\n result = {\n 'id': self.id,\n 'token': self.token,\n 'type': self.type,\n 'address': self.address\n }\n if self.params:\n result['params'] = self.params\n if self.resource_id:\n result['resourceId'] = self.resource_id\n if self.resource_uri:\n result['resourceUri'] = self.resource_uri\n if self.expiration:\n result['expiration'] = self.expiration\n\n return result\n\n def update(self, resp):\n \"\"\"Update a channel with information from the response of watch().\n\n When a request is sent to watch() a resource, the response returned\n from the watch() request is a dictionary with updated channel information,\n such as the resource_id, which is needed when stopping a subscription.\n\n Args:\n resp: dict, The response from a watch() method.\n \"\"\"\n for json_name, param_name in six.iteritems(CHANNEL_PARAMS):\n value = resp.get(json_name)\n if value is not None:\n setattr(self, param_name, value)\n\n\ndef notification_from_headers(channel, headers):\n \"\"\"Parse a notification from the webhook request headers, validate\n the notification, and return a Notification object.\n\n Args:\n channel: Channel, The channel that the notification is associated with.\n headers: dict, A dictionary like object that contains the request headers\n from the webhook HTTP request.\n\n Returns:\n A Notification object.\n\n Raises:\n errors.InvalidNotificationError if the notification is invalid.\n ValueError if the X-GOOG-MESSAGE-NUMBER can't be converted to an int.\n 
\"\"\"\n headers = _upper_header_keys(headers)\n channel_id = headers[X_GOOG_CHANNEL_ID]\n if channel.id != channel_id:\n raise errors.InvalidNotificationError(\n 'Channel id mismatch: %s != %s' % (channel.id, channel_id))\n else:\n message_number = int(headers[X_GOOG_MESSAGE_NUMBER])\n state = headers[X_GOOG_RESOURCE_STATE]\n resource_uri = headers[X_GOOG_RESOURCE_URI]\n resource_id = headers[X_GOOG_RESOURCE_ID]\n return Notification(message_number, state, resource_uri, resource_id)\n\n\[email protected](2)\ndef new_webhook_channel(url, token=None, expiration=None, params=None):\n \"\"\"Create a new webhook Channel.\n\n Args:\n url: str, URL to post notifications to.\n token: str, An arbitrary string associated with the channel that\n is delivered to the target address with each notification delivered\n over this channel.\n expiration: datetime.datetime, A time in the future when the channel\n should expire. Can also be None if the subscription should use the\n default expiration. Note that different services may have different\n limits on how long a subscription lasts. Check the response from the\n watch() method to see the value the service has set for an expiration\n time.\n params: dict, Extra parameters to pass on channel creation. Currently\n not used for webhook channels.\n \"\"\"\n expiration_ms = 0\n if expiration:\n delta = expiration - EPOCH\n expiration_ms = delta.microseconds/1000 + (\n delta.seconds + delta.days*24*3600)*1000\n if expiration_ms < 0:\n expiration_ms = 0\n\n return Channel('web_hook', str(uuid.uuid4()),\n token, url, expiration=expiration_ms,\n params=params)\n\n", "path": "googleapiclient/channel.py"}]} | 3,807 | 91 |
gh_patches_debug_24411 | rasdani/github-patches | git_diff | python__python-docs-es-40 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve the CONTRIBUTING guide
We have a small guide that explains the procedure. However, it would be good to improve it a bit so that it is easier to follow for people who don't know much about GitHub and the other tools involved: https://github.com/raulcd/python-docs-es/blob/3.7/CONTRIBUTING.rst
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conf.py`
Content:
```
1 # Sphinx configuration file.
2 #
3 # - import original configurations from cpython/Doc/conf.py
4 # - append the path considering the cpython submodule is at ./cpython
5 # - create the symbolic links under ./cpython/locale/es/LC_MESSAGES
6 # - make the build to work under Read the Docs
7 #
8 # The git submodule was created using this Stack Overflow answer
9 # to fetch only the commit that I needed and avoid clonning the whole history
10 # https://stackoverflow.com/a/27445058
11 #
12 # This can be built locally using `sphinx-build` by running
13 #
14 # $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html
15
16 import sys, os, time
17 sys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))
18 sys.path.append(os.path.abspath('cpython/Doc/includes'))
19
20 # Import all the Sphinx settings from cpython
21 sys.path.append(os.path.abspath('cpython/Doc'))
22 from conf import *
23
24 version = '3.7'
25 release = '3.7.7'
26
27 project = 'Python en Español'
28 copyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')
29
30 html_theme_path = ['cpython/Doc/tools']
31 templates_path = ['cpython/Doc/tools/templates']
32 html_static_path = ['cpython/Doc/tools/static']
33
34 os.system('mkdir -p cpython/locales/es/')
35 os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')
36
37 gettext_compact = False
38 locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir
39
40 def setup(app):
41 # Change the sourcedir programmatically because Read the Docs always call it with `.`
42 app.srcdir = 'cpython/Doc'
43
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conf.py b/conf.py
--- a/conf.py
+++ b/conf.py
@@ -33,10 +33,34 @@
os.system('mkdir -p cpython/locales/es/')
os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')
+os.system('ln -nfs `pwd`/CONTRIBUTING.rst cpython/Doc/CONTRIBUTING.rst')
gettext_compact = False
locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir
def setup(app):
+
+ def add_contributing_banner(app, doctree):
+ """
+ Insert a banner at the top of the index.
+
+ This way, we can easily communicate people to help with the translation,
+ pointing them to different resources.
+ """
+ from docutils import nodes, core
+
+ message = '¡Ayúdanos a traducir la documentación oficial de Python al Español! ' \
+ f'Puedes encontrar más información en `Como contribuir </es/{version}/CONTRIBUTING.html>`_ ' \
+ 'y así ayudarnos a acercar Python a más personas de habla hispana.'
+
+ paragraph = core.publish_doctree(message)[0]
+ banner = nodes.warning(ids=['contributing-banner'])
+ banner.append(paragraph)
+
+ for document in doctree.traverse(nodes.document):
+ document.insert(0, banner)
+
# Change the sourcedir programmatically because Read the Docs always call it with `.`
app.srcdir = 'cpython/Doc'
+
+ app.connect('doctree-read', add_contributing_banner)
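Rather than only editing CONTRIBUTING.rst, the patch links the guide into the built docs (the extra symlink) and injects a banner at the top of every translated page through Sphinx's `doctree-read` event. The same pattern, reduced to a standalone extension with a placeholder English message — the event name and docutils calls mirror the patch, while the module framing around them is illustrative:
```python
from docutils import core, nodes


def add_contributing_banner(app, doctree):
    """Prepend a warning admonition to every parsed document."""
    message = 'Help us translate the official documentation!'  # placeholder text
    paragraph = core.publish_doctree(message)[0]
    banner = nodes.warning(ids=['contributing-banner'])
    banner.append(paragraph)
    for document in doctree.traverse(nodes.document):
        document.insert(0, banner)


def setup(app):
    # 'doctree-read' fires once per source file, right after it is parsed.
    app.connect('doctree-read', add_contributing_banner)
    return {'parallel_read_safe': True}
```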
| {"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -33,10 +33,34 @@\n \n os.system('mkdir -p cpython/locales/es/')\n os.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n+os.system('ln -nfs `pwd`/CONTRIBUTING.rst cpython/Doc/CONTRIBUTING.rst')\n \n gettext_compact = False\n locale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n \n def setup(app):\n+\n+ def add_contributing_banner(app, doctree):\n+ \"\"\"\n+ Insert a banner at the top of the index.\n+\n+ This way, we can easily communicate people to help with the translation,\n+ pointing them to different resources.\n+ \"\"\"\n+ from docutils import nodes, core\n+\n+ message = '\u00a1Ay\u00fadanos a traducir la documentaci\u00f3n oficial de Python al Espa\u00f1ol! ' \\\n+ f'Puedes encontrar m\u00e1s informaci\u00f3n en `Como contribuir </es/{version}/CONTRIBUTING.html>`_ ' \\\n+ 'y as\u00ed ayudarnos a acercar Python a m\u00e1s personas de habla hispana.'\n+\n+ paragraph = core.publish_doctree(message)[0]\n+ banner = nodes.warning(ids=['contributing-banner'])\n+ banner.append(paragraph)\n+\n+ for document in doctree.traverse(nodes.document):\n+ document.insert(0, banner)\n+\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n+\n+ app.connect('doctree-read', add_contributing_banner)\n", "issue": "Mejorar la gu\u00eda de CONTRIBUTING\nTenemos una peque\u00f1a gu\u00eda que explica el procedimiento. Sin embargo, estar\u00eda bueno mejorarla un poco para que sea m\u00e1s f\u00e1cil de seguir para persona que no sepan mucho de github y dem\u00e1s herramientas: https://github.com/raulcd/python-docs-es/blob/3.7/CONTRIBUTING.rst\n", "before_files": [{"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -n -d _build/doctrees -D language=es . 
_build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\nversion = '3.7'\nrelease = '3.7.7'\n\nproject = 'Python en Espa\u00f1ol'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\ndef setup(app):\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n", "path": "conf.py"}], "after_files": [{"content": "# Sphinx configuration file.\n#\n# - import original configurations from cpython/Doc/conf.py\n# - append the path considering the cpython submodule is at ./cpython\n# - create the symbolic links under ./cpython/locale/es/LC_MESSAGES\n# - make the build to work under Read the Docs\n#\n# The git submodule was created using this Stack Overflow answer\n# to fetch only the commit that I needed and avoid clonning the whole history\n# https://stackoverflow.com/a/27445058\n#\n# This can be built locally using `sphinx-build` by running\n#\n# $ sphinx-build -b html -n -d _build/doctrees -D language=es . _build/html\n\nimport sys, os, time\nsys.path.append(os.path.abspath('cpython/Doc/tools/extensions'))\nsys.path.append(os.path.abspath('cpython/Doc/includes'))\n\n# Import all the Sphinx settings from cpython\nsys.path.append(os.path.abspath('cpython/Doc'))\nfrom conf import *\n\nversion = '3.7'\nrelease = '3.7.7'\n\nproject = 'Python en Espa\u00f1ol'\ncopyright = '2001-%s, Python Software Foundation' % time.strftime('%Y')\n\nhtml_theme_path = ['cpython/Doc/tools']\ntemplates_path = ['cpython/Doc/tools/templates']\nhtml_static_path = ['cpython/Doc/tools/static']\n\nos.system('mkdir -p cpython/locales/es/')\nos.system('ln -nfs `pwd` cpython/locales/es/LC_MESSAGES')\nos.system('ln -nfs `pwd`/CONTRIBUTING.rst cpython/Doc/CONTRIBUTING.rst')\n\ngettext_compact = False\nlocale_dirs = ['../locales', 'cpython/locales'] # relative to the sourcedir\n\ndef setup(app):\n\n def add_contributing_banner(app, doctree):\n \"\"\"\n Insert a banner at the top of the index.\n\n This way, we can easily communicate people to help with the translation,\n pointing them to different resources.\n \"\"\"\n from docutils import nodes, core\n\n message = '\u00a1Ay\u00fadanos a traducir la documentaci\u00f3n oficial de Python al Espa\u00f1ol! ' \\\n f'Puedes encontrar m\u00e1s informaci\u00f3n en `Como contribuir </es/{version}/CONTRIBUTING.html>`_ ' \\\n 'y as\u00ed ayudarnos a acercar Python a m\u00e1s personas de habla hispana.'\n\n paragraph = core.publish_doctree(message)[0]\n banner = nodes.warning(ids=['contributing-banner'])\n banner.append(paragraph)\n\n for document in doctree.traverse(nodes.document):\n document.insert(0, banner)\n\n # Change the sourcedir programmatically because Read the Docs always call it with `.`\n app.srcdir = 'cpython/Doc'\n\n app.connect('doctree-read', add_contributing_banner)\n", "path": "conf.py"}]} | 808 | 376 |
gh_patches_debug_18972 | rasdani/github-patches | git_diff | holoviz__panel-343 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
visual changes to subway map
With the current subway map it's not apparent where we are on the map. Suggestions to improve the visual effect include making the horizontal line thinner and/or changing the 'gray' location indicator to something more obvious like red or blue.
--- END ISSUE ---
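The map is drawn in `Pipeline._make_progress` from an `hv.Path` (the horizontal line) overlaid with `hv.Points` whose `active` value drives the fill colour, so both suggestions reduce to option changes on those two elements. A standalone sketch of the idea, assuming the same HoloViews/Bokeh stack the pipeline already uses (the green hex value is just one possible "more obvious" choice):
```python
import numpy as np
import holoviews as hv
hv.extension('bokeh')

stages, current = 4, 1
# Thinner horizontal line.
line = hv.Path([[(0, 0), (stages - 1, 0)]]).options(line_width=6, color='black')
# Colour the current stage instead of leaving it gray.
active = [1 if v == current else 0 for v in range(stages)]
points = hv.Points((np.arange(stages), np.zeros(stages), active), vdims=['active']).options(
    color_index='active',
    cmap={0: 'white', 1: '#5cb85c'},
    line_color='black', size=20)
line * points
```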
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/pipeline.py`
Content:
```
1 from __future__ import absolute_import, division, unicode_literals
2
3 import os
4
5 import param
6 import numpy as np
7
8 from .layout import Row, Column, HSpacer, VSpacer
9 from .pane import HoloViews, Markdown, Pane
10 from .param import Param
11 from .util import param_reprs
12
13
14 class Pipeline(param.Parameterized):
15 """
16 Allows connecting a linear series of panels to define a workflow.
17 Each stage in a pipeline should declare a panel method which
18 returns a panel object that can be displayed and annotate its
19 outputs using the param.output decorator.
20 """
21
22 debug = param.Boolean(default=False, precedence=-1, doc="""
23 Whether to raise errors, useful for debugging while building an application.""")
24
25 inherit_params = param.Boolean(default=True, precedence=-1, doc="""
26 Whether parameters should be inherited between pipeline stages""")
27
28 next = param.Action(default=lambda x: x.param.trigger('next'))
29
30 previous = param.Action(default=lambda x: x.param.trigger('previous'))
31
32 def __init__(self, stages=[], **params):
33 try:
34 import holoviews as hv
35 except:
36 raise ImportError('Pipeline requires holoviews to be installed')
37
38 self._stages = list(stages)
39 self._stage = 0
40 super(Pipeline, self).__init__(**params)
41 self._error = Markdown('')
42 self._states = []
43 self._state = None
44 self._progress_sel = hv.streams.Selection1D()
45 self._progress_sel.add_subscriber(self._set_stage)
46 prev_button = Param(self.param, parameters=['previous'], show_name=False)
47 next_button = Param(self.param, parameters=['next'], show_name=False)
48 prev_button.layout[0].disabled = True
49 self._progress_bar = Row(self._make_progress, prev_button, next_button)
50 spinner = Pane(os.path.join(os.path.dirname(__file__), 'assets', 'spinner.gif'))
51 self._spinner_layout = Row(HSpacer(), Column(VSpacer(), spinner, VSpacer()), HSpacer())
52 stage_layout = Row()
53 if len(stages):
54 stage_layout.append(self._init_stage())
55 self._layout = Column(self._progress_bar, self._error, stage_layout)
56
57 def add_stage(self, name, stage):
58 self._validate(stage)
59 self._stages.append((name, stage))
60 if len(self._stages) == 1:
61 self._layout[2].append(self._init_stage())
62
63 def _validate(self, stage):
64 if any(stage is s for n, s in self._stages):
65 raise ValueError('Stage %s is already in pipeline' % stage)
66 elif not ((isinstance(stage, type) and issubclass(stage, param.Parameterized))
67 or isinstance(stage, param.Parameterized)):
68 raise ValueError('Pipeline stages must be Parameterized classes or instances.')
69
70 def __repr__(self):
71 repr_str = 'Pipeline:'
72 for i, (name, stage) in enumerate(self._stages):
73 if isinstance(stage, param.Parameterized):
74 cls_name = type(stage).__name__
75 else:
76 cls_name = stage.__name__
77 params = ', '.join(param_reprs(stage))
78 repr_str += '\n [%d] %s: %s(%s)' % (i, name, cls_name, params)
79 return repr_str
80
81 def __getitem__(self, index):
82 return self._stages[index][1]
83
84 @property
85 def layout(self):
86 self._progress_bar[0] = self._make_progress
87 return self._layout
88
89
90 def _init_stage(self):
91 name, stage = self._stages[self._stage]
92 kwargs = {}
93 if self._state:
94 results = {}
95 for name, (_, method, index) in self._state.param.outputs().items():
96 if name not in stage.param:
97 continue
98 if method not in results:
99 results[method] = method()
100 result = results[method]
101 if index is not None:
102 result = result[index]
103 kwargs[name] = result
104 if self.inherit_params:
105 params = [k for k, v in self._state.param.objects('existing').items()
106 if v.precedence is None or v.precedence >= 0]
107 kwargs.update({k: v for k, v in self._state.param.get_param_values()
108 if k in stage.param and k != 'name' and k in params})
109
110 if isinstance(stage, param.Parameterized):
111 stage.set_param(**kwargs)
112 self._state = stage
113 else:
114 self._state = stage(**kwargs)
115 if len(self._states) <= self._stage:
116 self._states.append(self._state)
117 else:
118 self._states[self._stage] = self._state
119 return self._state.panel()
120
121 def _set_stage(self, index):
122 idx = index[0]
123 steps = idx-self._stage
124 if steps < 0:
125 for i in range(abs(steps)):
126 e = self._previous()
127 if e:
128 break
129 else:
130 for i in range(steps):
131 e = self._next()
132 if e:
133 break
134
135 def _update_button(self):
136 # Disable previous button
137 if self._stage == 0:
138 self._progress_bar[1].layout[0].disabled = True
139 else:
140 self._progress_bar[1].layout[0].disabled = False
141
142 # Disable next button
143 if self._stage == len(self._stages)-1:
144 self._progress_bar[2].layout[0].disabled = True
145 else:
146 self._progress_bar[2].layout[0].disabled = False
147
148 @param.depends('next', watch=True)
149 def _next(self):
150 self._stage += 1
151 prev_state = self._layout[2][0]
152 self._layout[2][0] = self._spinner_layout
153 try:
154 new_stage = self._init_stage()
155 self._layout[2][0] = new_stage
156 self._update_button()
157 except Exception as e:
158 self._stage -= 1
159 self._error.object = ('Next stage raised following error:\n\n\t%s: %s'
160 % (type(e).__name__, str(e)))
161 self._layout[2][0] = prev_state
162 if self.debug:
163 raise e
164 return e
165 else:
166 self._error.object = ''
167
168 @param.depends('previous', watch=True)
169 def _previous(self):
170 self._stage -= 1
171 try:
172 self._state = self._states[self._stage]
173 self._layout[2][0] = self._state.panel()
174 self._update_button()
175 except Exception as e:
176 self._stage += 1
177 self._error.object = ('Previous stage raised following error:\n\n\t%s: %s'
178 % (type(e).__name__, str(e)))
179 if self.debug:
180 raise e
181 else:
182 self._error.object = ''
183
184 @param.depends('previous', 'next')
185 def _make_progress(self):
186 import holoviews as hv
187 import holoviews.plotting.bokeh # noqa
188 stages = len(self._stages)
189 line = hv.Path([[(0, 0), (stages-1, 0)]]).options(
190 line_width=10, color='black', backend='bokeh'
191 )
192 vals = np.arange(stages)
193 active = [1 if v == self._stage else 0 for v in vals]
194 points = hv.Points((vals, np.zeros(stages), active), vdims=['active']).options(
195 color_index='active', line_color='black', cmap={0: 'white', 1: 'gray'},
196 show_legend=False, size=20, default_tools=[], tools=['tap'],
197 nonselection_alpha=1, backend='bokeh'
198 )
199 point_labels = points.add_dimension('text', 0, [n for n, _ in self._stages], vdim=True)
200 labels = hv.Labels(point_labels).options(yoffset=-2.5, backend='bokeh')
201 self._progress_sel.source = points
202 hv_plot = (line * points * labels).options(
203 xaxis=None, yaxis=None, width=800, show_frame=False, toolbar=None,
204 height=80, xlim=(-0.5, stages-0.5), ylim=(-4, 1.5),
205 clone=False, backend='bokeh'
206 )
207 return HoloViews(hv_plot, backend='bokeh')
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/pipeline.py b/panel/pipeline.py
--- a/panel/pipeline.py
+++ b/panel/pipeline.py
@@ -187,12 +187,12 @@
import holoviews.plotting.bokeh # noqa
stages = len(self._stages)
line = hv.Path([[(0, 0), (stages-1, 0)]]).options(
- line_width=10, color='black', backend='bokeh'
+ line_width=6, color='black', backend='bokeh'
)
vals = np.arange(stages)
active = [1 if v == self._stage else 0 for v in vals]
points = hv.Points((vals, np.zeros(stages), active), vdims=['active']).options(
- color_index='active', line_color='black', cmap={0: 'white', 1: 'gray'},
+ color_index='active', line_color='black', cmap={0: 'white', 1: '#5cb85c'},
show_legend=False, size=20, default_tools=[], tools=['tap'],
nonselection_alpha=1, backend='bokeh'
)
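A quick way to eyeball the restyled map is to build a toy pipeline and render its layout; the snippet below is hypothetical (the stage class, stage names and the `.show()` call are illustrative, not taken from the repository):
```python
import param
import panel as pn


class Stage(param.Parameterized):
    def panel(self):
        return pn.pane.Markdown('### stage body')


pipe = pn.pipeline.Pipeline(stages=[('First', Stage), ('Second', Stage), ('Third', Stage)])
pipe.layout.show()  # the active stage now renders as a green dot on a thinner black line
```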
| {"golden_diff": "diff --git a/panel/pipeline.py b/panel/pipeline.py\n--- a/panel/pipeline.py\n+++ b/panel/pipeline.py\n@@ -187,12 +187,12 @@\n import holoviews.plotting.bokeh # noqa\n stages = len(self._stages)\n line = hv.Path([[(0, 0), (stages-1, 0)]]).options(\n- line_width=10, color='black', backend='bokeh'\n+ line_width=6, color='black', backend='bokeh'\n )\n vals = np.arange(stages)\n active = [1 if v == self._stage else 0 for v in vals]\n points = hv.Points((vals, np.zeros(stages), active), vdims=['active']).options(\n- color_index='active', line_color='black', cmap={0: 'white', 1: 'gray'},\n+ color_index='active', line_color='black', cmap={0: 'white', 1: '#5cb85c'},\n show_legend=False, size=20, default_tools=[], tools=['tap'],\n nonselection_alpha=1, backend='bokeh'\n )\n", "issue": "visual changes to subway map\nWith the current subway map its not apparent where we are on the map. Suggestions to improve the visual effect include making the horizontal line thinner and/or the 'gray' location indicator to something more obvious like red or blue. \n", "before_files": [{"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport os\n\nimport param\nimport numpy as np\n\nfrom .layout import Row, Column, HSpacer, VSpacer\nfrom .pane import HoloViews, Markdown, Pane\nfrom .param import Param\nfrom .util import param_reprs\n\n\nclass Pipeline(param.Parameterized):\n \"\"\"\n Allows connecting a linear series of panels to define a workflow.\n Each stage in a pipeline should declare a panel method which\n returns a panel object that can be displayed and annotate its\n outputs using the param.output decorator.\n \"\"\"\n\n debug = param.Boolean(default=False, precedence=-1, doc=\"\"\"\n Whether to raise errors, useful for debugging while building an application.\"\"\")\n\n inherit_params = param.Boolean(default=True, precedence=-1, doc=\"\"\"\n Whether parameters should be inherited between pipeline stages\"\"\")\n\n next = param.Action(default=lambda x: x.param.trigger('next'))\n\n previous = param.Action(default=lambda x: x.param.trigger('previous'))\n\n def __init__(self, stages=[], **params):\n try:\n import holoviews as hv\n except:\n raise ImportError('Pipeline requires holoviews to be installed')\n\n self._stages = list(stages)\n self._stage = 0\n super(Pipeline, self).__init__(**params)\n self._error = Markdown('')\n self._states = []\n self._state = None\n self._progress_sel = hv.streams.Selection1D()\n self._progress_sel.add_subscriber(self._set_stage)\n prev_button = Param(self.param, parameters=['previous'], show_name=False)\n next_button = Param(self.param, parameters=['next'], show_name=False)\n prev_button.layout[0].disabled = True\n self._progress_bar = Row(self._make_progress, prev_button, next_button)\n spinner = Pane(os.path.join(os.path.dirname(__file__), 'assets', 'spinner.gif'))\n self._spinner_layout = Row(HSpacer(), Column(VSpacer(), spinner, VSpacer()), HSpacer())\n stage_layout = Row()\n if len(stages):\n stage_layout.append(self._init_stage())\n self._layout = Column(self._progress_bar, self._error, stage_layout)\n\n def add_stage(self, name, stage):\n self._validate(stage)\n self._stages.append((name, stage))\n if len(self._stages) == 1:\n self._layout[2].append(self._init_stage())\n\n def _validate(self, stage):\n if any(stage is s for n, s in self._stages):\n raise ValueError('Stage %s is already in pipeline' % stage)\n elif not ((isinstance(stage, type) and issubclass(stage, param.Parameterized))\n or isinstance(stage, 
param.Parameterized)):\n raise ValueError('Pipeline stages must be Parameterized classes or instances.')\n\n def __repr__(self):\n repr_str = 'Pipeline:'\n for i, (name, stage) in enumerate(self._stages):\n if isinstance(stage, param.Parameterized):\n cls_name = type(stage).__name__\n else:\n cls_name = stage.__name__\n params = ', '.join(param_reprs(stage))\n repr_str += '\\n [%d] %s: %s(%s)' % (i, name, cls_name, params)\n return repr_str\n\n def __getitem__(self, index):\n return self._stages[index][1]\n\n @property\n def layout(self):\n self._progress_bar[0] = self._make_progress\n return self._layout\n\n\n def _init_stage(self):\n name, stage = self._stages[self._stage]\n kwargs = {}\n if self._state:\n results = {}\n for name, (_, method, index) in self._state.param.outputs().items():\n if name not in stage.param:\n continue\n if method not in results:\n results[method] = method()\n result = results[method]\n if index is not None:\n result = result[index]\n kwargs[name] = result\n if self.inherit_params:\n params = [k for k, v in self._state.param.objects('existing').items()\n if v.precedence is None or v.precedence >= 0]\n kwargs.update({k: v for k, v in self._state.param.get_param_values()\n if k in stage.param and k != 'name' and k in params})\n\n if isinstance(stage, param.Parameterized):\n stage.set_param(**kwargs)\n self._state = stage\n else:\n self._state = stage(**kwargs)\n if len(self._states) <= self._stage:\n self._states.append(self._state)\n else:\n self._states[self._stage] = self._state\n return self._state.panel()\n\n def _set_stage(self, index):\n idx = index[0]\n steps = idx-self._stage\n if steps < 0:\n for i in range(abs(steps)):\n e = self._previous()\n if e:\n break\n else:\n for i in range(steps):\n e = self._next()\n if e:\n break\n\n def _update_button(self):\n # Disable previous button\n if self._stage == 0:\n self._progress_bar[1].layout[0].disabled = True\n else:\n self._progress_bar[1].layout[0].disabled = False\n\n # Disable next button\n if self._stage == len(self._stages)-1:\n self._progress_bar[2].layout[0].disabled = True\n else:\n self._progress_bar[2].layout[0].disabled = False\n\n @param.depends('next', watch=True)\n def _next(self):\n self._stage += 1\n prev_state = self._layout[2][0]\n self._layout[2][0] = self._spinner_layout\n try:\n new_stage = self._init_stage()\n self._layout[2][0] = new_stage\n self._update_button()\n except Exception as e:\n self._stage -= 1\n self._error.object = ('Next stage raised following error:\\n\\n\\t%s: %s'\n % (type(e).__name__, str(e)))\n self._layout[2][0] = prev_state\n if self.debug:\n raise e\n return e\n else:\n self._error.object = ''\n\n @param.depends('previous', watch=True)\n def _previous(self):\n self._stage -= 1\n try:\n self._state = self._states[self._stage]\n self._layout[2][0] = self._state.panel()\n self._update_button()\n except Exception as e:\n self._stage += 1\n self._error.object = ('Previous stage raised following error:\\n\\n\\t%s: %s'\n % (type(e).__name__, str(e)))\n if self.debug:\n raise e\n else:\n self._error.object = ''\n\n @param.depends('previous', 'next')\n def _make_progress(self):\n import holoviews as hv\n import holoviews.plotting.bokeh # noqa\n stages = len(self._stages)\n line = hv.Path([[(0, 0), (stages-1, 0)]]).options(\n line_width=10, color='black', backend='bokeh'\n )\n vals = np.arange(stages)\n active = [1 if v == self._stage else 0 for v in vals]\n points = hv.Points((vals, np.zeros(stages), active), vdims=['active']).options(\n color_index='active', 
line_color='black', cmap={0: 'white', 1: 'gray'},\n show_legend=False, size=20, default_tools=[], tools=['tap'],\n nonselection_alpha=1, backend='bokeh'\n )\n point_labels = points.add_dimension('text', 0, [n for n, _ in self._stages], vdim=True)\n labels = hv.Labels(point_labels).options(yoffset=-2.5, backend='bokeh')\n self._progress_sel.source = points\n hv_plot = (line * points * labels).options(\n xaxis=None, yaxis=None, width=800, show_frame=False, toolbar=None,\n height=80, xlim=(-0.5, stages-0.5), ylim=(-4, 1.5),\n clone=False, backend='bokeh'\n )\n return HoloViews(hv_plot, backend='bokeh')\n", "path": "panel/pipeline.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, unicode_literals\n\nimport os\n\nimport param\nimport numpy as np\n\nfrom .layout import Row, Column, HSpacer, VSpacer\nfrom .pane import HoloViews, Markdown, Pane\nfrom .param import Param\nfrom .util import param_reprs\n\n\nclass Pipeline(param.Parameterized):\n \"\"\"\n Allows connecting a linear series of panels to define a workflow.\n Each stage in a pipeline should declare a panel method which\n returns a panel object that can be displayed and annotate its\n outputs using the param.output decorator.\n \"\"\"\n\n debug = param.Boolean(default=False, precedence=-1, doc=\"\"\"\n Whether to raise errors, useful for debugging while building an application.\"\"\")\n\n inherit_params = param.Boolean(default=True, precedence=-1, doc=\"\"\"\n Whether parameters should be inherited between pipeline stages\"\"\")\n\n next = param.Action(default=lambda x: x.param.trigger('next'))\n\n previous = param.Action(default=lambda x: x.param.trigger('previous'))\n\n def __init__(self, stages=[], **params):\n try:\n import holoviews as hv\n except:\n raise ImportError('Pipeline requires holoviews to be installed')\n\n self._stages = list(stages)\n self._stage = 0\n super(Pipeline, self).__init__(**params)\n self._error = Markdown('')\n self._states = []\n self._state = None\n self._progress_sel = hv.streams.Selection1D()\n self._progress_sel.add_subscriber(self._set_stage)\n prev_button = Param(self.param, parameters=['previous'], show_name=False)\n next_button = Param(self.param, parameters=['next'], show_name=False)\n prev_button.layout[0].disabled = True\n self._progress_bar = Row(self._make_progress, prev_button, next_button)\n spinner = Pane(os.path.join(os.path.dirname(__file__), 'assets', 'spinner.gif'))\n self._spinner_layout = Row(HSpacer(), Column(VSpacer(), spinner, VSpacer()), HSpacer())\n stage_layout = Row()\n if len(stages):\n stage_layout.append(self._init_stage())\n self._layout = Column(self._progress_bar, self._error, stage_layout)\n\n def add_stage(self, name, stage):\n self._validate(stage)\n self._stages.append((name, stage))\n if len(self._stages) == 1:\n self._layout[2].append(self._init_stage())\n\n def _validate(self, stage):\n if any(stage is s for n, s in self._stages):\n raise ValueError('Stage %s is already in pipeline' % stage)\n elif not ((isinstance(stage, type) and issubclass(stage, param.Parameterized))\n or isinstance(stage, param.Parameterized)):\n raise ValueError('Pipeline stages must be Parameterized classes or instances.')\n\n def __repr__(self):\n repr_str = 'Pipeline:'\n for i, (name, stage) in enumerate(self._stages):\n if isinstance(stage, param.Parameterized):\n cls_name = type(stage).__name__\n else:\n cls_name = stage.__name__\n params = ', '.join(param_reprs(stage))\n repr_str += '\\n [%d] %s: %s(%s)' % (i, name, cls_name, params)\n return 
repr_str\n\n def __getitem__(self, index):\n return self._stages[index][1]\n\n @property\n def layout(self):\n self._progress_bar[0] = self._make_progress\n return self._layout\n\n\n def _init_stage(self):\n name, stage = self._stages[self._stage]\n kwargs = {}\n if self._state:\n results = {}\n for name, (_, method, index) in self._state.param.outputs().items():\n if name not in stage.param:\n continue\n if method not in results:\n results[method] = method()\n result = results[method]\n if index is not None:\n result = result[index]\n kwargs[name] = result\n if self.inherit_params:\n params = [k for k, v in self._state.param.objects('existing').items()\n if v.precedence is None or v.precedence >= 0]\n kwargs.update({k: v for k, v in self._state.param.get_param_values()\n if k in stage.param and k != 'name' and k in params})\n\n if isinstance(stage, param.Parameterized):\n stage.set_param(**kwargs)\n self._state = stage\n else:\n self._state = stage(**kwargs)\n if len(self._states) <= self._stage:\n self._states.append(self._state)\n else:\n self._states[self._stage] = self._state\n return self._state.panel()\n\n def _set_stage(self, index):\n idx = index[0]\n steps = idx-self._stage\n if steps < 0:\n for i in range(abs(steps)):\n e = self._previous()\n if e:\n break\n else:\n for i in range(steps):\n e = self._next()\n if e:\n break\n\n def _update_button(self):\n # Disable previous button\n if self._stage == 0:\n self._progress_bar[1].layout[0].disabled = True\n else:\n self._progress_bar[1].layout[0].disabled = False\n\n # Disable next button\n if self._stage == len(self._stages)-1:\n self._progress_bar[2].layout[0].disabled = True\n else:\n self._progress_bar[2].layout[0].disabled = False\n\n @param.depends('next', watch=True)\n def _next(self):\n self._stage += 1\n prev_state = self._layout[2][0]\n self._layout[2][0] = self._spinner_layout\n try:\n new_stage = self._init_stage()\n self._layout[2][0] = new_stage\n self._update_button()\n except Exception as e:\n self._stage -= 1\n self._error.object = ('Next stage raised following error:\\n\\n\\t%s: %s'\n % (type(e).__name__, str(e)))\n self._layout[2][0] = prev_state\n if self.debug:\n raise e\n return e\n else:\n self._error.object = ''\n\n @param.depends('previous', watch=True)\n def _previous(self):\n self._stage -= 1\n try:\n self._state = self._states[self._stage]\n self._layout[2][0] = self._state.panel()\n self._update_button()\n except Exception as e:\n self._stage += 1\n self._error.object = ('Previous stage raised following error:\\n\\n\\t%s: %s'\n % (type(e).__name__, str(e)))\n if self.debug:\n raise e\n else:\n self._error.object = ''\n\n @param.depends('previous', 'next')\n def _make_progress(self):\n import holoviews as hv\n import holoviews.plotting.bokeh # noqa\n stages = len(self._stages)\n line = hv.Path([[(0, 0), (stages-1, 0)]]).options(\n line_width=6, color='black', backend='bokeh'\n )\n vals = np.arange(stages)\n active = [1 if v == self._stage else 0 for v in vals]\n points = hv.Points((vals, np.zeros(stages), active), vdims=['active']).options(\n color_index='active', line_color='black', cmap={0: 'white', 1: '#5cb85c'},\n show_legend=False, size=20, default_tools=[], tools=['tap'],\n nonselection_alpha=1, backend='bokeh'\n )\n point_labels = points.add_dimension('text', 0, [n for n, _ in self._stages], vdim=True)\n labels = hv.Labels(point_labels).options(yoffset=-2.5, backend='bokeh')\n self._progress_sel.source = points\n hv_plot = (line * points * labels).options(\n xaxis=None, yaxis=None, width=800, 
show_frame=False, toolbar=None,\n height=80, xlim=(-0.5, stages-0.5), ylim=(-4, 1.5),\n clone=False, backend='bokeh'\n )\n return HoloViews(hv_plot, backend='bokeh')\n", "path": "panel/pipeline.py"}]} | 2,689 | 264 |
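
For orientation, the record above patches a HoloViews-based pipeline progress indicator (a thinner connector line and a green `#5cb85c` marker for the active stage instead of gray). The following minimal sketch shows that options pattern in isolation, assuming HoloViews with the Bokeh backend is installed; the stage count and variable names are made up for illustration and the snippet is not part of the dataset record itself.

```python
import numpy as np
import holoviews as hv

hv.extension("bokeh")

# Hypothetical 4-stage progress indicator mirroring the patched styling:
# a thinner connector line and a green fill for the currently active stage.
stages = 4
active_stage = 1

line = hv.Path([[(0, 0), (stages - 1, 0)]]).options(line_width=6, color="black")

vals = np.arange(stages)
active = [1 if v == active_stage else 0 for v in vals]
points = hv.Points((vals, np.zeros(stages), active), vdims=["active"]).options(
    color_index="active",
    line_color="black",
    cmap={0: "white", 1: "#5cb85c"},  # white = pending stage, green = active stage
    size=20,
    show_legend=False,
)

progress = line * points  # overlay that renders as a simple "subway map" bar
```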
gh_patches_debug_18863 | rasdani/github-patches | git_diff | vega__altair-2785 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Equal default chart dimensions
Currently the default theme in Altair specifies an aspect ratio of 4/3, with a width of 400 and a height of 300 (for continuous data):

The default in VegaLite is to make both dimensions of equal length, which I think makes sense since it spreads the data over the same amount of pixels on both the X and Y axis. This could have benefits in terms of making it easier to fairly compare the distribution of the data between the two plotted variables instead of it appearing more spread out over the X axis due to the increased chart width. The default in Vega-Lite is to use 200 px for the width and height which I think is a bit small, but setting both to 300 px looks good:

What do you all think about changing the default width in Altair to 300 px, so that both the X and Y axes occupy the same amount of pixels by default? Are there benefits of having an unequal aspect ratio like the current default that I am missing (maybe that it is more similar to the screen aspect ratio)? I don't think this is a major concern, but thought I would bring it up and see if others also regard it as a small improvement or just a matter of personal aesthetics/taste.
--- END ISSUE ---
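
A minimal sketch of how the equal-dimension default discussed above can be approximated from user code today, by registering a custom theme through Altair's theme registry; the theme name `equal_dims` is invented for this example and the snippet is illustrative rather than part of the upstream change.

```python
import altair as alt

def equal_dims_theme():
    # Square 300x300 default frame for continuous scales, per the proposal above.
    return {"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}}

alt.themes.register("equal_dims", equal_dims_theme)
alt.themes.enable("equal_dims")

# Charts created afterwards pick up the square default, e.g.:
# alt.Chart(df).mark_point().encode(x="a:Q", y="b:Q")
```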
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `altair/vegalite/v5/theme.py`
Content:
```
1 """Tools for enabling and registering chart themes"""
2
3 from ...utils.theme import ThemeRegistry
4
5 VEGA_THEMES = [
6 "ggplot2",
7 "quartz",
8 "vox",
9 "fivethirtyeight",
10 "dark",
11 "latimes",
12 "urbaninstitute",
13 ]
14
15
16 class VegaTheme(object):
17 """Implementation of a builtin vega theme."""
18
19 def __init__(self, theme):
20 self.theme = theme
21
22 def __call__(self):
23 return {
24 "usermeta": {"embedOptions": {"theme": self.theme}},
25 "config": {"view": {"continuousWidth": 400, "continuousHeight": 300}},
26 }
27
28 def __repr__(self):
29 return "VegaTheme({!r})".format(self.theme)
30
31
32 # The entry point group that can be used by other packages to declare other
33 # renderers that will be auto-detected. Explicit registration is also
34 # allowed by the PluginRegistery API.
35 ENTRY_POINT_GROUP = "altair.vegalite.v5.theme" # type: str
36 themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)
37
38 themes.register(
39 "default",
40 lambda: {"config": {"view": {"continuousWidth": 400, "continuousHeight": 300}}},
41 )
42 themes.register(
43 "opaque",
44 lambda: {
45 "config": {
46 "background": "white",
47 "view": {"continuousWidth": 400, "continuousHeight": 300},
48 }
49 },
50 )
51 themes.register("none", lambda: {})
52
53 for theme in VEGA_THEMES:
54 themes.register(theme, VegaTheme(theme))
55
56 themes.enable("default")
57
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/altair/vegalite/v5/theme.py b/altair/vegalite/v5/theme.py
--- a/altair/vegalite/v5/theme.py
+++ b/altair/vegalite/v5/theme.py
@@ -22,7 +22,7 @@
def __call__(self):
return {
"usermeta": {"embedOptions": {"theme": self.theme}},
- "config": {"view": {"continuousWidth": 400, "continuousHeight": 300}},
+ "config": {"view": {"continuousWidth": 300, "continuousHeight": 300}},
}
def __repr__(self):
@@ -37,14 +37,14 @@
themes.register(
"default",
- lambda: {"config": {"view": {"continuousWidth": 400, "continuousHeight": 300}}},
+ lambda: {"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}},
)
themes.register(
"opaque",
lambda: {
"config": {
"background": "white",
- "view": {"continuousWidth": 400, "continuousHeight": 300},
+ "view": {"continuousWidth": 300, "continuousHeight": 300},
}
},
)
| {"golden_diff": "diff --git a/altair/vegalite/v5/theme.py b/altair/vegalite/v5/theme.py\n--- a/altair/vegalite/v5/theme.py\n+++ b/altair/vegalite/v5/theme.py\n@@ -22,7 +22,7 @@\n def __call__(self):\n return {\n \"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n- \"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}},\n+ \"config\": {\"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300}},\n }\n \n def __repr__(self):\n@@ -37,14 +37,14 @@\n \n themes.register(\n \"default\",\n- lambda: {\"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}}},\n+ lambda: {\"config\": {\"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300}}},\n )\n themes.register(\n \"opaque\",\n lambda: {\n \"config\": {\n \"background\": \"white\",\n- \"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300},\n+ \"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300},\n }\n },\n )\n", "issue": "Equal default chart dimensions\nCurrently the default theme in Altair specifies an aspect ratio of 4/3, with a width of 400 and a height of 300 (for continuous data):\r\n\r\n\r\n\r\nThe default in VegaLite is to make both dimensions of equal length, which I think makes sense since it spreads the data over the same amount of pixels on both the X and Y axis. This could have benefits in terms of making it easier to fairly compare the distribution of the data between the two plotted variables instead of it appearing more spread out over the X axis due to the increase chart width. The default in Vega-Lite is to use 200 px for the width and height which I think is a bit small, but setting both to 300 px looks good:\r\n\r\n\r\n\r\nWhat do you all think about changing the default width in Altair to 300 px, so that both the X and Y axes occupy the same amount of pixels by default? Are there benefits of having an unequal aspect ratio like the current default that I am missing (maybe that it is more similar to the screen aspect ratio)? I don't think this is a major concern, but thought I would bring it up and see if others also regard it as a small improvement or just a matter of personal aesthetics/taste.\n", "before_files": [{"content": "\"\"\"Tools for enabling and registering chart themes\"\"\"\n\nfrom ...utils.theme import ThemeRegistry\n\nVEGA_THEMES = [\n \"ggplot2\",\n \"quartz\",\n \"vox\",\n \"fivethirtyeight\",\n \"dark\",\n \"latimes\",\n \"urbaninstitute\",\n]\n\n\nclass VegaTheme(object):\n \"\"\"Implementation of a builtin vega theme.\"\"\"\n\n def __init__(self, theme):\n self.theme = theme\n\n def __call__(self):\n return {\n \"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n \"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}},\n }\n\n def __repr__(self):\n return \"VegaTheme({!r})\".format(self.theme)\n\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. 
Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = \"altair.vegalite.v5.theme\" # type: str\nthemes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nthemes.register(\n \"default\",\n lambda: {\"config\": {\"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300}}},\n)\nthemes.register(\n \"opaque\",\n lambda: {\n \"config\": {\n \"background\": \"white\",\n \"view\": {\"continuousWidth\": 400, \"continuousHeight\": 300},\n }\n },\n)\nthemes.register(\"none\", lambda: {})\n\nfor theme in VEGA_THEMES:\n themes.register(theme, VegaTheme(theme))\n\nthemes.enable(\"default\")\n", "path": "altair/vegalite/v5/theme.py"}], "after_files": [{"content": "\"\"\"Tools for enabling and registering chart themes\"\"\"\n\nfrom ...utils.theme import ThemeRegistry\n\nVEGA_THEMES = [\n \"ggplot2\",\n \"quartz\",\n \"vox\",\n \"fivethirtyeight\",\n \"dark\",\n \"latimes\",\n \"urbaninstitute\",\n]\n\n\nclass VegaTheme(object):\n \"\"\"Implementation of a builtin vega theme.\"\"\"\n\n def __init__(self, theme):\n self.theme = theme\n\n def __call__(self):\n return {\n \"usermeta\": {\"embedOptions\": {\"theme\": self.theme}},\n \"config\": {\"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300}},\n }\n\n def __repr__(self):\n return \"VegaTheme({!r})\".format(self.theme)\n\n\n# The entry point group that can be used by other packages to declare other\n# renderers that will be auto-detected. Explicit registration is also\n# allowed by the PluginRegistery API.\nENTRY_POINT_GROUP = \"altair.vegalite.v5.theme\" # type: str\nthemes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP)\n\nthemes.register(\n \"default\",\n lambda: {\"config\": {\"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300}}},\n)\nthemes.register(\n \"opaque\",\n lambda: {\n \"config\": {\n \"background\": \"white\",\n \"view\": {\"continuousWidth\": 300, \"continuousHeight\": 300},\n }\n },\n)\nthemes.register(\"none\", lambda: {})\n\nfor theme in VEGA_THEMES:\n themes.register(theme, VegaTheme(theme))\n\nthemes.enable(\"default\")\n", "path": "altair/vegalite/v5/theme.py"}]} | 1,125 | 308 |
gh_patches_debug_22548 | rasdani/github-patches | git_diff | scalableminds__webknossos-libs-236 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NIFTI Division by zero error
When converting a NIFTI file consisting only of zeros, the to_target_datatype function divides by 0.
--- END ISSUE ---
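
To make the failure mode concrete, here is a standalone reduction of the rescaling step for float input, with a guard that mirrors the fix applied later in this record; the helper name `rescale_to_uint8` is invented and the snippet only assumes NumPy.

```python
import numpy as np

def rescale_to_uint8(data: np.ndarray) -> np.ndarray:
    # Condensed from to_target_datatype: float32 input is divided by its maximum.
    factor = data.max()
    if factor == 0:
        # An all-zero volume would otherwise divide by zero and produce NaNs
        # before the integer cast; fall back to a factor of 1 instead.
        factor = 1
    return (data / factor).astype(np.uint8)

print(rescale_to_uint8(np.zeros((2, 2), dtype=np.float32)))  # stays all zeros
```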
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wkcuber/convert_nifti.py`
Content:
```
1 import time
2 import logging
3 import numpy as np
4 from argparse import ArgumentParser
5 from pathlib import Path
6 import nibabel as nib
7
8 from wkcuber.api.Dataset import WKDataset, TiffDataset
9 from .utils import (
10 add_verbose_flag,
11 setup_logging,
12 add_scale_flag,
13 pad_or_crop_to_size_and_topleft,
14 parse_bounding_box,
15 )
16
17 from wkcuber.utils import DEFAULT_WKW_FILE_LEN, DEFAULT_WKW_VOXELS_PER_BLOCK
18
19
20 def create_parser():
21 parser = ArgumentParser()
22
23 parser.add_argument(
24 "source_path",
25 help="Path to NIFTY file or to a directory if multiple NIFTI files should be converted. "
26 "In the latter case, also see --color_file and --segmentation_file.",
27 )
28
29 parser.add_argument(
30 "target_path", help="Output directory for the generated WKW dataset."
31 )
32
33 parser.add_argument(
34 "--layer_name",
35 "-l",
36 help="Name of the cubed layer (color or segmentation).",
37 default="color",
38 )
39
40 parser.add_argument(
41 "--dtype", "-d", help="Target datatype (e.g. uint8, uint16).", default="uint8"
42 )
43
44 parser.add_argument(
45 "--color_file",
46 help="When converting folder, name of file to become color layer",
47 default=None,
48 )
49
50 parser.add_argument(
51 "--segmentation_file",
52 help="When converting folder, name of file to become segmentation layer",
53 default=None,
54 )
55
56 parser.add_argument(
57 "--write_tiff",
58 help="Output tiff dataset instead of wkw.",
59 default=False,
60 action="store_true",
61 )
62
63 parser.add_argument(
64 "--use_orientation_header",
65 help="Use orientation information from header to interpret the input data (should be tried if output orientation seems to be wrong).",
66 default=False,
67 action="store_true",
68 )
69
70 parser.add_argument(
71 "--enforce_bounding_box",
72 help="The BoundingBox to which the input data should be written. If the input data is too small, it will be padded. If it's too large, it will be cropped. The input format is x,y,z,width,height,depth.",
73 default=None,
74 type=parse_bounding_box,
75 )
76
77 parser.add_argument(
78 "--flip_axes",
79 help="The axes at which should be flipped. Input format is a comma separated list of axis indices. For example, 1,2,3 will flip the x, y and z axes.",
80 default=None,
81 )
82
83 add_scale_flag(parser, required=False)
84 add_verbose_flag(parser)
85
86 return parser
87
88
89 def to_target_datatype(
90 data: np.ndarray, target_dtype, is_probably_binary: bool
91 ) -> np.ndarray:
92 if is_probably_binary:
93 logging.info(
94 f"Casting directly to {target_dtype}, as input seems to be binary."
95 )
96 return data.astype(np.dtype(target_dtype))
97
98 if data.dtype == np.dtype("float32"):
99 factor = data.max()
100 elif data.dtype == np.dtype("float64"):
101 factor = data.max() / np.iinfo(target_dtype).max
102 else:
103 factor = np.iinfo(data.dtype).max / np.iinfo(target_dtype).max
104
105 return (data / factor).astype(np.dtype(target_dtype))
106
107
108 def convert_nifti(
109 source_nifti_path,
110 target_path,
111 layer_name,
112 dtype,
113 scale,
114 mag=1,
115 file_len=DEFAULT_WKW_FILE_LEN,
116 bbox_to_enforce=None,
117 write_tiff=False,
118 use_orientation_header=False,
119 flip_axes=None,
120 ):
121 voxels_per_cube = file_len * DEFAULT_WKW_VOXELS_PER_BLOCK
122 ref_time = time.time()
123
124 source_nifti = nib.load(str(source_nifti_path.resolve()))
125
126 if use_orientation_header:
127 # Get canonical representation of data to incorporate
128 # encoded transformations. Needs to be flipped later
129 # to match the coordinate system of WKW.
130 source_nifti = nib.funcs.as_closest_canonical(source_nifti, enforce_diag=False)
131
132 cube_data = np.array(source_nifti.get_fdata())
133
134 is_probably_binary = np.unique(cube_data).shape[0] == 2
135 assume_segmentation_layer = (
136 False
137 ) # Since webKnossos does not support multiple segmention layers, this is hardcoded to False right now.
138
139 max_cell_id_args = (
140 {"largest_segment_id": int(np.max(cube_data) + 1)}
141 if assume_segmentation_layer
142 else {}
143 )
144 category_type = "segmentation" if assume_segmentation_layer else "color"
145 logging.debug(f"Assuming {category_type} as layer type for {layer_name}")
146
147 if len(source_nifti.shape) == 3:
148 cube_data = cube_data.reshape((1,) + source_nifti.shape)
149
150 elif len(source_nifti.shape) == 4:
151 cube_data = np.transpose(cube_data, (3, 0, 1, 2))
152
153 else:
154 logging.warning(
155 "Converting of {} failed! Too many or too less dimensions".format(
156 source_nifti_path
157 )
158 )
159
160 return
161
162 if use_orientation_header:
163 # Flip y and z to transform data into wkw's coordinate system.
164 cube_data = np.flip(cube_data, (2, 3))
165
166 if flip_axes:
167 cube_data = np.flip(cube_data, flip_axes)
168
169 if scale is None:
170 scale = tuple(map(float, source_nifti.header["pixdim"][:3]))
171 logging.info(f"Using scale: {scale}")
172 cube_data = to_target_datatype(cube_data, dtype, is_probably_binary)
173
174 # everything needs to be padded to
175 if bbox_to_enforce is not None:
176 target_topleft = np.array((0,) + tuple(bbox_to_enforce.topleft))
177 target_size = np.array((1,) + tuple(bbox_to_enforce.size))
178
179 cube_data = pad_or_crop_to_size_and_topleft(
180 cube_data, target_size, target_topleft
181 )
182
183 # Writing wkw compressed requires files of shape (voxels_per_cube, voxels_per_cube, voxels_per_cube)
184 # Pad data accordingly
185 padding_offset = voxels_per_cube - np.array(cube_data.shape[1:4]) % voxels_per_cube
186 padding_offset = (0, 0, 0)
187 cube_data = np.pad(
188 cube_data,
189 (
190 (0, 0),
191 (0, int(padding_offset[0])),
192 (0, int(padding_offset[1])),
193 (0, int(padding_offset[2])),
194 ),
195 )
196
197 if write_tiff:
198 ds = TiffDataset.get_or_create(target_path, scale=scale or (1, 1, 1))
199 layer = ds.get_or_add_layer(
200 layer_name, category_type, np.dtype(dtype), **max_cell_id_args
201 )
202 mag = layer.get_or_add_mag("1")
203
204 mag.write(cube_data.squeeze())
205 else:
206 ds = WKDataset.get_or_create(target_path, scale=scale or (1, 1, 1))
207 layer = ds.get_or_add_layer(
208 layer_name, category_type, np.dtype(dtype), **max_cell_id_args
209 )
210 mag = layer.get_or_add_mag("1", file_len=file_len)
211 mag.write(cube_data)
212
213 logging.debug(
214 "Converting of {} took {:.8f}s".format(
215 source_nifti_path, time.time() - ref_time
216 )
217 )
218
219
220 def convert_folder_nifti(
221 source_folder_path,
222 target_path,
223 color_subpath,
224 segmentation_subpath,
225 scale,
226 use_orientation_header=False,
227 bbox_to_enforce=None,
228 write_tiff=False,
229 flip_axes=None,
230 ):
231 paths = list(source_folder_path.rglob("**/*.nii"))
232
233 color_path = None
234 segmentation_path = None
235 if color_subpath is not None:
236 color_path = target_path / color_subpath
237 if color_path not in paths:
238 logging.warning(
239 "Specified color file {} not in source path {}!".format(
240 color_path, source_folder_path
241 )
242 )
243
244 if segmentation_subpath is not None:
245 segmentation_path = target_path / segmentation_subpath
246 if segmentation_path not in paths:
247 logging.warning(
248 "Specified segmentation_file file {} not in source path {}!".format(
249 segmentation_path, segmentation_path
250 )
251 )
252
253 logging.info("Segmentation file will also use uint8 as a datatype.")
254
255 conversion_args = {
256 "scale": scale,
257 "write_tiff": write_tiff,
258 "bbox_to_enforce": bbox_to_enforce,
259 "use_orientation_header": use_orientation_header,
260 "flip_axes": flip_axes,
261 }
262 for path in paths:
263 if path == color_path:
264 convert_nifti(path, target_path, "color", "uint8", **conversion_args)
265 elif path == segmentation_path:
266 convert_nifti(path, target_path, "segmentation", "uint8", **conversion_args)
267 else:
268 convert_nifti(path, target_path, path.stem, "uint8", **conversion_args)
269
270
271 def main():
272 args = create_parser().parse_args()
273 setup_logging(args)
274
275 source_path = Path(args.source_path)
276
277 flip_axes = None
278 if args.flip_axes is not None:
279 flip_axes = tuple(int(x) for x in args.flip_axes.split(","))
280 for index in flip_axes:
281 assert (
282 0 <= index <= 3
283 ), "flip_axes parameter must only contain indices between 0 and 3."
284
285 conversion_args = {
286 "scale": args.scale,
287 "write_tiff": args.write_tiff,
288 "bbox_to_enforce": args.enforce_bounding_box,
289 "use_orientation_header": args.use_orientation_header,
290 "flip_axes": flip_axes,
291 }
292
293 if source_path.is_dir():
294 convert_folder_nifti(
295 source_path,
296 Path(args.target_path),
297 args.color_file,
298 args.segmentation_file,
299 **conversion_args,
300 )
301 else:
302 convert_nifti(
303 source_path,
304 Path(args.target_path),
305 args.layer_name,
306 args.dtype,
307 **conversion_args,
308 )
309
310
311 if __name__ == "__main__":
312 main()
313
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/wkcuber/convert_nifti.py b/wkcuber/convert_nifti.py
--- a/wkcuber/convert_nifti.py
+++ b/wkcuber/convert_nifti.py
@@ -1,21 +1,22 @@
-import time
import logging
-import numpy as np
+import time
from argparse import ArgumentParser
from pathlib import Path
+
import nibabel as nib
+import numpy as np
-from wkcuber.api.Dataset import WKDataset, TiffDataset
-from .utils import (
- add_verbose_flag,
- setup_logging,
+from wkcuber.api.Dataset import TiffDataset, WKDataset
+from wkcuber.utils import (
+ DEFAULT_WKW_FILE_LEN,
+ DEFAULT_WKW_VOXELS_PER_BLOCK,
add_scale_flag,
+ add_verbose_flag,
pad_or_crop_to_size_and_topleft,
parse_bounding_box,
+ setup_logging,
)
-from wkcuber.utils import DEFAULT_WKW_FILE_LEN, DEFAULT_WKW_VOXELS_PER_BLOCK
-
def create_parser():
parser = ArgumentParser()
@@ -102,6 +103,10 @@
else:
factor = np.iinfo(data.dtype).max / np.iinfo(target_dtype).max
+ if data.max() == 0:
+ logging.warning("Not rescaling data since maximum is 0")
+ factor = 1
+
return (data / factor).astype(np.dtype(target_dtype))
| {"golden_diff": "diff --git a/wkcuber/convert_nifti.py b/wkcuber/convert_nifti.py\n--- a/wkcuber/convert_nifti.py\n+++ b/wkcuber/convert_nifti.py\n@@ -1,21 +1,22 @@\n-import time\n import logging\n-import numpy as np\n+import time\n from argparse import ArgumentParser\n from pathlib import Path\n+\n import nibabel as nib\n+import numpy as np\n \n-from wkcuber.api.Dataset import WKDataset, TiffDataset\n-from .utils import (\n- add_verbose_flag,\n- setup_logging,\n+from wkcuber.api.Dataset import TiffDataset, WKDataset\n+from wkcuber.utils import (\n+ DEFAULT_WKW_FILE_LEN,\n+ DEFAULT_WKW_VOXELS_PER_BLOCK,\n add_scale_flag,\n+ add_verbose_flag,\n pad_or_crop_to_size_and_topleft,\n parse_bounding_box,\n+ setup_logging,\n )\n \n-from wkcuber.utils import DEFAULT_WKW_FILE_LEN, DEFAULT_WKW_VOXELS_PER_BLOCK\n-\n \n def create_parser():\n parser = ArgumentParser()\n@@ -102,6 +103,10 @@\n else:\n factor = np.iinfo(data.dtype).max / np.iinfo(target_dtype).max\n \n+ if data.max() == 0:\n+ logging.warning(\"Not rescaling data since maximum is 0\")\n+ factor = 1\n+\n return (data / factor).astype(np.dtype(target_dtype))\n", "issue": "NIFTI Division by zero error\nWhen converting a nifti file only consisting of zeros, the to_target_datatype function divides by 0.\n", "before_files": [{"content": "import time\nimport logging\nimport numpy as np\nfrom argparse import ArgumentParser\nfrom pathlib import Path\nimport nibabel as nib\n\nfrom wkcuber.api.Dataset import WKDataset, TiffDataset\nfrom .utils import (\n add_verbose_flag,\n setup_logging,\n add_scale_flag,\n pad_or_crop_to_size_and_topleft,\n parse_bounding_box,\n)\n\nfrom wkcuber.utils import DEFAULT_WKW_FILE_LEN, DEFAULT_WKW_VOXELS_PER_BLOCK\n\n\ndef create_parser():\n parser = ArgumentParser()\n\n parser.add_argument(\n \"source_path\",\n help=\"Path to NIFTY file or to a directory if multiple NIFTI files should be converted. \"\n \"In the latter case, also see --color_file and --segmentation_file.\",\n )\n\n parser.add_argument(\n \"target_path\", help=\"Output directory for the generated WKW dataset.\"\n )\n\n parser.add_argument(\n \"--layer_name\",\n \"-l\",\n help=\"Name of the cubed layer (color or segmentation).\",\n default=\"color\",\n )\n\n parser.add_argument(\n \"--dtype\", \"-d\", help=\"Target datatype (e.g. uint8, uint16).\", default=\"uint8\"\n )\n\n parser.add_argument(\n \"--color_file\",\n help=\"When converting folder, name of file to become color layer\",\n default=None,\n )\n\n parser.add_argument(\n \"--segmentation_file\",\n help=\"When converting folder, name of file to become segmentation layer\",\n default=None,\n )\n\n parser.add_argument(\n \"--write_tiff\",\n help=\"Output tiff dataset instead of wkw.\",\n default=False,\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"--use_orientation_header\",\n help=\"Use orientation information from header to interpret the input data (should be tried if output orientation seems to be wrong).\",\n default=False,\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"--enforce_bounding_box\",\n help=\"The BoundingBox to which the input data should be written. If the input data is too small, it will be padded. If it's too large, it will be cropped. The input format is x,y,z,width,height,depth.\",\n default=None,\n type=parse_bounding_box,\n )\n\n parser.add_argument(\n \"--flip_axes\",\n help=\"The axes at which should be flipped. Input format is a comma separated list of axis indices. 
For example, 1,2,3 will flip the x, y and z axes.\",\n default=None,\n )\n\n add_scale_flag(parser, required=False)\n add_verbose_flag(parser)\n\n return parser\n\n\ndef to_target_datatype(\n data: np.ndarray, target_dtype, is_probably_binary: bool\n) -> np.ndarray:\n if is_probably_binary:\n logging.info(\n f\"Casting directly to {target_dtype}, as input seems to be binary.\"\n )\n return data.astype(np.dtype(target_dtype))\n\n if data.dtype == np.dtype(\"float32\"):\n factor = data.max()\n elif data.dtype == np.dtype(\"float64\"):\n factor = data.max() / np.iinfo(target_dtype).max\n else:\n factor = np.iinfo(data.dtype).max / np.iinfo(target_dtype).max\n\n return (data / factor).astype(np.dtype(target_dtype))\n\n\ndef convert_nifti(\n source_nifti_path,\n target_path,\n layer_name,\n dtype,\n scale,\n mag=1,\n file_len=DEFAULT_WKW_FILE_LEN,\n bbox_to_enforce=None,\n write_tiff=False,\n use_orientation_header=False,\n flip_axes=None,\n):\n voxels_per_cube = file_len * DEFAULT_WKW_VOXELS_PER_BLOCK\n ref_time = time.time()\n\n source_nifti = nib.load(str(source_nifti_path.resolve()))\n\n if use_orientation_header:\n # Get canonical representation of data to incorporate\n # encoded transformations. Needs to be flipped later\n # to match the coordinate system of WKW.\n source_nifti = nib.funcs.as_closest_canonical(source_nifti, enforce_diag=False)\n\n cube_data = np.array(source_nifti.get_fdata())\n\n is_probably_binary = np.unique(cube_data).shape[0] == 2\n assume_segmentation_layer = (\n False\n ) # Since webKnossos does not support multiple segmention layers, this is hardcoded to False right now.\n\n max_cell_id_args = (\n {\"largest_segment_id\": int(np.max(cube_data) + 1)}\n if assume_segmentation_layer\n else {}\n )\n category_type = \"segmentation\" if assume_segmentation_layer else \"color\"\n logging.debug(f\"Assuming {category_type} as layer type for {layer_name}\")\n\n if len(source_nifti.shape) == 3:\n cube_data = cube_data.reshape((1,) + source_nifti.shape)\n\n elif len(source_nifti.shape) == 4:\n cube_data = np.transpose(cube_data, (3, 0, 1, 2))\n\n else:\n logging.warning(\n \"Converting of {} failed! 
Too many or too less dimensions\".format(\n source_nifti_path\n )\n )\n\n return\n\n if use_orientation_header:\n # Flip y and z to transform data into wkw's coordinate system.\n cube_data = np.flip(cube_data, (2, 3))\n\n if flip_axes:\n cube_data = np.flip(cube_data, flip_axes)\n\n if scale is None:\n scale = tuple(map(float, source_nifti.header[\"pixdim\"][:3]))\n logging.info(f\"Using scale: {scale}\")\n cube_data = to_target_datatype(cube_data, dtype, is_probably_binary)\n\n # everything needs to be padded to\n if bbox_to_enforce is not None:\n target_topleft = np.array((0,) + tuple(bbox_to_enforce.topleft))\n target_size = np.array((1,) + tuple(bbox_to_enforce.size))\n\n cube_data = pad_or_crop_to_size_and_topleft(\n cube_data, target_size, target_topleft\n )\n\n # Writing wkw compressed requires files of shape (voxels_per_cube, voxels_per_cube, voxels_per_cube)\n # Pad data accordingly\n padding_offset = voxels_per_cube - np.array(cube_data.shape[1:4]) % voxels_per_cube\n padding_offset = (0, 0, 0)\n cube_data = np.pad(\n cube_data,\n (\n (0, 0),\n (0, int(padding_offset[0])),\n (0, int(padding_offset[1])),\n (0, int(padding_offset[2])),\n ),\n )\n\n if write_tiff:\n ds = TiffDataset.get_or_create(target_path, scale=scale or (1, 1, 1))\n layer = ds.get_or_add_layer(\n layer_name, category_type, np.dtype(dtype), **max_cell_id_args\n )\n mag = layer.get_or_add_mag(\"1\")\n\n mag.write(cube_data.squeeze())\n else:\n ds = WKDataset.get_or_create(target_path, scale=scale or (1, 1, 1))\n layer = ds.get_or_add_layer(\n layer_name, category_type, np.dtype(dtype), **max_cell_id_args\n )\n mag = layer.get_or_add_mag(\"1\", file_len=file_len)\n mag.write(cube_data)\n\n logging.debug(\n \"Converting of {} took {:.8f}s\".format(\n source_nifti_path, time.time() - ref_time\n )\n )\n\n\ndef convert_folder_nifti(\n source_folder_path,\n target_path,\n color_subpath,\n segmentation_subpath,\n scale,\n use_orientation_header=False,\n bbox_to_enforce=None,\n write_tiff=False,\n flip_axes=None,\n):\n paths = list(source_folder_path.rglob(\"**/*.nii\"))\n\n color_path = None\n segmentation_path = None\n if color_subpath is not None:\n color_path = target_path / color_subpath\n if color_path not in paths:\n logging.warning(\n \"Specified color file {} not in source path {}!\".format(\n color_path, source_folder_path\n )\n )\n\n if segmentation_subpath is not None:\n segmentation_path = target_path / segmentation_subpath\n if segmentation_path not in paths:\n logging.warning(\n \"Specified segmentation_file file {} not in source path {}!\".format(\n segmentation_path, segmentation_path\n )\n )\n\n logging.info(\"Segmentation file will also use uint8 as a datatype.\")\n\n conversion_args = {\n \"scale\": scale,\n \"write_tiff\": write_tiff,\n \"bbox_to_enforce\": bbox_to_enforce,\n \"use_orientation_header\": use_orientation_header,\n \"flip_axes\": flip_axes,\n }\n for path in paths:\n if path == color_path:\n convert_nifti(path, target_path, \"color\", \"uint8\", **conversion_args)\n elif path == segmentation_path:\n convert_nifti(path, target_path, \"segmentation\", \"uint8\", **conversion_args)\n else:\n convert_nifti(path, target_path, path.stem, \"uint8\", **conversion_args)\n\n\ndef main():\n args = create_parser().parse_args()\n setup_logging(args)\n\n source_path = Path(args.source_path)\n\n flip_axes = None\n if args.flip_axes is not None:\n flip_axes = tuple(int(x) for x in args.flip_axes.split(\",\"))\n for index in flip_axes:\n assert (\n 0 <= index <= 3\n ), \"flip_axes parameter must only 
contain indices between 0 and 3.\"\n\n conversion_args = {\n \"scale\": args.scale,\n \"write_tiff\": args.write_tiff,\n \"bbox_to_enforce\": args.enforce_bounding_box,\n \"use_orientation_header\": args.use_orientation_header,\n \"flip_axes\": flip_axes,\n }\n\n if source_path.is_dir():\n convert_folder_nifti(\n source_path,\n Path(args.target_path),\n args.color_file,\n args.segmentation_file,\n **conversion_args,\n )\n else:\n convert_nifti(\n source_path,\n Path(args.target_path),\n args.layer_name,\n args.dtype,\n **conversion_args,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "wkcuber/convert_nifti.py"}], "after_files": [{"content": "import logging\nimport time\nfrom argparse import ArgumentParser\nfrom pathlib import Path\n\nimport nibabel as nib\nimport numpy as np\n\nfrom wkcuber.api.Dataset import TiffDataset, WKDataset\nfrom wkcuber.utils import (\n DEFAULT_WKW_FILE_LEN,\n DEFAULT_WKW_VOXELS_PER_BLOCK,\n add_scale_flag,\n add_verbose_flag,\n pad_or_crop_to_size_and_topleft,\n parse_bounding_box,\n setup_logging,\n)\n\n\ndef create_parser():\n parser = ArgumentParser()\n\n parser.add_argument(\n \"source_path\",\n help=\"Path to NIFTY file or to a directory if multiple NIFTI files should be converted. \"\n \"In the latter case, also see --color_file and --segmentation_file.\",\n )\n\n parser.add_argument(\n \"target_path\", help=\"Output directory for the generated WKW dataset.\"\n )\n\n parser.add_argument(\n \"--layer_name\",\n \"-l\",\n help=\"Name of the cubed layer (color or segmentation).\",\n default=\"color\",\n )\n\n parser.add_argument(\n \"--dtype\", \"-d\", help=\"Target datatype (e.g. uint8, uint16).\", default=\"uint8\"\n )\n\n parser.add_argument(\n \"--color_file\",\n help=\"When converting folder, name of file to become color layer\",\n default=None,\n )\n\n parser.add_argument(\n \"--segmentation_file\",\n help=\"When converting folder, name of file to become segmentation layer\",\n default=None,\n )\n\n parser.add_argument(\n \"--write_tiff\",\n help=\"Output tiff dataset instead of wkw.\",\n default=False,\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"--use_orientation_header\",\n help=\"Use orientation information from header to interpret the input data (should be tried if output orientation seems to be wrong).\",\n default=False,\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"--enforce_bounding_box\",\n help=\"The BoundingBox to which the input data should be written. If the input data is too small, it will be padded. If it's too large, it will be cropped. The input format is x,y,z,width,height,depth.\",\n default=None,\n type=parse_bounding_box,\n )\n\n parser.add_argument(\n \"--flip_axes\",\n help=\"The axes at which should be flipped. Input format is a comma separated list of axis indices. 
For example, 1,2,3 will flip the x, y and z axes.\",\n default=None,\n )\n\n add_scale_flag(parser, required=False)\n add_verbose_flag(parser)\n\n return parser\n\n\ndef to_target_datatype(\n data: np.ndarray, target_dtype, is_probably_binary: bool\n) -> np.ndarray:\n if is_probably_binary:\n logging.info(\n f\"Casting directly to {target_dtype}, as input seems to be binary.\"\n )\n return data.astype(np.dtype(target_dtype))\n\n if data.dtype == np.dtype(\"float32\"):\n factor = data.max()\n elif data.dtype == np.dtype(\"float64\"):\n factor = data.max() / np.iinfo(target_dtype).max\n else:\n factor = np.iinfo(data.dtype).max / np.iinfo(target_dtype).max\n\n if data.max() == 0:\n logging.warning(\"Not rescaling data since maximum is 0\")\n factor = 1\n\n return (data / factor).astype(np.dtype(target_dtype))\n\n\ndef convert_nifti(\n source_nifti_path,\n target_path,\n layer_name,\n dtype,\n scale,\n mag=1,\n file_len=DEFAULT_WKW_FILE_LEN,\n bbox_to_enforce=None,\n write_tiff=False,\n use_orientation_header=False,\n flip_axes=None,\n):\n voxels_per_cube = file_len * DEFAULT_WKW_VOXELS_PER_BLOCK\n ref_time = time.time()\n\n source_nifti = nib.load(str(source_nifti_path.resolve()))\n\n if use_orientation_header:\n # Get canonical representation of data to incorporate\n # encoded transformations. Needs to be flipped later\n # to match the coordinate system of WKW.\n source_nifti = nib.funcs.as_closest_canonical(source_nifti, enforce_diag=False)\n\n cube_data = np.array(source_nifti.get_fdata())\n\n is_probably_binary = np.unique(cube_data).shape[0] == 2\n assume_segmentation_layer = (\n False\n ) # Since webKnossos does not support multiple segmention layers, this is hardcoded to False right now.\n\n max_cell_id_args = (\n {\"largest_segment_id\": int(np.max(cube_data) + 1)}\n if assume_segmentation_layer\n else {}\n )\n category_type = \"segmentation\" if assume_segmentation_layer else \"color\"\n logging.debug(f\"Assuming {category_type} as layer type for {layer_name}\")\n\n if len(source_nifti.shape) == 3:\n cube_data = cube_data.reshape((1,) + source_nifti.shape)\n\n elif len(source_nifti.shape) == 4:\n cube_data = np.transpose(cube_data, (3, 0, 1, 2))\n\n else:\n logging.warning(\n \"Converting of {} failed! 
Too many or too less dimensions\".format(\n source_nifti_path\n )\n )\n\n return\n\n if use_orientation_header:\n # Flip y and z to transform data into wkw's coordinate system.\n cube_data = np.flip(cube_data, (2, 3))\n\n if flip_axes:\n cube_data = np.flip(cube_data, flip_axes)\n\n if scale is None:\n scale = tuple(map(float, source_nifti.header[\"pixdim\"][:3]))\n logging.info(f\"Using scale: {scale}\")\n cube_data = to_target_datatype(cube_data, dtype, is_probably_binary)\n\n # everything needs to be padded to\n if bbox_to_enforce is not None:\n target_topleft = np.array((0,) + tuple(bbox_to_enforce.topleft))\n target_size = np.array((1,) + tuple(bbox_to_enforce.size))\n\n cube_data = pad_or_crop_to_size_and_topleft(\n cube_data, target_size, target_topleft\n )\n\n # Writing wkw compressed requires files of shape (voxels_per_cube, voxels_per_cube, voxels_per_cube)\n # Pad data accordingly\n padding_offset = voxels_per_cube - np.array(cube_data.shape[1:4]) % voxels_per_cube\n padding_offset = (0, 0, 0)\n cube_data = np.pad(\n cube_data,\n (\n (0, 0),\n (0, int(padding_offset[0])),\n (0, int(padding_offset[1])),\n (0, int(padding_offset[2])),\n ),\n )\n\n if write_tiff:\n ds = TiffDataset.get_or_create(target_path, scale=scale or (1, 1, 1))\n layer = ds.get_or_add_layer(\n layer_name, category_type, np.dtype(dtype), **max_cell_id_args\n )\n mag = layer.get_or_add_mag(\"1\")\n\n mag.write(cube_data.squeeze())\n else:\n ds = WKDataset.get_or_create(target_path, scale=scale or (1, 1, 1))\n layer = ds.get_or_add_layer(\n layer_name, category_type, np.dtype(dtype), **max_cell_id_args\n )\n mag = layer.get_or_add_mag(\"1\", file_len=file_len)\n mag.write(cube_data)\n\n logging.debug(\n \"Converting of {} took {:.8f}s\".format(\n source_nifti_path, time.time() - ref_time\n )\n )\n\n\ndef convert_folder_nifti(\n source_folder_path,\n target_path,\n color_subpath,\n segmentation_subpath,\n scale,\n use_orientation_header=False,\n bbox_to_enforce=None,\n write_tiff=False,\n flip_axes=None,\n):\n paths = list(source_folder_path.rglob(\"**/*.nii\"))\n\n color_path = None\n segmentation_path = None\n if color_subpath is not None:\n color_path = target_path / color_subpath\n if color_path not in paths:\n logging.warning(\n \"Specified color file {} not in source path {}!\".format(\n color_path, source_folder_path\n )\n )\n\n if segmentation_subpath is not None:\n segmentation_path = target_path / segmentation_subpath\n if segmentation_path not in paths:\n logging.warning(\n \"Specified segmentation_file file {} not in source path {}!\".format(\n segmentation_path, segmentation_path\n )\n )\n\n logging.info(\"Segmentation file will also use uint8 as a datatype.\")\n\n conversion_args = {\n \"scale\": scale,\n \"write_tiff\": write_tiff,\n \"bbox_to_enforce\": bbox_to_enforce,\n \"use_orientation_header\": use_orientation_header,\n \"flip_axes\": flip_axes,\n }\n for path in paths:\n if path == color_path:\n convert_nifti(path, target_path, \"color\", \"uint8\", **conversion_args)\n elif path == segmentation_path:\n convert_nifti(path, target_path, \"segmentation\", \"uint8\", **conversion_args)\n else:\n convert_nifti(path, target_path, path.stem, \"uint8\", **conversion_args)\n\n\ndef main():\n args = create_parser().parse_args()\n setup_logging(args)\n\n source_path = Path(args.source_path)\n\n flip_axes = None\n if args.flip_axes is not None:\n flip_axes = tuple(int(x) for x in args.flip_axes.split(\",\"))\n for index in flip_axes:\n assert (\n 0 <= index <= 3\n ), \"flip_axes parameter must only 
contain indices between 0 and 3.\"\n\n conversion_args = {\n \"scale\": args.scale,\n \"write_tiff\": args.write_tiff,\n \"bbox_to_enforce\": args.enforce_bounding_box,\n \"use_orientation_header\": args.use_orientation_header,\n \"flip_axes\": flip_axes,\n }\n\n if source_path.is_dir():\n convert_folder_nifti(\n source_path,\n Path(args.target_path),\n args.color_file,\n args.segmentation_file,\n **conversion_args,\n )\n else:\n convert_nifti(\n source_path,\n Path(args.target_path),\n args.layer_name,\n args.dtype,\n **conversion_args,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "wkcuber/convert_nifti.py"}]} | 3,391 | 321 |
gh_patches_debug_31862 | rasdani/github-patches | git_diff | freedomofpress__securedrop-4935 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python3 admin virtualenv is broken if improperly set up (doesn't contain pip3)
## Description
Initially discovered while reviewing https://github.com/freedomofpress/securedrop/pull/4927#issuecomment-543763957, if the admin virtualenv is improperly set up, one must manually remove `admin/.venv3` to fix the virtualenv.
## Steps to Reproduce
This is very hard to reproduce, but you can do it by pressing ctrl+c when the virtualenv is being set up as part of an initial `./securedrop-admin setup` run. Subsequent `./securedrop-admin setup` invocations will no longer work.
## Expected Behavior
`./securedrop-admin setup` should not fail
## Actual Behavior
`./securedrop-admin setup` fails with the following output:
```
amnesia@amnesia:~/Persistent/securedrop$ ./securedrop-admin setup
INFO: Virtualenv already exists, not creating
INFO: Checking Python dependencies for securedrop-admin
ERROR: Failed to install pip dependencies. Check network connection and try again.
```
--- END ISSUE ---
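
The hazard described above is a half-created virtualenv directory that later runs treat as complete. A minimal, self-contained sketch of the general remedy (removing the partial directory when creation fails so the next run starts clean) is shown below; the path, function name, and error handling are illustrative and not SecureDrop's actual code.

```python
import os
import shutil
import subprocess

def create_venv(venv_dir: str) -> None:
    # Treat the environment as usable only if pip3 actually exists inside it.
    if os.path.exists(os.path.join(venv_dir, "bin", "pip3")):
        return
    try:
        subprocess.check_call(["virtualenv", "--python=python3", venv_dir])
    except (subprocess.CalledProcessError, KeyboardInterrupt):
        # If creation fails or is interrupted (e.g. ctrl+c), drop the partial
        # directory so a later run does not mistake it for a working venv.
        if os.path.exists(venv_dir):
            shutil.rmtree(venv_dir)
        raise

# create_venv(os.path.expanduser("~/Persistent/securedrop/admin/.venv3"))  # illustrative path
```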
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `admin/bootstrap.py`
Content:
```
1 # -*- mode: python; coding: utf-8 -*-
2 #
3 # Copyright (C) 2013-2018 Freedom of the Press Foundation & al
4 # Copyright (C) 2018 Loic Dachary <[email protected]>
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation, either version 3 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19
20 import argparse
21 import logging
22 import os
23 import shutil
24 import subprocess
25 import sys
26
27 sdlog = logging.getLogger(__name__)
28
29 DIR = os.path.dirname(os.path.realpath(__file__))
30 VENV_DIR = os.path.join(DIR, ".venv3")
31
32
33 def setup_logger(verbose=False):
34 """ Configure logging handler """
35 # Set default level on parent
36 sdlog.setLevel(logging.DEBUG)
37 level = logging.DEBUG if verbose else logging.INFO
38
39 stdout = logging.StreamHandler(sys.stdout)
40 stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
41 stdout.setLevel(level)
42 sdlog.addHandler(stdout)
43
44
45 def run_command(command):
46 """
47 Wrapper function to display stdout for running command,
48 similar to how shelling out in a Bash script displays rolling output.
49
50 Yields a list of the stdout from the `command`, and raises a
51 CalledProcessError if `command` returns non-zero.
52 """
53 popen = subprocess.Popen(command,
54 stdout=subprocess.PIPE,
55 stderr=subprocess.STDOUT)
56 for stdout_line in iter(popen.stdout.readline, b""):
57 yield stdout_line
58 popen.stdout.close()
59 return_code = popen.wait()
60 if return_code:
61 raise subprocess.CalledProcessError(return_code, command)
62
63
64 def is_tails():
65 try:
66 id = subprocess.check_output('lsb_release --id --short',
67 shell=True).decode('utf-8').strip()
68 except subprocess.CalledProcessError:
69 id = None
70
71 # dirty hack to unreliably detect Tails 4.0~beta2
72 if id == 'Debian':
73 if os.uname()[1] == 'amnesia':
74 id = 'Tails'
75
76 return id == 'Tails'
77
78
79 def clean_up_tails3_venv(virtualenv_dir=VENV_DIR):
80 """
81 Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is
82 based on Debian Buster and uses libpython3.7. This means that the Tails 3.x
83 virtualenv will not work under Tails 4.x, and will need to be destroyed and
84 rebuilt. We can detect if the version of libpython is 3.5 in the
85 admin/.venv3/ folder, and delete it if that's the case. This will ensure a
86 smooth upgrade from Tails 3.x to Tails 4.x.
87 """
88 if is_tails():
89 try:
90 dist = subprocess.check_output('lsb_release --codename --short',
91 shell=True).strip()
92 except subprocess.CalledProcessError:
93 dist = None
94
95 # tails4 is based on buster
96 if dist == b'buster':
97 python_lib_path = os.path.join(virtualenv_dir, "lib/python3.5")
98 if os.path.exists(os.path.join(python_lib_path)):
99 sdlog.info(
100 "Tails 3 Python 3 virtualenv detected. "
101 "Removing it."
102 )
103 shutil.rmtree(virtualenv_dir)
104 sdlog.info("Tails 3 Python 3 virtualenv deleted.")
105
106
107 def checkenv(args):
108 clean_up_tails3_venv(VENV_DIR)
109 if not os.path.exists(os.path.join(VENV_DIR, "bin/activate")):
110 sdlog.error('Please run "securedrop-admin setup".')
111 sys.exit(1)
112
113
114 def maybe_torify():
115 if is_tails():
116 return ['torify']
117 else:
118 return []
119
120
121 def install_apt_dependencies(args):
122 """
123 Install apt dependencies in Tails. In order to install Ansible in
124 a virtualenv, first there are a number of Python prerequisites.
125 """
126 sdlog.info("Installing SecureDrop Admin dependencies")
127 sdlog.info(("You'll be prompted for the temporary Tails admin password,"
128 " which was set on Tails login screen"))
129
130 apt_command = ['sudo', 'su', '-c',
131 "apt-get update && \
132 apt-get -q -o=Dpkg::Use-Pty=0 install -y \
133 python3-virtualenv \
134 python3-yaml \
135 python3-pip \
136 ccontrol \
137 virtualenv \
138 libffi-dev \
139 libssl-dev \
140 libpython3-dev",
141 ]
142
143 try:
144 # Print command results in real-time, to keep Admin apprised
145 # of progress during long-running command.
146 for output_line in run_command(apt_command):
147 print(output_line.decode('utf-8').rstrip())
148 except subprocess.CalledProcessError:
149 # Tails supports apt persistence, which was used by SecureDrop
150 # under Tails 2.x. If updates are being applied, don't try to pile
151 # on with more apt requests.
152 sdlog.error(("Failed to install apt dependencies. Check network"
153 " connection and try again."))
154 raise
155
156
157 def envsetup(args):
158 """Installs Admin tooling required for managing SecureDrop. Specifically:
159
160 * updates apt-cache
161 * installs apt packages for Python virtualenv
162 * creates virtualenv
163 * installs pip packages inside virtualenv
164
165 The virtualenv is created within the Persistence volume in Tails, so that
166 Ansible is available to the Admin on subsequent boots without requiring
167 installation of packages again.
168 """
169 # clean up tails 3.x venv when migrating to tails 4.x
170 clean_up_tails3_venv(VENV_DIR)
171
172 # virtualenv doesnt exist? Install dependencies and create
173 if not os.path.exists(VENV_DIR):
174
175 install_apt_dependencies(args)
176
177 # Technically you can create a virtualenv from within python
178 # but pip can only be run over tor on tails, and debugging that
179 # along with instaling a third-party dependency is not worth
180 # the effort here.
181 sdlog.info("Setting up virtualenv")
182 try:
183 sdlog.debug(subprocess.check_output(
184 maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],
185 stderr=subprocess.STDOUT))
186 except subprocess.CalledProcessError as e:
187 sdlog.debug(e.output)
188 sdlog.error(("Unable to create virtualenv. Check network settings"
189 " and try again."))
190 raise
191 else:
192 sdlog.info("Virtualenv already exists, not creating")
193
194 install_pip_dependencies(args)
195 if os.path.exists(os.path.join(DIR, 'setup.py')):
196 install_pip_self(args)
197
198 sdlog.info("Finished installing SecureDrop dependencies")
199
200
201 def install_pip_self(args):
202 pip_install_cmd = [
203 os.path.join(VENV_DIR, 'bin', 'pip3'),
204 'install', '-e', DIR
205 ]
206 try:
207 subprocess.check_output(maybe_torify() + pip_install_cmd,
208 stderr=subprocess.STDOUT)
209 except subprocess.CalledProcessError as e:
210 sdlog.debug(e.output)
211 sdlog.error("Unable to install self, run with -v for more information")
212 raise
213
214
215 def install_pip_dependencies(args, pip_install_cmd=[
216 os.path.join(VENV_DIR, 'bin', 'pip3'),
217 'install',
218 # Specify requirements file.
219 '-r', os.path.join(DIR, 'requirements.txt'),
220 '--require-hashes',
221 # Make sure to upgrade packages only if necessary.
222 '-U', '--upgrade-strategy', 'only-if-needed',
223 ]):
224 """
225 Install Python dependencies via pip into virtualenv.
226 """
227
228 sdlog.info("Checking Python dependencies for securedrop-admin")
229 try:
230 pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,
231 stderr=subprocess.STDOUT)
232 except subprocess.CalledProcessError as e:
233 sdlog.debug(e.output)
234 sdlog.error(("Failed to install pip dependencies. Check network"
235 " connection and try again."))
236 raise
237
238 sdlog.debug(pip_output)
239 if "Successfully installed" in str(pip_output):
240 sdlog.info("Python dependencies for securedrop-admin upgraded")
241 else:
242 sdlog.info("Python dependencies for securedrop-admin are up-to-date")
243
244
245 def parse_argv(argv):
246 parser = argparse.ArgumentParser()
247 parser.add_argument('-v', action='store_true', default=False,
248 help="Increase verbosity on output")
249 parser.set_defaults(func=envsetup)
250
251 subparsers = parser.add_subparsers()
252
253 envsetup_parser = subparsers.add_parser(
254 'envsetup',
255 help='Set up the admin virtualenv.'
256 )
257 envsetup_parser.set_defaults(func=envsetup)
258
259 checkenv_parser = subparsers.add_parser(
260 'checkenv',
261 help='Check that the admin virtualenv is properly set up.'
262 )
263 checkenv_parser.set_defaults(func=checkenv)
264
265 return parser.parse_args(argv)
266
267
268 if __name__ == "__main__":
269 args = parse_argv(sys.argv[1:])
270 setup_logger(args.v)
271
272 try:
273 args.func(args)
274 except Exception:
275 sys.exit(1)
276 else:
277 sys.exit(0)
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/admin/bootstrap.py b/admin/bootstrap.py
--- a/admin/bootstrap.py
+++ b/admin/bootstrap.py
@@ -154,7 +154,7 @@
raise
-def envsetup(args):
+def envsetup(args, virtualenv_dir=VENV_DIR):
"""Installs Admin tooling required for managing SecureDrop. Specifically:
* updates apt-cache
@@ -167,10 +167,10 @@
installation of packages again.
"""
# clean up tails 3.x venv when migrating to tails 4.x
- clean_up_tails3_venv(VENV_DIR)
+ clean_up_tails3_venv(virtualenv_dir)
# virtualenv doesnt exist? Install dependencies and create
- if not os.path.exists(VENV_DIR):
+ if not os.path.exists(virtualenv_dir):
install_apt_dependencies(args)
@@ -181,12 +181,18 @@
sdlog.info("Setting up virtualenv")
try:
sdlog.debug(subprocess.check_output(
- maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],
+ maybe_torify() + ['virtualenv',
+ '--python=python3',
+ virtualenv_dir
+ ],
stderr=subprocess.STDOUT))
except subprocess.CalledProcessError as e:
sdlog.debug(e.output)
sdlog.error(("Unable to create virtualenv. Check network settings"
" and try again."))
+ sdlog.debug("Cleaning up virtualenv")
+ if os.path.exists(virtualenv_dir):
+ shutil.rmtree(virtualenv_dir)
raise
else:
sdlog.info("Virtualenv already exists, not creating")
| {"golden_diff": "diff --git a/admin/bootstrap.py b/admin/bootstrap.py\n--- a/admin/bootstrap.py\n+++ b/admin/bootstrap.py\n@@ -154,7 +154,7 @@\n raise\n \n \n-def envsetup(args):\n+def envsetup(args, virtualenv_dir=VENV_DIR):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n \n * updates apt-cache\n@@ -167,10 +167,10 @@\n installation of packages again.\n \"\"\"\n # clean up tails 3.x venv when migrating to tails 4.x\n- clean_up_tails3_venv(VENV_DIR)\n+ clean_up_tails3_venv(virtualenv_dir)\n \n # virtualenv doesnt exist? Install dependencies and create\n- if not os.path.exists(VENV_DIR):\n+ if not os.path.exists(virtualenv_dir):\n \n install_apt_dependencies(args)\n \n@@ -181,12 +181,18 @@\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n- maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],\n+ maybe_torify() + ['virtualenv',\n+ '--python=python3',\n+ virtualenv_dir\n+ ],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. Check network settings\"\n \" and try again.\"))\n+ sdlog.debug(\"Cleaning up virtualenv\")\n+ if os.path.exists(virtualenv_dir):\n+ shutil.rmtree(virtualenv_dir)\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n", "issue": "Python3 admin virtualenv is broken if improperly set up (doesn't contain pip3)\n## Description\r\n\r\nInitially discovered while reviewing https://github.com/freedomofpress/securedrop/pull/4927#issuecomment-543763957 , if the admin virtualenv is improperly setup, one must manually remove `admin/.venv3` to fix the virtualenv.\r\n\r\n## Steps to Reproduce\r\nThis is very hard to reproduce, but you can do it by pressing ctrl+c when the virtualenv is being setup as part of an initial `./securedrop-admin setup` run. Subsequent `./securedrop-admin setup` invocations will no longer work\r\n\r\n## Expected Behavior\r\n`./securedrop-admin setup` should not fail\r\n\r\n## Actual Behavior\r\n`./securedrop-admin setup` fails with the following output:\r\n```\r\namnesia@amnesia:~/Persistent/securedrop$ ./securedrop-admin setup\r\nINFO: Virtualenv already exists, not creating\r\nINFO: Checking Python dependencies for securedrop-admin\r\nERROR: Failed to install pip dependencies. Check network connection and try again.\r\n```\n", "before_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nsdlog = logging.getLogger(__name__)\n\nDIR = os.path.dirname(os.path.realpath(__file__))\nVENV_DIR = os.path.join(DIR, \".venv3\")\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef run_command(command):\n \"\"\"\n Wrapper function to display stdout for running command,\n similar to how shelling out in a Bash script displays rolling output.\n\n Yields a list of the stdout from the `command`, and raises a\n CalledProcessError if `command` returns non-zero.\n \"\"\"\n popen = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n for stdout_line in iter(popen.stdout.readline, b\"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)\n\n\ndef is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).decode('utf-8').strip()\n except subprocess.CalledProcessError:\n id = None\n\n # dirty hack to unreliably detect Tails 4.0~beta2\n if id == 'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n\n return id == 'Tails'\n\n\ndef clean_up_tails3_venv(virtualenv_dir=VENV_DIR):\n \"\"\"\n Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is\n based on Debian Buster and uses libpython3.7. This means that the Tails 3.x\n virtualenv will not work under Tails 4.x, and will need to be destroyed and\n rebuilt. We can detect if the version of libpython is 3.5 in the\n admin/.venv3/ folder, and delete it if that's the case. This will ensure a\n smooth upgrade from Tails 3.x to Tails 4.x.\n \"\"\"\n if is_tails():\n try:\n dist = subprocess.check_output('lsb_release --codename --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n dist = None\n\n # tails4 is based on buster\n if dist == b'buster':\n python_lib_path = os.path.join(virtualenv_dir, \"lib/python3.5\")\n if os.path.exists(os.path.join(python_lib_path)):\n sdlog.info(\n \"Tails 3 Python 3 virtualenv detected. \"\n \"Removing it.\"\n )\n shutil.rmtree(virtualenv_dir)\n sdlog.info(\"Tails 3 Python 3 virtualenv deleted.\")\n\n\ndef checkenv(args):\n clean_up_tails3_venv(VENV_DIR)\n if not os.path.exists(os.path.join(VENV_DIR, \"bin/activate\")):\n sdlog.error('Please run \"securedrop-admin setup\".')\n sys.exit(1)\n\n\ndef maybe_torify():\n if is_tails():\n return ['torify']\n else:\n return []\n\n\ndef install_apt_dependencies(args):\n \"\"\"\n Install apt dependencies in Tails. 
In order to install Ansible in\n a virtualenv, first there are a number of Python prerequisites.\n \"\"\"\n sdlog.info(\"Installing SecureDrop Admin dependencies\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n\n apt_command = ['sudo', 'su', '-c',\n \"apt-get update && \\\n apt-get -q -o=Dpkg::Use-Pty=0 install -y \\\n python3-virtualenv \\\n python3-yaml \\\n python3-pip \\\n ccontrol \\\n virtualenv \\\n libffi-dev \\\n libssl-dev \\\n libpython3-dev\",\n ]\n\n try:\n # Print command results in real-time, to keep Admin apprised\n # of progress during long-running command.\n for output_line in run_command(apt_command):\n print(output_line.decode('utf-8').rstrip())\n except subprocess.CalledProcessError:\n # Tails supports apt persistence, which was used by SecureDrop\n # under Tails 2.x. If updates are being applied, don't try to pile\n # on with more apt requests.\n sdlog.error((\"Failed to install apt dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n\ndef envsetup(args):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n\n * updates apt-cache\n * installs apt packages for Python virtualenv\n * creates virtualenv\n * installs pip packages inside virtualenv\n\n The virtualenv is created within the Persistence volume in Tails, so that\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n # clean up tails 3.x venv when migrating to tails 4.x\n clean_up_tails3_venv(VENV_DIR)\n\n # virtualenv doesnt exist? Install dependencies and create\n if not os.path.exists(VENV_DIR):\n\n install_apt_dependencies(args)\n\n # Technically you can create a virtualenv from within python\n # but pip can only be run over tor on tails, and debugging that\n # along with instaling a third-party dependency is not worth\n # the effort here.\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n maybe_torify() + ['virtualenv', '--python=python3', VENV_DIR],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. 
Check network settings\"\n \" and try again.\"))\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n\n install_pip_dependencies(args)\n if os.path.exists(os.path.join(DIR, 'setup.py')):\n install_pip_self(args)\n\n sdlog.info(\"Finished installing SecureDrop dependencies\")\n\n\ndef install_pip_self(args):\n pip_install_cmd = [\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install', '-e', DIR\n ]\n try:\n subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error(\"Unable to install self, run with -v for more information\")\n raise\n\n\ndef install_pip_dependencies(args, pip_install_cmd=[\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install',\n # Specify requirements file.\n '-r', os.path.join(DIR, 'requirements.txt'),\n '--require-hashes',\n # Make sure to upgrade packages only if necessary.\n '-U', '--upgrade-strategy', 'only-if-needed',\n]):\n \"\"\"\n Install Python dependencies via pip into virtualenv.\n \"\"\"\n\n sdlog.info(\"Checking Python dependencies for securedrop-admin\")\n try:\n pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Failed to install pip dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n sdlog.debug(pip_output)\n if \"Successfully installed\" in str(pip_output):\n sdlog.info(\"Python dependencies for securedrop-admin upgraded\")\n else:\n sdlog.info(\"Python dependencies for securedrop-admin are up-to-date\")\n\n\ndef parse_argv(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n\n subparsers = parser.add_subparsers()\n\n envsetup_parser = subparsers.add_parser(\n 'envsetup',\n help='Set up the admin virtualenv.'\n )\n envsetup_parser.set_defaults(func=envsetup)\n\n checkenv_parser = subparsers.add_parser(\n 'checkenv',\n help='Check that the admin virtualenv is properly set up.'\n )\n checkenv_parser.set_defaults(func=checkenv)\n\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n\n try:\n args.func(args)\n except Exception:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "admin/bootstrap.py"}], "after_files": [{"content": "# -*- mode: python; coding: utf-8 -*-\n#\n# Copyright (C) 2013-2018 Freedom of the Press Foundation & al\n# Copyright (C) 2018 Loic Dachary <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nimport argparse\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport sys\n\nsdlog = logging.getLogger(__name__)\n\nDIR = os.path.dirname(os.path.realpath(__file__))\nVENV_DIR = os.path.join(DIR, \".venv3\")\n\n\ndef setup_logger(verbose=False):\n \"\"\" Configure logging handler \"\"\"\n # Set default level on parent\n sdlog.setLevel(logging.DEBUG)\n level = logging.DEBUG if verbose else logging.INFO\n\n stdout = logging.StreamHandler(sys.stdout)\n stdout.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))\n stdout.setLevel(level)\n sdlog.addHandler(stdout)\n\n\ndef run_command(command):\n \"\"\"\n Wrapper function to display stdout for running command,\n similar to how shelling out in a Bash script displays rolling output.\n\n Yields a list of the stdout from the `command`, and raises a\n CalledProcessError if `command` returns non-zero.\n \"\"\"\n popen = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n for stdout_line in iter(popen.stdout.readline, b\"\"):\n yield stdout_line\n popen.stdout.close()\n return_code = popen.wait()\n if return_code:\n raise subprocess.CalledProcessError(return_code, command)\n\n\ndef is_tails():\n try:\n id = subprocess.check_output('lsb_release --id --short',\n shell=True).decode('utf-8').strip()\n except subprocess.CalledProcessError:\n id = None\n\n # dirty hack to unreliably detect Tails 4.0~beta2\n if id == 'Debian':\n if os.uname()[1] == 'amnesia':\n id = 'Tails'\n\n return id == 'Tails'\n\n\ndef clean_up_tails3_venv(virtualenv_dir=VENV_DIR):\n \"\"\"\n Tails 3.x, based on debian stretch uses libpython3.5, whereas Tails 4.x is\n based on Debian Buster and uses libpython3.7. This means that the Tails 3.x\n virtualenv will not work under Tails 4.x, and will need to be destroyed and\n rebuilt. We can detect if the version of libpython is 3.5 in the\n admin/.venv3/ folder, and delete it if that's the case. This will ensure a\n smooth upgrade from Tails 3.x to Tails 4.x.\n \"\"\"\n if is_tails():\n try:\n dist = subprocess.check_output('lsb_release --codename --short',\n shell=True).strip()\n except subprocess.CalledProcessError:\n dist = None\n\n # tails4 is based on buster\n if dist == b'buster':\n python_lib_path = os.path.join(virtualenv_dir, \"lib/python3.5\")\n if os.path.exists(os.path.join(python_lib_path)):\n sdlog.info(\n \"Tails 3 Python 3 virtualenv detected. \"\n \"Removing it.\"\n )\n shutil.rmtree(virtualenv_dir)\n sdlog.info(\"Tails 3 Python 3 virtualenv deleted.\")\n\n\ndef checkenv(args):\n clean_up_tails3_venv(VENV_DIR)\n if not os.path.exists(os.path.join(VENV_DIR, \"bin/activate\")):\n sdlog.error('Please run \"securedrop-admin setup\".')\n sys.exit(1)\n\n\ndef maybe_torify():\n if is_tails():\n return ['torify']\n else:\n return []\n\n\ndef install_apt_dependencies(args):\n \"\"\"\n Install apt dependencies in Tails. 
In order to install Ansible in\n a virtualenv, first there are a number of Python prerequisites.\n \"\"\"\n sdlog.info(\"Installing SecureDrop Admin dependencies\")\n sdlog.info((\"You'll be prompted for the temporary Tails admin password,\"\n \" which was set on Tails login screen\"))\n\n apt_command = ['sudo', 'su', '-c',\n \"apt-get update && \\\n apt-get -q -o=Dpkg::Use-Pty=0 install -y \\\n python3-virtualenv \\\n python3-yaml \\\n python3-pip \\\n ccontrol \\\n virtualenv \\\n libffi-dev \\\n libssl-dev \\\n libpython3-dev\",\n ]\n\n try:\n # Print command results in real-time, to keep Admin apprised\n # of progress during long-running command.\n for output_line in run_command(apt_command):\n print(output_line.decode('utf-8').rstrip())\n except subprocess.CalledProcessError:\n # Tails supports apt persistence, which was used by SecureDrop\n # under Tails 2.x. If updates are being applied, don't try to pile\n # on with more apt requests.\n sdlog.error((\"Failed to install apt dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n\ndef envsetup(args, virtualenv_dir=VENV_DIR):\n \"\"\"Installs Admin tooling required for managing SecureDrop. Specifically:\n\n * updates apt-cache\n * installs apt packages for Python virtualenv\n * creates virtualenv\n * installs pip packages inside virtualenv\n\n The virtualenv is created within the Persistence volume in Tails, so that\n Ansible is available to the Admin on subsequent boots without requiring\n installation of packages again.\n \"\"\"\n # clean up tails 3.x venv when migrating to tails 4.x\n clean_up_tails3_venv(virtualenv_dir)\n\n # virtualenv doesnt exist? Install dependencies and create\n if not os.path.exists(virtualenv_dir):\n\n install_apt_dependencies(args)\n\n # Technically you can create a virtualenv from within python\n # but pip can only be run over tor on tails, and debugging that\n # along with instaling a third-party dependency is not worth\n # the effort here.\n sdlog.info(\"Setting up virtualenv\")\n try:\n sdlog.debug(subprocess.check_output(\n maybe_torify() + ['virtualenv',\n '--python=python3',\n virtualenv_dir\n ],\n stderr=subprocess.STDOUT))\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Unable to create virtualenv. 
Check network settings\"\n \" and try again.\"))\n sdlog.debug(\"Cleaning up virtualenv\")\n if os.path.exists(virtualenv_dir):\n shutil.rmtree(virtualenv_dir)\n raise\n else:\n sdlog.info(\"Virtualenv already exists, not creating\")\n\n install_pip_dependencies(args)\n if os.path.exists(os.path.join(DIR, 'setup.py')):\n install_pip_self(args)\n\n sdlog.info(\"Finished installing SecureDrop dependencies\")\n\n\ndef install_pip_self(args):\n pip_install_cmd = [\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install', '-e', DIR\n ]\n try:\n subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error(\"Unable to install self, run with -v for more information\")\n raise\n\n\ndef install_pip_dependencies(args, pip_install_cmd=[\n os.path.join(VENV_DIR, 'bin', 'pip3'),\n 'install',\n # Specify requirements file.\n '-r', os.path.join(DIR, 'requirements.txt'),\n '--require-hashes',\n # Make sure to upgrade packages only if necessary.\n '-U', '--upgrade-strategy', 'only-if-needed',\n]):\n \"\"\"\n Install Python dependencies via pip into virtualenv.\n \"\"\"\n\n sdlog.info(\"Checking Python dependencies for securedrop-admin\")\n try:\n pip_output = subprocess.check_output(maybe_torify() + pip_install_cmd,\n stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n sdlog.debug(e.output)\n sdlog.error((\"Failed to install pip dependencies. Check network\"\n \" connection and try again.\"))\n raise\n\n sdlog.debug(pip_output)\n if \"Successfully installed\" in str(pip_output):\n sdlog.info(\"Python dependencies for securedrop-admin upgraded\")\n else:\n sdlog.info(\"Python dependencies for securedrop-admin are up-to-date\")\n\n\ndef parse_argv(argv):\n parser = argparse.ArgumentParser()\n parser.add_argument('-v', action='store_true', default=False,\n help=\"Increase verbosity on output\")\n parser.set_defaults(func=envsetup)\n\n subparsers = parser.add_subparsers()\n\n envsetup_parser = subparsers.add_parser(\n 'envsetup',\n help='Set up the admin virtualenv.'\n )\n envsetup_parser.set_defaults(func=envsetup)\n\n checkenv_parser = subparsers.add_parser(\n 'checkenv',\n help='Check that the admin virtualenv is properly set up.'\n )\n checkenv_parser.set_defaults(func=checkenv)\n\n return parser.parse_args(argv)\n\n\nif __name__ == \"__main__\":\n args = parse_argv(sys.argv[1:])\n setup_logger(args.v)\n\n try:\n args.func(args)\n except Exception:\n sys.exit(1)\n else:\n sys.exit(0)\n", "path": "admin/bootstrap.py"}]} | 3,374 | 384 |
gh_patches_debug_6214 | rasdani/github-patches | git_diff | voicepaw__so-vits-svc-fork-1157 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to use svc pre-sd with pyannote.audio 3.1.1
### Describe the bug
To use svc pre-sd for a long audio file with multiple speakers, I followed the [setup guide](https://github.com/voicepaw/so-vits-svc-fork/#before-training) and manually installed pyannote.audio, getting the latest version, 3.1.1.
Attempting to run svc pre-sd triggered the following error messages:
```
Model was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.
Model was trained with torch 1.10.0+cu102, yours is 2.2.2+cu121. Bad things might happen unless you revert torch to 1.x.
```
According to [PyPI](https://pypi.org/project/pyannote.audio/3.1.1/), pyannote.audio 3.1.1 works with speaker-diarization-3.1. So, it’s necessary to explicitly specify this version in the code.
### To Reproduce
1. Set up the environment. (I'm using torch 2.2.2+cu121.)
2. Install so-vits-svc-fork and its dependencies.
3. Install pyannote.audio with `pip3 install pyannote-audio`.
4. Prepare your data and organize data folders.
5. Run svc pre-sd with options suited to your data.
### Additional context
_No response_
### Version
4.1.61
### Platform
WSL-Ubuntu 22.04 LTS
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct.
### No Duplicate
- [X] I have checked existing issues to avoid duplicates.
--- END ISSUE ---
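One reading of the report: the unversioned `pyannote/speaker-diarization` checkpoint predates pyannote.audio 3.x, which is what triggers the version warnings under 3.1.1. A minimal sketch of loading the version-pinned pipeline instead; the checkpoint name follows the issue's pointer to speaker-diarization-3.1, and the token value is a placeholder.

```python
import torch
from pyannote.audio import Pipeline

# Pin the checkpoint that matches pyannote.audio 3.1.x rather than the
# unversioned "pyannote/speaker-diarization" identifier.
pipeline = Pipeline.from_pretrained(
    "pyannote/speaker-diarization-3.1",
    use_auth_token="hf_xxx",  # placeholder Hugging Face token
)
if torch.cuda.is_available():
    pipeline = pipeline.to(torch.device("cuda"))
```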
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py`
Content:
```
1 from __future__ import annotations
2
3 from collections import defaultdict
4 from logging import getLogger
5 from pathlib import Path
6
7 import librosa
8 import soundfile as sf
9 import torch
10 from joblib import Parallel, delayed
11 from pyannote.audio import Pipeline
12 from tqdm import tqdm
13 from tqdm_joblib import tqdm_joblib
14
15 LOG = getLogger(__name__)
16
17
18 def _process_one(
19 input_path: Path,
20 output_dir: Path,
21 sr: int,
22 *,
23 min_speakers: int = 1,
24 max_speakers: int = 1,
25 huggingface_token: str | None = None,
26 ) -> None:
27 try:
28 audio, sr = librosa.load(input_path, sr=sr, mono=True)
29 except Exception as e:
30 LOG.warning(f"Failed to read {input_path}: {e}")
31 return
32 pipeline = Pipeline.from_pretrained(
33 "pyannote/speaker-diarization", use_auth_token=huggingface_token
34 )
35 if pipeline is None:
36 raise ValueError("Failed to load pipeline")
37 pipeline = pipeline.to(torch.device("cuda"))
38 LOG.info(f"Processing {input_path}. This may take a while...")
39 diarization = pipeline(
40 input_path, min_speakers=min_speakers, max_speakers=max_speakers
41 )
42
43 LOG.info(f"Found {len(diarization)} tracks, writing to {output_dir}")
44 speaker_count = defaultdict(int)
45
46 output_dir.mkdir(parents=True, exist_ok=True)
47 for segment, track, speaker in tqdm(
48 list(diarization.itertracks(yield_label=True)), desc=f"Writing {input_path}"
49 ):
50 if segment.end - segment.start < 1:
51 continue
52 speaker_count[speaker] += 1
53 audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]
54 sf.write(
55 (output_dir / f"{speaker}_{speaker_count[speaker]}.wav"),
56 audio_cut,
57 sr,
58 )
59
60 LOG.info(f"Speaker count: {speaker_count}")
61
62
63 def preprocess_speaker_diarization(
64 input_dir: Path | str,
65 output_dir: Path | str,
66 sr: int,
67 *,
68 min_speakers: int = 1,
69 max_speakers: int = 1,
70 huggingface_token: str | None = None,
71 n_jobs: int = -1,
72 ) -> None:
73 if huggingface_token is not None and not huggingface_token.startswith("hf_"):
74 LOG.warning("Huggingface token probably should start with hf_")
75 if not torch.cuda.is_available():
76 LOG.warning("CUDA is not available. This will be extremely slow.")
77 input_dir = Path(input_dir)
78 output_dir = Path(output_dir)
79 input_dir.mkdir(parents=True, exist_ok=True)
80 output_dir.mkdir(parents=True, exist_ok=True)
81 input_paths = list(input_dir.rglob("*.*"))
82 with tqdm_joblib(desc="Preprocessing speaker diarization", total=len(input_paths)):
83 Parallel(n_jobs=n_jobs)(
84 delayed(_process_one)(
85 input_path,
86 output_dir / input_path.relative_to(input_dir).parent / input_path.stem,
87 sr,
88 max_speakers=max_speakers,
89 min_speakers=min_speakers,
90 huggingface_token=huggingface_token,
91 )
92 for input_path in input_paths
93 )
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py
@@ -30,7 +30,7 @@
LOG.warning(f"Failed to read {input_path}: {e}")
return
pipeline = Pipeline.from_pretrained(
- "pyannote/speaker-diarization", use_auth_token=huggingface_token
+ "pyannote/speaker-diarization-3.1", use_auth_token=huggingface_token
)
if pipeline is None:
raise ValueError("Failed to load pipeline")
| {"golden_diff": "diff --git a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n--- a/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n+++ b/src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py\n@@ -30,7 +30,7 @@\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n- \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n+ \"pyannote/speaker-diarization-3.1\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n", "issue": "Unable to use svc pre-sd with pyannote.audio 3.1.1\n### Describe the bug\n\nTo use svc pre-sd for a long audio file with multiple speakers, I followed the [setup guide](https://github.com/voicepaw/so-vits-svc-fork/#before-training) and manually installed pyannote.audio, getting the latest version, 3.1.1.\r\n\r\nAttempting to run svc pre-sd triggered the following error messages:\r\n\r\n```\r\nModel was trained with pyannote.audio 0.0.1, yours is 3.1.1. Bad things might happen unless you revert pyannote.audio to 0.x.\r\n\r\nModel was trained with torch 1.10.0+cu102, yours is 2.2.2+cu121. Bad things might happen unless you revert torch to 1.x.\r\n```\r\n\r\nAccording to [PyPI](https://pypi.org/project/pyannote.audio/3.1.1/), pyannote.audio 3.1.1 works with speaker-diarization-3.1. So, it\u2019s necessary to explicitly specify this version in the code.\n\n### To Reproduce\n\n1. Set up the environment. (I'm using torch 2.2.2+cu121.)\r\n2. Install so-vits-svc-fork and its dependencies.\r\n3. Install pyannote.audio with `pip3 install pyannote-audio`.\r\n4. Prepare your data and organize data folders.\r\n5. Run svc pre-sd with options suited to your data.\n\n### Additional context\n\n_No response_\n\n### Version\n\n4.1.61\n\n### Platform\n\nWSL-Ubuntu 22.04 LTS\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct.\n\n### No Duplicate\n\n- [X] I have checked existing issues to avoid duplicates.\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport librosa\nimport soundfile as sf\nimport torch\nfrom joblib import Parallel, delayed\nfrom pyannote.audio import Pipeline\nfrom tqdm import tqdm\nfrom tqdm_joblib import tqdm_joblib\n\nLOG = getLogger(__name__)\n\n\ndef _process_one(\n input_path: Path,\n output_dir: Path,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n) -> None:\n try:\n audio, sr = librosa.load(input_path, sr=sr, mono=True)\n except Exception as e:\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n \"pyannote/speaker-diarization\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n pipeline = pipeline.to(torch.device(\"cuda\"))\n LOG.info(f\"Processing {input_path}. 
This may take a while...\")\n diarization = pipeline(\n input_path, min_speakers=min_speakers, max_speakers=max_speakers\n )\n\n LOG.info(f\"Found {len(diarization)} tracks, writing to {output_dir}\")\n speaker_count = defaultdict(int)\n\n output_dir.mkdir(parents=True, exist_ok=True)\n for segment, track, speaker in tqdm(\n list(diarization.itertracks(yield_label=True)), desc=f\"Writing {input_path}\"\n ):\n if segment.end - segment.start < 1:\n continue\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n audio_cut,\n sr,\n )\n\n LOG.info(f\"Speaker count: {speaker_count}\")\n\n\ndef preprocess_speaker_diarization(\n input_dir: Path | str,\n output_dir: Path | str,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n n_jobs: int = -1,\n) -> None:\n if huggingface_token is not None and not huggingface_token.startswith(\"hf_\"):\n LOG.warning(\"Huggingface token probably should start with hf_\")\n if not torch.cuda.is_available():\n LOG.warning(\"CUDA is not available. This will be extremely slow.\")\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n input_dir.mkdir(parents=True, exist_ok=True)\n output_dir.mkdir(parents=True, exist_ok=True)\n input_paths = list(input_dir.rglob(\"*.*\"))\n with tqdm_joblib(desc=\"Preprocessing speaker diarization\", total=len(input_paths)):\n Parallel(n_jobs=n_jobs)(\n delayed(_process_one)(\n input_path,\n output_dir / input_path.relative_to(input_dir).parent / input_path.stem,\n sr,\n max_speakers=max_speakers,\n min_speakers=min_speakers,\n huggingface_token=huggingface_token,\n )\n for input_path in input_paths\n )\n", "path": "src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom collections import defaultdict\nfrom logging import getLogger\nfrom pathlib import Path\n\nimport librosa\nimport soundfile as sf\nimport torch\nfrom joblib import Parallel, delayed\nfrom pyannote.audio import Pipeline\nfrom tqdm import tqdm\nfrom tqdm_joblib import tqdm_joblib\n\nLOG = getLogger(__name__)\n\n\ndef _process_one(\n input_path: Path,\n output_dir: Path,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n) -> None:\n try:\n audio, sr = librosa.load(input_path, sr=sr, mono=True)\n except Exception as e:\n LOG.warning(f\"Failed to read {input_path}: {e}\")\n return\n pipeline = Pipeline.from_pretrained(\n \"pyannote/speaker-diarization-3.1\", use_auth_token=huggingface_token\n )\n if pipeline is None:\n raise ValueError(\"Failed to load pipeline\")\n pipeline = pipeline.to(torch.device(\"cuda\"))\n LOG.info(f\"Processing {input_path}. 
This may take a while...\")\n diarization = pipeline(\n input_path, min_speakers=min_speakers, max_speakers=max_speakers\n )\n\n LOG.info(f\"Found {len(diarization)} tracks, writing to {output_dir}\")\n speaker_count = defaultdict(int)\n\n output_dir.mkdir(parents=True, exist_ok=True)\n for segment, track, speaker in tqdm(\n list(diarization.itertracks(yield_label=True)), desc=f\"Writing {input_path}\"\n ):\n if segment.end - segment.start < 1:\n continue\n speaker_count[speaker] += 1\n audio_cut = audio[int(segment.start * sr) : int(segment.end * sr)]\n sf.write(\n (output_dir / f\"{speaker}_{speaker_count[speaker]}.wav\"),\n audio_cut,\n sr,\n )\n\n LOG.info(f\"Speaker count: {speaker_count}\")\n\n\ndef preprocess_speaker_diarization(\n input_dir: Path | str,\n output_dir: Path | str,\n sr: int,\n *,\n min_speakers: int = 1,\n max_speakers: int = 1,\n huggingface_token: str | None = None,\n n_jobs: int = -1,\n) -> None:\n if huggingface_token is not None and not huggingface_token.startswith(\"hf_\"):\n LOG.warning(\"Huggingface token probably should start with hf_\")\n if not torch.cuda.is_available():\n LOG.warning(\"CUDA is not available. This will be extremely slow.\")\n input_dir = Path(input_dir)\n output_dir = Path(output_dir)\n input_dir.mkdir(parents=True, exist_ok=True)\n output_dir.mkdir(parents=True, exist_ok=True)\n input_paths = list(input_dir.rglob(\"*.*\"))\n with tqdm_joblib(desc=\"Preprocessing speaker diarization\", total=len(input_paths)):\n Parallel(n_jobs=n_jobs)(\n delayed(_process_one)(\n input_path,\n output_dir / input_path.relative_to(input_dir).parent / input_path.stem,\n sr,\n max_speakers=max_speakers,\n min_speakers=min_speakers,\n huggingface_token=huggingface_token,\n )\n for input_path in input_paths\n )\n", "path": "src/so_vits_svc_fork/preprocessing/preprocess_speaker_diarization.py"}]} | 1,585 | 183 |
gh_patches_debug_6467 | rasdani/github-patches | git_diff | getnikola__nikola-1145 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Never copy/list listings/*.py[co]
kinda related to invariance
--- END ISSUE ---
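The report is terse, but the intent is that compiled-Python artifacts under `listings/` should be neither rendered nor listed. A small sketch of the filtering idea, assuming the ignored extensions are `.pyc` and `.pyo` as in the plugin shown below.

```python
import os

IGNORED_EXTENSIONS = (".pyc", ".pyo")


def walk_listings(listings_folder):
    """Yield (root, dirs, files) from listings_folder with *.pyc/*.pyo dropped."""
    for root, dirs, files in os.walk(listings_folder):
        files = [
            f for f in files
            if os.path.splitext(f)[-1] not in IGNORED_EXTENSIONS
        ]
        yield root, dirs, files
```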
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nikola/plugins/task/listings.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright © 2012-2014 Roberto Alsina and others.
4
5 # Permission is hereby granted, free of charge, to any
6 # person obtaining a copy of this software and associated
7 # documentation files (the "Software"), to deal in the
8 # Software without restriction, including without limitation
9 # the rights to use, copy, modify, merge, publish,
10 # distribute, sublicense, and/or sell copies of the
11 # Software, and to permit persons to whom the Software is
12 # furnished to do so, subject to the following conditions:
13 #
14 # The above copyright notice and this permission notice
15 # shall be included in all copies or substantial portions of
16 # the Software.
17 #
18 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
19 # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
20 # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
21 # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
22 # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24 # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26
27 from __future__ import unicode_literals, print_function
28
29 import os
30
31 from pygments import highlight
32 from pygments.lexers import get_lexer_for_filename, TextLexer
33 from pygments.formatters import HtmlFormatter
34
35 from nikola.plugin_categories import Task
36 from nikola import utils
37
38
39 class Listings(Task):
40 """Render pretty listings."""
41
42 name = "render_listings"
43
44 def set_site(self, site):
45 site.register_path_handler('listing', self.listing_path)
46 return super(Listings, self).set_site(site)
47
48 def gen_tasks(self):
49 """Render pretty code listings."""
50 kw = {
51 "default_lang": self.site.config["DEFAULT_LANG"],
52 "listings_folder": self.site.config["LISTINGS_FOLDER"],
53 "output_folder": self.site.config["OUTPUT_FOLDER"],
54 "index_file": self.site.config["INDEX_FILE"],
55 }
56
57 # Things to ignore in listings
58 ignored_extensions = (".pyc", ".pyo")
59
60 def render_listing(in_name, out_name, folders=[], files=[]):
61 if in_name:
62 with open(in_name, 'r') as fd:
63 try:
64 lexer = get_lexer_for_filename(in_name)
65 except:
66 lexer = TextLexer()
67 code = highlight(fd.read(), lexer,
68 HtmlFormatter(cssclass='code',
69 linenos="table", nowrap=False,
70 lineanchors=utils.slugify(in_name),
71 anchorlinenos=True))
72 title = os.path.basename(in_name)
73 else:
74 code = ''
75 title = ''
76 crumbs = utils.get_crumbs(os.path.relpath(out_name,
77 kw['output_folder']),
78 is_file=True)
79 context = {
80 'code': code,
81 'title': title,
82 'crumbs': crumbs,
83 'lang': kw['default_lang'],
84 'folders': folders,
85 'files': files,
86 'description': title,
87 }
88 self.site.render_template('listing.tmpl', out_name,
89 context)
90
91 yield self.group_task()
92
93 template_deps = self.site.template_system.template_deps('listing.tmpl')
94 for root, dirs, files in os.walk(kw['listings_folder']):
95 # Render all files
96 out_name = os.path.join(
97 kw['output_folder'],
98 root, kw['index_file']
99 )
100 yield {
101 'basename': self.name,
102 'name': out_name,
103 'file_dep': template_deps,
104 'targets': [out_name],
105 'actions': [(render_listing, [None, out_name, dirs, files])],
106 # This is necessary to reflect changes in blog title,
107 # sidebar links, etc.
108 'uptodate': [utils.config_changed(
109 self.site.GLOBAL_CONTEXT)],
110 'clean': True,
111 }
112 for f in files:
113 ext = os.path.splitext(f)[-1]
114 if ext in ignored_extensions:
115 continue
116 in_name = os.path.join(root, f)
117 out_name = os.path.join(
118 kw['output_folder'],
119 root,
120 f) + '.html'
121 yield {
122 'basename': self.name,
123 'name': out_name,
124 'file_dep': template_deps + [in_name],
125 'targets': [out_name],
126 'actions': [(render_listing, [in_name, out_name])],
127 # This is necessary to reflect changes in blog title,
128 # sidebar links, etc.
129 'uptodate': [utils.config_changed(
130 self.site.GLOBAL_CONTEXT)],
131 'clean': True,
132 }
133
134 def listing_path(self, name, lang):
135 return [_f for _f in [self.site.config['LISTINGS_FOLDER'], name +
136 '.html'] if _f]
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nikola/plugins/task/listings.py b/nikola/plugins/task/listings.py
--- a/nikola/plugins/task/listings.py
+++ b/nikola/plugins/task/listings.py
@@ -92,6 +92,7 @@
template_deps = self.site.template_system.template_deps('listing.tmpl')
for root, dirs, files in os.walk(kw['listings_folder']):
+ files = [f for f in files if os.path.splitext(f)[-1] not in ignored_extensions]
# Render all files
out_name = os.path.join(
kw['output_folder'],
| {"golden_diff": "diff --git a/nikola/plugins/task/listings.py b/nikola/plugins/task/listings.py\n--- a/nikola/plugins/task/listings.py\n+++ b/nikola/plugins/task/listings.py\n@@ -92,6 +92,7 @@\n \n template_deps = self.site.template_system.template_deps('listing.tmpl')\n for root, dirs, files in os.walk(kw['listings_folder']):\n+ files = [f for f in files if os.path.splitext(f)[-1] not in ignored_extensions]\n # Render all files\n out_name = os.path.join(\n kw['output_folder'],\n", "issue": "Never copy/list listings/*.py[co]\nkinda related to invariance\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals, print_function\n\nimport os\n\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_for_filename, TextLexer\nfrom pygments.formatters import HtmlFormatter\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass Listings(Task):\n \"\"\"Render pretty listings.\"\"\"\n\n name = \"render_listings\"\n\n def set_site(self, site):\n site.register_path_handler('listing', self.listing_path)\n return super(Listings, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Render pretty code listings.\"\"\"\n kw = {\n \"default_lang\": self.site.config[\"DEFAULT_LANG\"],\n \"listings_folder\": self.site.config[\"LISTINGS_FOLDER\"],\n \"output_folder\": self.site.config[\"OUTPUT_FOLDER\"],\n \"index_file\": self.site.config[\"INDEX_FILE\"],\n }\n\n # Things to ignore in listings\n ignored_extensions = (\".pyc\", \".pyo\")\n\n def render_listing(in_name, out_name, folders=[], files=[]):\n if in_name:\n with open(in_name, 'r') as fd:\n try:\n lexer = get_lexer_for_filename(in_name)\n except:\n lexer = TextLexer()\n code = highlight(fd.read(), lexer,\n HtmlFormatter(cssclass='code',\n linenos=\"table\", nowrap=False,\n lineanchors=utils.slugify(in_name),\n anchorlinenos=True))\n title = os.path.basename(in_name)\n else:\n code = ''\n title = ''\n crumbs = utils.get_crumbs(os.path.relpath(out_name,\n kw['output_folder']),\n is_file=True)\n context = {\n 'code': code,\n 'title': title,\n 'crumbs': crumbs,\n 'lang': kw['default_lang'],\n 'folders': folders,\n 'files': files,\n 'description': title,\n }\n self.site.render_template('listing.tmpl', out_name,\n context)\n\n yield self.group_task()\n\n template_deps = self.site.template_system.template_deps('listing.tmpl')\n for root, dirs, 
files in os.walk(kw['listings_folder']):\n # Render all files\n out_name = os.path.join(\n kw['output_folder'],\n root, kw['index_file']\n )\n yield {\n 'basename': self.name,\n 'name': out_name,\n 'file_dep': template_deps,\n 'targets': [out_name],\n 'actions': [(render_listing, [None, out_name, dirs, files])],\n # This is necessary to reflect changes in blog title,\n # sidebar links, etc.\n 'uptodate': [utils.config_changed(\n self.site.GLOBAL_CONTEXT)],\n 'clean': True,\n }\n for f in files:\n ext = os.path.splitext(f)[-1]\n if ext in ignored_extensions:\n continue\n in_name = os.path.join(root, f)\n out_name = os.path.join(\n kw['output_folder'],\n root,\n f) + '.html'\n yield {\n 'basename': self.name,\n 'name': out_name,\n 'file_dep': template_deps + [in_name],\n 'targets': [out_name],\n 'actions': [(render_listing, [in_name, out_name])],\n # This is necessary to reflect changes in blog title,\n # sidebar links, etc.\n 'uptodate': [utils.config_changed(\n self.site.GLOBAL_CONTEXT)],\n 'clean': True,\n }\n\n def listing_path(self, name, lang):\n return [_f for _f in [self.site.config['LISTINGS_FOLDER'], name +\n '.html'] if _f]\n", "path": "nikola/plugins/task/listings.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright \u00a9 2012-2014 Roberto Alsina and others.\n\n# Permission is hereby granted, free of charge, to any\n# person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the\n# Software without restriction, including without limitation\n# the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the\n# Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice\n# shall be included in all copies or substantial portions of\n# the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY\n# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR\n# PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS\n# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR\n# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nfrom __future__ import unicode_literals, print_function\n\nimport os\n\nfrom pygments import highlight\nfrom pygments.lexers import get_lexer_for_filename, TextLexer\nfrom pygments.formatters import HtmlFormatter\n\nfrom nikola.plugin_categories import Task\nfrom nikola import utils\n\n\nclass Listings(Task):\n \"\"\"Render pretty listings.\"\"\"\n\n name = \"render_listings\"\n\n def set_site(self, site):\n site.register_path_handler('listing', self.listing_path)\n return super(Listings, self).set_site(site)\n\n def gen_tasks(self):\n \"\"\"Render pretty code listings.\"\"\"\n kw = {\n \"default_lang\": self.site.config[\"DEFAULT_LANG\"],\n \"listings_folder\": self.site.config[\"LISTINGS_FOLDER\"],\n \"output_folder\": self.site.config[\"OUTPUT_FOLDER\"],\n \"index_file\": self.site.config[\"INDEX_FILE\"],\n }\n\n # Things to ignore in listings\n ignored_extensions = (\".pyc\", \".pyo\")\n\n def render_listing(in_name, out_name, folders=[], files=[]):\n if in_name:\n with open(in_name, 'r') as fd:\n try:\n lexer = get_lexer_for_filename(in_name)\n except:\n lexer = TextLexer()\n code = highlight(fd.read(), lexer,\n HtmlFormatter(cssclass='code',\n linenos=\"table\", nowrap=False,\n lineanchors=utils.slugify(in_name),\n anchorlinenos=True))\n title = os.path.basename(in_name)\n else:\n code = ''\n title = ''\n crumbs = utils.get_crumbs(os.path.relpath(out_name,\n kw['output_folder']),\n is_file=True)\n context = {\n 'code': code,\n 'title': title,\n 'crumbs': crumbs,\n 'lang': kw['default_lang'],\n 'folders': folders,\n 'files': files,\n 'description': title,\n }\n self.site.render_template('listing.tmpl', out_name,\n context)\n\n yield self.group_task()\n\n template_deps = self.site.template_system.template_deps('listing.tmpl')\n for root, dirs, files in os.walk(kw['listings_folder']):\n files = [f for f in files if os.path.splitext(f)[-1] not in ignored_extensions]\n # Render all files\n out_name = os.path.join(\n kw['output_folder'],\n root, kw['index_file']\n )\n yield {\n 'basename': self.name,\n 'name': out_name,\n 'file_dep': template_deps,\n 'targets': [out_name],\n 'actions': [(render_listing, [None, out_name, dirs, files])],\n # This is necessary to reflect changes in blog title,\n # sidebar links, etc.\n 'uptodate': [utils.config_changed(\n self.site.GLOBAL_CONTEXT)],\n 'clean': True,\n }\n for f in files:\n ext = os.path.splitext(f)[-1]\n if ext in ignored_extensions:\n continue\n in_name = os.path.join(root, f)\n out_name = os.path.join(\n kw['output_folder'],\n root,\n f) + '.html'\n yield {\n 'basename': self.name,\n 'name': out_name,\n 'file_dep': template_deps + [in_name],\n 'targets': [out_name],\n 'actions': [(render_listing, [in_name, out_name])],\n # This is necessary to reflect changes in blog title,\n # sidebar links, etc.\n 'uptodate': [utils.config_changed(\n self.site.GLOBAL_CONTEXT)],\n 'clean': True,\n }\n\n def listing_path(self, name, lang):\n return [_f for _f in [self.site.config['LISTINGS_FOLDER'], name +\n '.html'] if _f]\n", "path": "nikola/plugins/task/listings.py"}]} | 1,621 | 132 |
gh_patches_debug_20260 | rasdani/github-patches | git_diff | quantumlib__Cirq-5731 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Qasm output decomposes single qubit MatrixGate into multiple operations instead of one u3 operation
```
import cirq
c = cirq.Circuit(
cirq.MatrixGate(cirq.unitary(cirq.H) @ cirq.unitary(cirq.T)).on(cirq.LineQubit(0)),
)
print(c.to_qasm())
```
Outputs:
```
OPENQASM 2.0;
include "qelib1.inc";
// Qubits: [q(0)]
qreg q[1];
// Gate: [[ 0.707+0.j 0.5 +0.5j]
// [ 0.707+0.j -0.5 -0.5j]]
rz(pi*0.75) q[0];
sx q[0];
s q[0];
```
But should output:
```
// Generated from Cirq v0.15.0
OPENQASM 2.0;
include "qelib1.inc";
// Qubits: [q(0)]
qreg q[1];
u3(pi*1.5,pi*1.0,pi*0.25) q[0];
```
Which I got by using `cirq.PhasedXZGate.from_matrix(...)` and turning that into qasm.
--- END ISSUE ---
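A short sketch of the conversion path the report itself points to: build a `cirq.PhasedXZGate` from the single-qubit matrix and emit QASM from that, which is how the expected single-`u3` output above was produced.

```python
import cirq

# Same single-qubit unitary as in the reproduction above.
matrix = cirq.unitary(cirq.H) @ cirq.unitary(cirq.T)

# PhasedXZGate.from_matrix recovers the gate's exponents and, per the expected
# output above, its QASM form is a single u3 instruction rather than an
# rz/sx/s decomposition.
gate = cirq.PhasedXZGate.from_matrix(matrix)
circuit = cirq.Circuit(gate.on(cirq.LineQubit(0)))
print(circuit.to_qasm())
```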
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-core/cirq/ops/matrix_gates.py`
Content:
```
1 # Copyright 2018 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Quantum gates defined by a matrix."""
16
17 from typing import Any, cast, Dict, Iterable, Optional, Tuple, TYPE_CHECKING
18
19 import numpy as np
20
21 from cirq import linalg, protocols, _import
22 from cirq._compat import proper_repr
23 from cirq.ops import raw_types
24
25 if TYPE_CHECKING:
26 import cirq
27
28 single_qubit_decompositions = _import.LazyLoader(
29 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions'
30 )
31 two_qubit_to_cz = _import.LazyLoader(
32 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions'
33 )
34 three_qubit_decomposition = _import.LazyLoader(
35 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions'
36 )
37
38
39 class MatrixGate(raw_types.Gate):
40 r"""A unitary qubit or qudit gate defined entirely by its numpy matrix.
41
42 For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix:
43
44 $$
45 \begin{bmatrix}
46 0 & i \\
47 1 & 0
48 \end{bmatrix}
49 $$
50 """
51
52 def __init__(
53 self,
54 matrix: np.ndarray,
55 *,
56 name: str = None,
57 qid_shape: Optional[Iterable[int]] = None,
58 unitary_check_rtol: float = 1e-5,
59 unitary_check_atol: float = 1e-8,
60 ) -> None:
61 """Initializes a matrix gate.
62
63 Args:
64 matrix: The matrix that defines the gate.
65 name: The optional name of the gate to be displayed.
66 qid_shape: The shape of state tensor that the matrix applies to.
67 If not specified, this value is inferred by assuming that the
68 matrix is supposed to apply to qubits.
69 unitary_check_rtol: The relative tolerance for checking whether the supplied matrix
70 is unitary. See `cirq.is_unitary`.
71 unitary_check_atol: The absolute tolerance for checking whether the supplied matrix
72 is unitary. See `cirq.is_unitary`.
73
74 Raises:
75 ValueError: If the matrix is not a square numpy array, if the matrix does not match
76 the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is
77 not a power of 2, or if the matrix not unitary (to the supplied precisions).
78 """
79 if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:
80 raise ValueError('`matrix` must be a square 2d numpy array.')
81
82 if qid_shape is None:
83 n = int(np.round(np.log2(matrix.shape[0] or 1)))
84 if 2**n != matrix.shape[0]:
85 raise ValueError(
86 f'Matrix width ({matrix.shape[0]}) is not a power of 2 and '
87 f'qid_shape is not specified.'
88 )
89 qid_shape = (2,) * n
90
91 self._matrix = matrix
92 self._qid_shape = tuple(qid_shape)
93 self._name = name
94 m = int(np.prod(self._qid_shape, dtype=np.int64))
95 if self._matrix.shape != (m, m):
96 raise ValueError(
97 'Wrong matrix shape for qid_shape.\n'
98 f'Matrix shape: {self._matrix.shape}\n'
99 f'qid_shape: {self._qid_shape}\n'
100 )
101
102 if not linalg.is_unitary(matrix, rtol=unitary_check_rtol, atol=unitary_check_atol):
103 raise ValueError(f'Not a unitary matrix: {self._matrix}')
104
105 def _json_dict_(self) -> Dict[str, Any]:
106 return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}
107
108 @classmethod
109 def _from_json_dict_(cls, matrix, qid_shape, **kwargs):
110 return cls(matrix=np.array(matrix), qid_shape=qid_shape)
111
112 def _qid_shape_(self) -> Tuple[int, ...]:
113 return self._qid_shape
114
115 def __pow__(self, exponent: Any) -> 'MatrixGate':
116 if not isinstance(exponent, (int, float)):
117 return NotImplemented
118 e = cast(float, exponent)
119 new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**e)
120 return MatrixGate(new_mat, qid_shape=self._qid_shape)
121
122 def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate':
123 if not isinstance(phase_turns, (int, float)):
124 return NotImplemented
125 if self._qid_shape[qubit_index] != 2:
126 return NotImplemented
127 result = np.copy(self._matrix).reshape(self._qid_shape * 2)
128
129 p = np.exp(2j * np.pi * phase_turns)
130 i = qubit_index
131 j = qubit_index + len(self._qid_shape)
132 result[linalg.slice_for_qubits_equal_to([i], 1)] *= p
133 result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p)
134 return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape)
135
136 def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE':
137 if self._qid_shape == (2,):
138 return [
139 g.on(qubits[0])
140 for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix)
141 ]
142 if self._qid_shape == (2,) * 2:
143 return two_qubit_to_cz.two_qubit_matrix_to_cz_operations(
144 *qubits, self._matrix, allow_partial_czs=True
145 )
146 if self._qid_shape == (2,) * 3:
147 return three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix)
148 return NotImplemented
149
150 def _has_unitary_(self) -> bool:
151 return True
152
153 def _unitary_(self) -> np.ndarray:
154 return np.copy(self._matrix)
155
156 def _circuit_diagram_info_(
157 self, args: 'cirq.CircuitDiagramInfoArgs'
158 ) -> 'cirq.CircuitDiagramInfo':
159 n_qubits = len(self._qid_shape)
160 if self._name is not None:
161 symbols = (
162 [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' for i in range(n_qubits)]
163 )
164 return protocols.CircuitDiagramInfo(wire_symbols=symbols)
165 main = _matrix_to_diagram_symbol(self._matrix, args)
166 rest = [f'#{i+1}' for i in range(1, n_qubits)]
167 return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])
168
169 def __hash__(self) -> int:
170 vals = tuple(v for _, v in np.ndenumerate(self._matrix))
171 return hash((MatrixGate, vals))
172
173 def _approx_eq_(self, other: Any, atol) -> bool:
174 if not isinstance(other, type(self)):
175 return NotImplemented
176 return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol)
177
178 def __eq__(self, other):
179 if not isinstance(other, type(self)):
180 return NotImplemented
181 return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix)
182
183 def __ne__(self, other):
184 return not self == other
185
186 def __repr__(self) -> str:
187 if all(e == 2 for e in self._qid_shape):
188 return f'cirq.MatrixGate({proper_repr(self._matrix)})'
189 return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})'
190
191 def __str__(self) -> str:
192 return str(self._matrix.round(3))
193
194
195 def _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str:
196 if args.precision is not None:
197 matrix = matrix.round(args.precision)
198 result = str(matrix)
199 if args.use_unicode_characters:
200 lines = result.split('\n')
201 for i in range(len(lines)):
202 lines[i] = lines[i].replace('[[', '')
203 lines[i] = lines[i].replace(' [', '')
204 lines[i] = lines[i].replace(']', '')
205 w = max(len(line) for line in lines)
206 for i in range(len(lines)):
207 lines[i] = '│' + lines[i].ljust(w) + '│'
208 lines.insert(0, '┌' + ' ' * w + '┐')
209 lines.append('└' + ' ' * w + '┘')
210 result = '\n'.join(lines)
211 return result
212
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-core/cirq/ops/matrix_gates.py b/cirq-core/cirq/ops/matrix_gates.py
--- a/cirq-core/cirq/ops/matrix_gates.py
+++ b/cirq-core/cirq/ops/matrix_gates.py
@@ -20,7 +20,7 @@
from cirq import linalg, protocols, _import
from cirq._compat import proper_repr
-from cirq.ops import raw_types
+from cirq.ops import raw_types, phased_x_z_gate
if TYPE_CHECKING:
import cirq
@@ -166,6 +166,14 @@
rest = [f'#{i+1}' for i in range(1, n_qubits)]
return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])
+ def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:
+ args.validate_version('2.0')
+ if self._qid_shape == (2,):
+ return protocols.qasm(
+ phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits
+ )
+ return None
+
def __hash__(self) -> int:
vals = tuple(v for _, v in np.ndenumerate(self._matrix))
return hash((MatrixGate, vals))
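
Note on the fix: the added `_qasm_` hook converts a single-qubit `MatrixGate` to a `PhasedXZGate` via `from_matrix` and emits that gate's QASM (a single `u3`-style rotation); for any other qid shape it returns `None`, so the existing decomposition path still applies. A rough check of the intended behaviour — this assumes a cirq build that already includes the patch, and the expected `u3(...)` line comes from the issue report rather than from running this here:

```python
import cirq

# Same gate as in the issue report: H followed by T, wrapped in a MatrixGate.
gate = cirq.MatrixGate(cirq.unitary(cirq.H) @ cirq.unitary(cirq.T))
circuit = cirq.Circuit(gate.on(cirq.LineQubit(0)))

# With the patch, the single-qubit MatrixGate should serialize as one
# "u3(...) q[0];" line instead of an rz/sx/s chain.
print(circuit.to_qasm())
```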
| {"golden_diff": "diff --git a/cirq-core/cirq/ops/matrix_gates.py b/cirq-core/cirq/ops/matrix_gates.py\n--- a/cirq-core/cirq/ops/matrix_gates.py\n+++ b/cirq-core/cirq/ops/matrix_gates.py\n@@ -20,7 +20,7 @@\n \n from cirq import linalg, protocols, _import\n from cirq._compat import proper_repr\n-from cirq.ops import raw_types\n+from cirq.ops import raw_types, phased_x_z_gate\n \n if TYPE_CHECKING:\n import cirq\n@@ -166,6 +166,14 @@\n rest = [f'#{i+1}' for i in range(1, n_qubits)]\n return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])\n \n+ def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n+ args.validate_version('2.0')\n+ if self._qid_shape == (2,):\n+ return protocols.qasm(\n+ phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits\n+ )\n+ return None\n+\n def __hash__(self) -> int:\n vals = tuple(v for _, v in np.ndenumerate(self._matrix))\n return hash((MatrixGate, vals))\n", "issue": "Qasm output decomposes single qubit MatrixGate into multiple operations instead of one u3 operation\n```\r\nimport cirq\r\n\r\nc = cirq.Circuit(\r\n cirq.MatrixGate(cirq.unitary(cirq.H) @ cirq.unitary(cirq.T)).on(cirq.LineQubit(0)),\r\n)\r\nprint(c.to_qasm())\r\n```\r\n\r\nOutputs:\r\n\r\n```\r\nOPENQASM 2.0;\r\ninclude \"qelib1.inc\";\r\n\r\n\r\n// Qubits: [q(0)]\r\nqreg q[1];\r\n\r\n\r\n// Gate: [[ 0.707+0.j 0.5 +0.5j]\r\n // [ 0.707+0.j -0.5 -0.5j]]\r\nrz(pi*0.75) q[0];\r\nsx q[0];\r\ns q[0];\r\n```\r\n\r\nBut should output:\r\n\r\n```\r\n// Generated from Cirq v0.15.0\r\n\r\nOPENQASM 2.0;\r\ninclude \"qelib1.inc\";\r\n\r\n\r\n// Qubits: [q(0)]\r\nqreg q[1];\r\n\r\n\r\nu3(pi*1.5,pi*1.0,pi*0.25) q[0];\r\n```\r\n\r\nWhich I got by using `cirq.PhasedXZGate.from_matrix(...)` and turning that into qasm.\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates defined by a matrix.\"\"\"\n\nfrom typing import Any, cast, Dict, Iterable, Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, _import\nfrom cirq._compat import proper_repr\nfrom cirq.ops import raw_types\n\nif TYPE_CHECKING:\n import cirq\n\nsingle_qubit_decompositions = _import.LazyLoader(\n 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions'\n)\ntwo_qubit_to_cz = _import.LazyLoader(\n 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions'\n)\nthree_qubit_decomposition = _import.LazyLoader(\n 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions'\n)\n\n\nclass MatrixGate(raw_types.Gate):\n r\"\"\"A unitary qubit or qudit gate defined entirely by its numpy matrix.\n\n For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix:\n\n $$\n \\begin{bmatrix}\n 0 & i \\\\\n 1 & 0\n \\end{bmatrix}\n $$\n \"\"\"\n\n def __init__(\n self,\n matrix: np.ndarray,\n *,\n name: str = None,\n qid_shape: Optional[Iterable[int]] = None,\n unitary_check_rtol: 
float = 1e-5,\n unitary_check_atol: float = 1e-8,\n ) -> None:\n \"\"\"Initializes a matrix gate.\n\n Args:\n matrix: The matrix that defines the gate.\n name: The optional name of the gate to be displayed.\n qid_shape: The shape of state tensor that the matrix applies to.\n If not specified, this value is inferred by assuming that the\n matrix is supposed to apply to qubits.\n unitary_check_rtol: The relative tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n unitary_check_atol: The absolute tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n\n Raises:\n ValueError: If the matrix is not a square numpy array, if the matrix does not match\n the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is\n not a power of 2, or if the matrix not unitary (to the supplied precisions).\n \"\"\"\n if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n raise ValueError('`matrix` must be a square 2d numpy array.')\n\n if qid_shape is None:\n n = int(np.round(np.log2(matrix.shape[0] or 1)))\n if 2**n != matrix.shape[0]:\n raise ValueError(\n f'Matrix width ({matrix.shape[0]}) is not a power of 2 and '\n f'qid_shape is not specified.'\n )\n qid_shape = (2,) * n\n\n self._matrix = matrix\n self._qid_shape = tuple(qid_shape)\n self._name = name\n m = int(np.prod(self._qid_shape, dtype=np.int64))\n if self._matrix.shape != (m, m):\n raise ValueError(\n 'Wrong matrix shape for qid_shape.\\n'\n f'Matrix shape: {self._matrix.shape}\\n'\n f'qid_shape: {self._qid_shape}\\n'\n )\n\n if not linalg.is_unitary(matrix, rtol=unitary_check_rtol, atol=unitary_check_atol):\n raise ValueError(f'Not a unitary matrix: {self._matrix}')\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}\n\n @classmethod\n def _from_json_dict_(cls, matrix, qid_shape, **kwargs):\n return cls(matrix=np.array(matrix), qid_shape=qid_shape)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def __pow__(self, exponent: Any) -> 'MatrixGate':\n if not isinstance(exponent, (int, float)):\n return NotImplemented\n e = cast(float, exponent)\n new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**e)\n return MatrixGate(new_mat, qid_shape=self._qid_shape)\n\n def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate':\n if not isinstance(phase_turns, (int, float)):\n return NotImplemented\n if self._qid_shape[qubit_index] != 2:\n return NotImplemented\n result = np.copy(self._matrix).reshape(self._qid_shape * 2)\n\n p = np.exp(2j * np.pi * phase_turns)\n i = qubit_index\n j = qubit_index + len(self._qid_shape)\n result[linalg.slice_for_qubits_equal_to([i], 1)] *= p\n result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p)\n return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape)\n\n def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE':\n if self._qid_shape == (2,):\n return [\n g.on(qubits[0])\n for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix)\n ]\n if self._qid_shape == (2,) * 2:\n return two_qubit_to_cz.two_qubit_matrix_to_cz_operations(\n *qubits, self._matrix, allow_partial_czs=True\n )\n if self._qid_shape == (2,) * 3:\n return three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix)\n return NotImplemented\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _unitary_(self) -> np.ndarray:\n return 
np.copy(self._matrix)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n n_qubits = len(self._qid_shape)\n if self._name is not None:\n symbols = (\n [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' for i in range(n_qubits)]\n )\n return protocols.CircuitDiagramInfo(wire_symbols=symbols)\n main = _matrix_to_diagram_symbol(self._matrix, args)\n rest = [f'#{i+1}' for i in range(1, n_qubits)]\n return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])\n\n def __hash__(self) -> int:\n vals = tuple(v for _, v in np.ndenumerate(self._matrix))\n return hash((MatrixGate, vals))\n\n def _approx_eq_(self, other: Any, atol) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix)\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self) -> str:\n if all(e == 2 for e in self._qid_shape):\n return f'cirq.MatrixGate({proper_repr(self._matrix)})'\n return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})'\n\n def __str__(self) -> str:\n return str(self._matrix.round(3))\n\n\ndef _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n matrix = matrix.round(args.precision)\n result = str(matrix)\n if args.use_unicode_characters:\n lines = result.split('\\n')\n for i in range(len(lines)):\n lines[i] = lines[i].replace('[[', '')\n lines[i] = lines[i].replace(' [', '')\n lines[i] = lines[i].replace(']', '')\n w = max(len(line) for line in lines)\n for i in range(len(lines)):\n lines[i] = '\u2502' + lines[i].ljust(w) + '\u2502'\n lines.insert(0, '\u250c' + ' ' * w + '\u2510')\n lines.append('\u2514' + ' ' * w + '\u2518')\n result = '\\n'.join(lines)\n return result\n", "path": "cirq-core/cirq/ops/matrix_gates.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Quantum gates defined by a matrix.\"\"\"\n\nfrom typing import Any, cast, Dict, Iterable, Optional, Tuple, TYPE_CHECKING\n\nimport numpy as np\n\nfrom cirq import linalg, protocols, _import\nfrom cirq._compat import proper_repr\nfrom cirq.ops import raw_types, phased_x_z_gate\n\nif TYPE_CHECKING:\n import cirq\n\nsingle_qubit_decompositions = _import.LazyLoader(\n 'single_qubit_decompositions', globals(), 'cirq.transformers.analytical_decompositions'\n)\ntwo_qubit_to_cz = _import.LazyLoader(\n 'two_qubit_to_cz', globals(), 'cirq.transformers.analytical_decompositions'\n)\nthree_qubit_decomposition = _import.LazyLoader(\n 'three_qubit_decomposition', globals(), 'cirq.transformers.analytical_decompositions'\n)\n\n\nclass MatrixGate(raw_types.Gate):\n r\"\"\"A unitary qubit or qudit gate defined entirely by its numpy 
matrix.\n\n For example `cirq.MatrixGate(np.array([[0, 1j], [1, 0]]))` has the unitary matrix:\n\n $$\n \\begin{bmatrix}\n 0 & i \\\\\n 1 & 0\n \\end{bmatrix}\n $$\n \"\"\"\n\n def __init__(\n self,\n matrix: np.ndarray,\n *,\n name: str = None,\n qid_shape: Optional[Iterable[int]] = None,\n unitary_check_rtol: float = 1e-5,\n unitary_check_atol: float = 1e-8,\n ) -> None:\n \"\"\"Initializes a matrix gate.\n\n Args:\n matrix: The matrix that defines the gate.\n name: The optional name of the gate to be displayed.\n qid_shape: The shape of state tensor that the matrix applies to.\n If not specified, this value is inferred by assuming that the\n matrix is supposed to apply to qubits.\n unitary_check_rtol: The relative tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n unitary_check_atol: The absolute tolerance for checking whether the supplied matrix\n is unitary. See `cirq.is_unitary`.\n\n Raises:\n ValueError: If the matrix is not a square numpy array, if the matrix does not match\n the `qid_shape`, if `qid_shape` is not supplied and the matrix dimension is\n not a power of 2, or if the matrix not unitary (to the supplied precisions).\n \"\"\"\n if len(matrix.shape) != 2 or matrix.shape[0] != matrix.shape[1]:\n raise ValueError('`matrix` must be a square 2d numpy array.')\n\n if qid_shape is None:\n n = int(np.round(np.log2(matrix.shape[0] or 1)))\n if 2**n != matrix.shape[0]:\n raise ValueError(\n f'Matrix width ({matrix.shape[0]}) is not a power of 2 and '\n f'qid_shape is not specified.'\n )\n qid_shape = (2,) * n\n\n self._matrix = matrix\n self._qid_shape = tuple(qid_shape)\n self._name = name\n m = int(np.prod(self._qid_shape, dtype=np.int64))\n if self._matrix.shape != (m, m):\n raise ValueError(\n 'Wrong matrix shape for qid_shape.\\n'\n f'Matrix shape: {self._matrix.shape}\\n'\n f'qid_shape: {self._qid_shape}\\n'\n )\n\n if not linalg.is_unitary(matrix, rtol=unitary_check_rtol, atol=unitary_check_atol):\n raise ValueError(f'Not a unitary matrix: {self._matrix}')\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {'matrix': self._matrix.tolist(), 'qid_shape': self._qid_shape}\n\n @classmethod\n def _from_json_dict_(cls, matrix, qid_shape, **kwargs):\n return cls(matrix=np.array(matrix), qid_shape=qid_shape)\n\n def _qid_shape_(self) -> Tuple[int, ...]:\n return self._qid_shape\n\n def __pow__(self, exponent: Any) -> 'MatrixGate':\n if not isinstance(exponent, (int, float)):\n return NotImplemented\n e = cast(float, exponent)\n new_mat = linalg.map_eigenvalues(self._matrix, lambda b: b**e)\n return MatrixGate(new_mat, qid_shape=self._qid_shape)\n\n def _phase_by_(self, phase_turns: float, qubit_index: int) -> 'MatrixGate':\n if not isinstance(phase_turns, (int, float)):\n return NotImplemented\n if self._qid_shape[qubit_index] != 2:\n return NotImplemented\n result = np.copy(self._matrix).reshape(self._qid_shape * 2)\n\n p = np.exp(2j * np.pi * phase_turns)\n i = qubit_index\n j = qubit_index + len(self._qid_shape)\n result[linalg.slice_for_qubits_equal_to([i], 1)] *= p\n result[linalg.slice_for_qubits_equal_to([j], 1)] *= np.conj(p)\n return MatrixGate(matrix=result.reshape(self._matrix.shape), qid_shape=self._qid_shape)\n\n def _decompose_(self, qubits: Tuple['cirq.Qid', ...]) -> 'cirq.OP_TREE':\n if self._qid_shape == (2,):\n return [\n g.on(qubits[0])\n for g in single_qubit_decompositions.single_qubit_matrix_to_gates(self._matrix)\n ]\n if self._qid_shape == (2,) * 2:\n return two_qubit_to_cz.two_qubit_matrix_to_cz_operations(\n 
*qubits, self._matrix, allow_partial_czs=True\n )\n if self._qid_shape == (2,) * 3:\n return three_qubit_decomposition.three_qubit_matrix_to_operations(*qubits, self._matrix)\n return NotImplemented\n\n def _has_unitary_(self) -> bool:\n return True\n\n def _unitary_(self) -> np.ndarray:\n return np.copy(self._matrix)\n\n def _circuit_diagram_info_(\n self, args: 'cirq.CircuitDiagramInfoArgs'\n ) -> 'cirq.CircuitDiagramInfo':\n n_qubits = len(self._qid_shape)\n if self._name is not None:\n symbols = (\n [self._name] if n_qubits == 1 else [f'{self._name}[{i+1}]' for i in range(n_qubits)]\n )\n return protocols.CircuitDiagramInfo(wire_symbols=symbols)\n main = _matrix_to_diagram_symbol(self._matrix, args)\n rest = [f'#{i+1}' for i in range(1, n_qubits)]\n return protocols.CircuitDiagramInfo(wire_symbols=[main, *rest])\n\n def _qasm_(self, args: 'cirq.QasmArgs', qubits: Tuple['cirq.Qid', ...]) -> Optional[str]:\n args.validate_version('2.0')\n if self._qid_shape == (2,):\n return protocols.qasm(\n phased_x_z_gate.PhasedXZGate.from_matrix(self._matrix), args=args, qubits=qubits\n )\n return None\n\n def __hash__(self) -> int:\n vals = tuple(v for _, v in np.ndenumerate(self._matrix))\n return hash((MatrixGate, vals))\n\n def _approx_eq_(self, other: Any, atol) -> bool:\n if not isinstance(other, type(self)):\n return NotImplemented\n return np.allclose(self._matrix, other._matrix, rtol=0, atol=atol)\n\n def __eq__(self, other):\n if not isinstance(other, type(self)):\n return NotImplemented\n return self._qid_shape == other._qid_shape and np.array_equal(self._matrix, other._matrix)\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self) -> str:\n if all(e == 2 for e in self._qid_shape):\n return f'cirq.MatrixGate({proper_repr(self._matrix)})'\n return f'cirq.MatrixGate({proper_repr(self._matrix)}, qid_shape={self._qid_shape})'\n\n def __str__(self) -> str:\n return str(self._matrix.round(3))\n\n\ndef _matrix_to_diagram_symbol(matrix: np.ndarray, args: 'protocols.CircuitDiagramInfoArgs') -> str:\n if args.precision is not None:\n matrix = matrix.round(args.precision)\n result = str(matrix)\n if args.use_unicode_characters:\n lines = result.split('\\n')\n for i in range(len(lines)):\n lines[i] = lines[i].replace('[[', '')\n lines[i] = lines[i].replace(' [', '')\n lines[i] = lines[i].replace(']', '')\n w = max(len(line) for line in lines)\n for i in range(len(lines)):\n lines[i] = '\u2502' + lines[i].ljust(w) + '\u2502'\n lines.insert(0, '\u250c' + ' ' * w + '\u2510')\n lines.append('\u2514' + ' ' * w + '\u2518')\n result = '\\n'.join(lines)\n return result\n", "path": "cirq-core/cirq/ops/matrix_gates.py"}]} | 3,184 | 312 |
gh_patches_debug_18123 | rasdani/github-patches | git_diff | svthalia__concrexit-1719 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing frontpage articles in API v2
### Describe the bug
When there are multiple frontpage articles that should be visible (such as currently on staging), `api/v2/announcements/frontpage-articles/` shows only one.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/announcements/models.py`
Content:
```
1 """The models defined by the announcement package."""
2 from django.core.validators import (
3 FileExtensionValidator,
4 get_available_image_extensions,
5 )
6 from django.db import models
7 from django.db.models import CharField, Manager, Q
8 from django.utils import timezone
9 from django.utils.translation import gettext_lazy as _
10 from tinymce.models import HTMLField
11
12
13 class VisibleObjectManager(Manager):
14 """Get all active members, i.e. who have a committee membership."""
15
16 def get_queryset(self):
17 """Select all visible items."""
18 return (
19 super()
20 .get_queryset()
21 .filter(
22 (Q(until__isnull=True) | Q(until__gt=timezone.now()))
23 & (Q(since__isnull=True) | Q(since__lte=timezone.now()))
24 & ~(Q(since__isnull=True) & Q(until__isnull=True))
25 )
26 )
27
28
29 class Announcement(models.Model):
30 """Describes an announcement."""
31
32 objects = models.Manager()
33 visible_objects = VisibleObjectManager()
34
35 content = HTMLField(
36 verbose_name=_("Content"),
37 help_text=_("The content of the announcement; what text to display."),
38 blank=False,
39 max_length=500,
40 )
41
42 since = models.DateTimeField(
43 verbose_name=_("Display since"),
44 help_text=_("Hide this announcement before this time."),
45 default=timezone.now,
46 )
47
48 until = models.DateTimeField(
49 verbose_name=_("Display until"),
50 help_text=_("Hide this announcement after this time."),
51 blank=True,
52 null=True,
53 )
54
55 icon = models.CharField(
56 verbose_name=_("Font Awesome icon"),
57 help_text=_("Font Awesome abbreviation for icon to use."),
58 max_length=150,
59 default="bullhorn",
60 )
61
62 closeable = models.BooleanField(default=True)
63
64 class Meta:
65 ordering = ("-since",)
66
67 def __str__(self):
68 return str(self.content)
69
70 @property
71 def is_visible(self):
72 """Is this announcement currently visible."""
73 return (
74 (self.until is None or self.until > timezone.now())
75 and (self.since is None or self.since <= timezone.now())
76 and not (self.since is None and self.until is None)
77 )
78
79
80 class FrontpageArticle(models.Model):
81 """Front page articles."""
82
83 objects = models.Manager()
84 visible_objects = VisibleObjectManager()
85
86 title = models.CharField(
87 verbose_name=_("Title"),
88 help_text=_("The title of the article; what goes in the header"),
89 blank=False,
90 max_length=80,
91 )
92
93 content = HTMLField(
94 verbose_name=_("Content"),
95 help_text=_("The content of the article; what text to display."),
96 blank=False,
97 max_length=5000,
98 )
99
100 since = models.DateTimeField(
101 verbose_name=_("Display since"),
102 help_text=_("Hide this article before this time."),
103 default=timezone.now,
104 )
105
106 until = models.DateTimeField(
107 verbose_name=_("Display until"),
108 help_text=_("Hide this article after this time."),
109 blank=True,
110 null=True,
111 )
112
113 class Meta:
114 ordering = ("-since",)
115
116 def __str__(self):
117 return str(self.title)
118
119 @property
120 def is_visible(self):
121 """Is this announcement currently visible."""
122 return (
123 (self.until is None or self.until > timezone.now())
124 and (self.since is None or self.since <= timezone.now())
125 and not (self.since is None and self.until is None)
126 )
127
128
129 def validate_image(value):
130 return FileExtensionValidator(
131 allowed_extensions=[*get_available_image_extensions(), "svg"]
132 )(value)
133
134
135 class Slide(models.Model):
136 """Describes an announcement."""
137
138 objects = models.Manager()
139 visible_objects = VisibleObjectManager()
140
141 title = CharField(
142 verbose_name=_("Title"),
143 help_text=_("The title of the slide; just for the admin."),
144 blank=False,
145 max_length=100,
146 )
147
148 content = models.FileField(
149 verbose_name=_("Content"),
150 help_text=_("The content of the slide; what image to display."),
151 blank=False,
152 upload_to="public/announcements/slides/",
153 validators=[validate_image],
154 )
155
156 since = models.DateTimeField(
157 verbose_name=_("Display since"),
158 help_text=_(
159 "Hide this slide before this time. When all date- and "
160 "time-fields are left blank, the slide won't "
161 "be visible. It will, however, be visible on an event-page "
162 "if it's linked to an event."
163 ),
164 default=timezone.now,
165 blank=True,
166 null=True,
167 )
168
169 until = models.DateTimeField(
170 verbose_name=_("Display until"),
171 help_text=_("Hide this slide after this time."),
172 blank=True,
173 null=True,
174 )
175
176 order = models.PositiveIntegerField(
177 verbose_name=_("Order"),
178 help_text=_("Approximately where this slide should appear in the order"),
179 default=0,
180 )
181
182 members_only = models.BooleanField(
183 verbose_name=_("Display only for authenticated members"), default=False
184 )
185
186 url = models.URLField(
187 verbose_name=_("Link"),
188 help_text=_("Place the user is taken to when clicking the slide"),
189 blank=True,
190 null=True,
191 )
192
193 url_blank = models.BooleanField(
194 verbose_name=_("Link outside thalia.nu"),
195 help_text=_("Clicking the slide will open a new tab"),
196 default=False,
197 )
198
199 class Meta:
200 ordering = ("-since",)
201
202 @property
203 def is_visible(self):
204 """Is this slide currently visible."""
205 return (
206 (self.until is None or self.until > timezone.now())
207 and (self.since is None or self.since <= timezone.now())
208 and not (self.since is None and self.until is None)
209 )
210
211 def __str__(self):
212 return str(self.title)
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/announcements/models.py b/website/announcements/models.py
--- a/website/announcements/models.py
+++ b/website/announcements/models.py
@@ -5,6 +5,7 @@
)
from django.db import models
from django.db.models import CharField, Manager, Q
+from django.db.models.functions import Now
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from tinymce.models import HTMLField
@@ -19,8 +20,8 @@
super()
.get_queryset()
.filter(
- (Q(until__isnull=True) | Q(until__gt=timezone.now()))
- & (Q(since__isnull=True) | Q(since__lte=timezone.now()))
+ (Q(until__isnull=True) | Q(until__gt=Now()))
+ & (Q(since__isnull=True) | Q(since__lte=Now()))
& ~(Q(since__isnull=True) & Q(until__isnull=True))
)
)
| {"golden_diff": "diff --git a/website/announcements/models.py b/website/announcements/models.py\n--- a/website/announcements/models.py\n+++ b/website/announcements/models.py\n@@ -5,6 +5,7 @@\n )\n from django.db import models\n from django.db.models import CharField, Manager, Q\n+from django.db.models.functions import Now\n from django.utils import timezone\n from django.utils.translation import gettext_lazy as _\n from tinymce.models import HTMLField\n@@ -19,8 +20,8 @@\n super()\n .get_queryset()\n .filter(\n- (Q(until__isnull=True) | Q(until__gt=timezone.now()))\n- & (Q(since__isnull=True) | Q(since__lte=timezone.now()))\n+ (Q(until__isnull=True) | Q(until__gt=Now()))\n+ & (Q(since__isnull=True) | Q(since__lte=Now()))\n & ~(Q(since__isnull=True) & Q(until__isnull=True))\n )\n )\n", "issue": "Missing frontpage articles in API v2\n### Describe the bug\r\nWhen there are multiple frontpage articles that should be visible (such as currently on staging), `api/v2/announcements/frontpage-articles/` shows only one.\r\n\n", "before_files": [{"content": "\"\"\"The models defined by the announcement package.\"\"\"\nfrom django.core.validators import (\n FileExtensionValidator,\n get_available_image_extensions,\n)\nfrom django.db import models\nfrom django.db.models import CharField, Manager, Q\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom tinymce.models import HTMLField\n\n\nclass VisibleObjectManager(Manager):\n \"\"\"Get all active members, i.e. who have a committee membership.\"\"\"\n\n def get_queryset(self):\n \"\"\"Select all visible items.\"\"\"\n return (\n super()\n .get_queryset()\n .filter(\n (Q(until__isnull=True) | Q(until__gt=timezone.now()))\n & (Q(since__isnull=True) | Q(since__lte=timezone.now()))\n & ~(Q(since__isnull=True) & Q(until__isnull=True))\n )\n )\n\n\nclass Announcement(models.Model):\n \"\"\"Describes an announcement.\"\"\"\n\n objects = models.Manager()\n visible_objects = VisibleObjectManager()\n\n content = HTMLField(\n verbose_name=_(\"Content\"),\n help_text=_(\"The content of the announcement; what text to display.\"),\n blank=False,\n max_length=500,\n )\n\n since = models.DateTimeField(\n verbose_name=_(\"Display since\"),\n help_text=_(\"Hide this announcement before this time.\"),\n default=timezone.now,\n )\n\n until = models.DateTimeField(\n verbose_name=_(\"Display until\"),\n help_text=_(\"Hide this announcement after this time.\"),\n blank=True,\n null=True,\n )\n\n icon = models.CharField(\n verbose_name=_(\"Font Awesome icon\"),\n help_text=_(\"Font Awesome abbreviation for icon to use.\"),\n max_length=150,\n default=\"bullhorn\",\n )\n\n closeable = models.BooleanField(default=True)\n\n class Meta:\n ordering = (\"-since\",)\n\n def __str__(self):\n return str(self.content)\n\n @property\n def is_visible(self):\n \"\"\"Is this announcement currently visible.\"\"\"\n return (\n (self.until is None or self.until > timezone.now())\n and (self.since is None or self.since <= timezone.now())\n and not (self.since is None and self.until is None)\n )\n\n\nclass FrontpageArticle(models.Model):\n \"\"\"Front page articles.\"\"\"\n\n objects = models.Manager()\n visible_objects = VisibleObjectManager()\n\n title = models.CharField(\n verbose_name=_(\"Title\"),\n help_text=_(\"The title of the article; what goes in the header\"),\n blank=False,\n max_length=80,\n )\n\n content = HTMLField(\n verbose_name=_(\"Content\"),\n help_text=_(\"The content of the article; what text to display.\"),\n blank=False,\n 
max_length=5000,\n )\n\n since = models.DateTimeField(\n verbose_name=_(\"Display since\"),\n help_text=_(\"Hide this article before this time.\"),\n default=timezone.now,\n )\n\n until = models.DateTimeField(\n verbose_name=_(\"Display until\"),\n help_text=_(\"Hide this article after this time.\"),\n blank=True,\n null=True,\n )\n\n class Meta:\n ordering = (\"-since\",)\n\n def __str__(self):\n return str(self.title)\n\n @property\n def is_visible(self):\n \"\"\"Is this announcement currently visible.\"\"\"\n return (\n (self.until is None or self.until > timezone.now())\n and (self.since is None or self.since <= timezone.now())\n and not (self.since is None and self.until is None)\n )\n\n\ndef validate_image(value):\n return FileExtensionValidator(\n allowed_extensions=[*get_available_image_extensions(), \"svg\"]\n )(value)\n\n\nclass Slide(models.Model):\n \"\"\"Describes an announcement.\"\"\"\n\n objects = models.Manager()\n visible_objects = VisibleObjectManager()\n\n title = CharField(\n verbose_name=_(\"Title\"),\n help_text=_(\"The title of the slide; just for the admin.\"),\n blank=False,\n max_length=100,\n )\n\n content = models.FileField(\n verbose_name=_(\"Content\"),\n help_text=_(\"The content of the slide; what image to display.\"),\n blank=False,\n upload_to=\"public/announcements/slides/\",\n validators=[validate_image],\n )\n\n since = models.DateTimeField(\n verbose_name=_(\"Display since\"),\n help_text=_(\n \"Hide this slide before this time. When all date- and \"\n \"time-fields are left blank, the slide won't \"\n \"be visible. It will, however, be visible on an event-page \"\n \"if it's linked to an event.\"\n ),\n default=timezone.now,\n blank=True,\n null=True,\n )\n\n until = models.DateTimeField(\n verbose_name=_(\"Display until\"),\n help_text=_(\"Hide this slide after this time.\"),\n blank=True,\n null=True,\n )\n\n order = models.PositiveIntegerField(\n verbose_name=_(\"Order\"),\n help_text=_(\"Approximately where this slide should appear in the order\"),\n default=0,\n )\n\n members_only = models.BooleanField(\n verbose_name=_(\"Display only for authenticated members\"), default=False\n )\n\n url = models.URLField(\n verbose_name=_(\"Link\"),\n help_text=_(\"Place the user is taken to when clicking the slide\"),\n blank=True,\n null=True,\n )\n\n url_blank = models.BooleanField(\n verbose_name=_(\"Link outside thalia.nu\"),\n help_text=_(\"Clicking the slide will open a new tab\"),\n default=False,\n )\n\n class Meta:\n ordering = (\"-since\",)\n\n @property\n def is_visible(self):\n \"\"\"Is this slide currently visible.\"\"\"\n return (\n (self.until is None or self.until > timezone.now())\n and (self.since is None or self.since <= timezone.now())\n and not (self.since is None and self.until is None)\n )\n\n def __str__(self):\n return str(self.title)\n", "path": "website/announcements/models.py"}], "after_files": [{"content": "\"\"\"The models defined by the announcement package.\"\"\"\nfrom django.core.validators import (\n FileExtensionValidator,\n get_available_image_extensions,\n)\nfrom django.db import models\nfrom django.db.models import CharField, Manager, Q\nfrom django.db.models.functions import Now\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom tinymce.models import HTMLField\n\n\nclass VisibleObjectManager(Manager):\n \"\"\"Get all active members, i.e. 
who have a committee membership.\"\"\"\n\n def get_queryset(self):\n \"\"\"Select all visible items.\"\"\"\n return (\n super()\n .get_queryset()\n .filter(\n (Q(until__isnull=True) | Q(until__gt=Now()))\n & (Q(since__isnull=True) | Q(since__lte=Now()))\n & ~(Q(since__isnull=True) & Q(until__isnull=True))\n )\n )\n\n\nclass Announcement(models.Model):\n \"\"\"Describes an announcement.\"\"\"\n\n objects = models.Manager()\n visible_objects = VisibleObjectManager()\n\n content = HTMLField(\n verbose_name=_(\"Content\"),\n help_text=_(\"The content of the announcement; what text to display.\"),\n blank=False,\n max_length=500,\n )\n\n since = models.DateTimeField(\n verbose_name=_(\"Display since\"),\n help_text=_(\"Hide this announcement before this time.\"),\n default=timezone.now,\n )\n\n until = models.DateTimeField(\n verbose_name=_(\"Display until\"),\n help_text=_(\"Hide this announcement after this time.\"),\n blank=True,\n null=True,\n )\n\n icon = models.CharField(\n verbose_name=_(\"Font Awesome icon\"),\n help_text=_(\"Font Awesome abbreviation for icon to use.\"),\n max_length=150,\n default=\"bullhorn\",\n )\n\n closeable = models.BooleanField(default=True)\n\n class Meta:\n ordering = (\"-since\",)\n\n def __str__(self):\n return str(self.content)\n\n @property\n def is_visible(self):\n \"\"\"Is this announcement currently visible.\"\"\"\n return (\n (self.until is None or self.until > timezone.now())\n and (self.since is None or self.since <= timezone.now())\n and not (self.since is None and self.until is None)\n )\n\n\nclass FrontpageArticle(models.Model):\n \"\"\"Front page articles.\"\"\"\n\n objects = models.Manager()\n visible_objects = VisibleObjectManager()\n\n title = models.CharField(\n verbose_name=_(\"Title\"),\n help_text=_(\"The title of the article; what goes in the header\"),\n blank=False,\n max_length=80,\n )\n\n content = HTMLField(\n verbose_name=_(\"Content\"),\n help_text=_(\"The content of the article; what text to display.\"),\n blank=False,\n max_length=5000,\n )\n\n since = models.DateTimeField(\n verbose_name=_(\"Display since\"),\n help_text=_(\"Hide this article before this time.\"),\n default=timezone.now,\n )\n\n until = models.DateTimeField(\n verbose_name=_(\"Display until\"),\n help_text=_(\"Hide this article after this time.\"),\n blank=True,\n null=True,\n )\n\n class Meta:\n ordering = (\"-since\",)\n\n def __str__(self):\n return str(self.title)\n\n @property\n def is_visible(self):\n \"\"\"Is this announcement currently visible.\"\"\"\n return (\n (self.until is None or self.until > timezone.now())\n and (self.since is None or self.since <= timezone.now())\n and not (self.since is None and self.until is None)\n )\n\n\ndef validate_image(value):\n return FileExtensionValidator(\n allowed_extensions=[*get_available_image_extensions(), \"svg\"]\n )(value)\n\n\nclass Slide(models.Model):\n \"\"\"Describes an announcement.\"\"\"\n\n objects = models.Manager()\n visible_objects = VisibleObjectManager()\n\n title = CharField(\n verbose_name=_(\"Title\"),\n help_text=_(\"The title of the slide; just for the admin.\"),\n blank=False,\n max_length=100,\n )\n\n content = models.FileField(\n verbose_name=_(\"Content\"),\n help_text=_(\"The content of the slide; what image to display.\"),\n blank=False,\n upload_to=\"public/announcements/slides/\",\n validators=[validate_image],\n )\n\n since = models.DateTimeField(\n verbose_name=_(\"Display since\"),\n help_text=_(\n \"Hide this slide before this time. 
When all date- and \"\n \"time-fields are left blank, the slide won't \"\n \"be visible. It will, however, be visible on an event-page \"\n \"if it's linked to an event.\"\n ),\n default=timezone.now,\n blank=True,\n null=True,\n )\n\n until = models.DateTimeField(\n verbose_name=_(\"Display until\"),\n help_text=_(\"Hide this slide after this time.\"),\n blank=True,\n null=True,\n )\n\n order = models.PositiveIntegerField(\n verbose_name=_(\"Order\"),\n help_text=_(\"Approximately where this slide should appear in the order\"),\n default=0,\n )\n\n members_only = models.BooleanField(\n verbose_name=_(\"Display only for authenticated members\"), default=False\n )\n\n url = models.URLField(\n verbose_name=_(\"Link\"),\n help_text=_(\"Place the user is taken to when clicking the slide\"),\n blank=True,\n null=True,\n )\n\n url_blank = models.BooleanField(\n verbose_name=_(\"Link outside thalia.nu\"),\n help_text=_(\"Clicking the slide will open a new tab\"),\n default=False,\n )\n\n class Meta:\n ordering = (\"-since\",)\n\n @property\n def is_visible(self):\n \"\"\"Is this slide currently visible.\"\"\"\n return (\n (self.until is None or self.until > timezone.now())\n and (self.since is None or self.since <= timezone.now())\n and not (self.since is None and self.until is None)\n )\n\n def __str__(self):\n return str(self.title)\n", "path": "website/announcements/models.py"}]} | 2,103 | 233 |
gh_patches_debug_21529 | rasdani/github-patches | git_diff | ephios-dev__ephios-172 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Age checking for signup considers wrong date
When checking the participant age, the age at the current date is evaluated instead of the age at the event date
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ephios/event_management/signup.py`
Content:
```
1 import functools
2 import json
3 from argparse import Namespace
4 from dataclasses import dataclass, field
5 from datetime import date
6 from typing import List, Optional
7
8 import django.dispatch
9 from django import forms
10 from django.contrib import messages
11 from django.contrib.auth import get_user_model
12 from django.core.exceptions import ValidationError
13 from django.db import transaction
14 from django.db.models import QuerySet
15 from django.shortcuts import redirect
16 from django.template import Context, Template
17 from django.template.defaultfilters import yesno
18 from django.utils import formats, timezone
19 from django.utils.functional import cached_property
20 from django.utils.translation import gettext_lazy as _
21 from django.views import View
22
23 from ephios.event_management.models import AbstractParticipation, LocalParticipation, Shift
24 from ephios.extra.widgets import CustomSplitDateTimeWidget
25 from ephios.user_management.models import Qualification
26
27 register_signup_methods = django.dispatch.Signal(providing_args=[])
28
29
30 def all_signup_methods():
31 for _, methods in register_signup_methods.send(None):
32 yield from methods
33
34
35 def signup_method_from_slug(slug, shift=None):
36 for method in all_signup_methods():
37 if method.slug == slug:
38 return method(shift)
39 raise ValueError(_("Signup Method '{slug}' was not found.").format(slug=slug))
40
41
42 @dataclass(frozen=True)
43 class AbstractParticipant:
44 first_name: str
45 last_name: str
46 qualifications: QuerySet = field(hash=False)
47 date_of_birth: date
48 email: Optional[str] # if set to None, no notifications are sent
49
50 @property
51 def age(self):
52 today, born = date.today(), self.date_of_birth
53 return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
54
55 def __str__(self):
56 return f"{self.first_name} {self.last_name}"
57
58 def new_participation(self, shift):
59 raise NotImplementedError
60
61 def participation_for(self, shift):
62 """Return the participation object for a shift. Return None if it does not exist."""
63 raise NotImplementedError
64
65 def collect_all_qualifications(self):
66 """We collect using breath first search with one query for every layer of inclusion."""
67 all_qualifications = set(self.qualifications)
68 current = self.qualifications
69 while current:
70 next = (
71 Qualification.objects.filter(included_by__in=current)
72 .exclude(id__in=(q.id for q in all_qualifications))
73 .distinct()
74 )
75 all_qualifications |= set(next)
76 current = next
77 return all_qualifications
78
79 def has_qualifications(self, qualifications):
80 return set(qualifications) <= self.collect_all_qualifications()
81
82
83 @dataclass(frozen=True)
84 class LocalUserParticipant(AbstractParticipant):
85 user: get_user_model()
86
87 def new_participation(self, shift):
88 return LocalParticipation(shift=shift, user=self.user)
89
90 def participation_for(self, shift):
91 try:
92 return LocalParticipation.objects.get(shift=shift, user=self.user)
93 except LocalParticipation.DoesNotExist:
94 return None
95
96
97 class ParticipationError(ValidationError):
98 pass
99
100
101 class ConfigurationForm(forms.Form):
102 pass
103
104
105 def check_event_is_active(method, participant):
106 if not method.shift.event.active:
107 return ParticipationError(_("The event is not active."))
108
109
110 def check_participation_state_for_signup(method, participant):
111 participation = participant.participation_for(method.shift)
112 if participation is not None:
113 if participation.state == AbstractParticipation.States.REQUESTED:
114 return ParticipationError(
115 _("You have already requested your participation for {shift}").format(
116 shift=method.shift
117 )
118 )
119 elif participation.state == AbstractParticipation.States.CONFIRMED:
120 return ParticipationError(
121 _("You are already signed up for {shift}.").format(shift=method.shift)
122 )
123 elif participation.state == AbstractParticipation.States.RESPONSIBLE_REJECTED:
124 return ParticipationError(
125 _("You are rejected from {shift}.").format(shift=method.shift)
126 )
127
128
129 def check_participation_state_for_decline(method, participant):
130 participation = participant.participation_for(method.shift)
131 if participation is not None:
132 if (
133 participation.state == AbstractParticipation.States.CONFIRMED
134 and not method.configuration.user_can_decline_confirmed
135 ):
136 return ParticipationError(
137 _("You are bindingly signed up for {shift}.").format(shift=method.shift)
138 )
139 elif participation.state == AbstractParticipation.States.RESPONSIBLE_REJECTED:
140 return ParticipationError(
141 _("You are rejected from {shift}.").format(shift=method.shift)
142 )
143 elif participation.state == AbstractParticipation.States.USER_DECLINED:
144 return ParticipationError(
145 _("You have already declined participating in {shift}.").format(shift=method.shift)
146 )
147
148
149 def check_inside_signup_timeframe(method, participant):
150 last_time = method.shift.end_time
151 if method.configuration.signup_until is not None:
152 last_time = min(last_time, method.configuration.signup_until)
153 if timezone.now() > last_time:
154 return ParticipationError(_("The signup period is over."))
155
156
157 def check_participant_age(method, participant):
158 minimum_age = method.configuration.minimum_age
159 if minimum_age is not None and participant.age < minimum_age:
160 return ParticipationError(
161 _("You are too young. The minimum age is {age}.").format(age=minimum_age)
162 )
163
164
165 class BaseSignupMethod:
166 slug = "abstract"
167 verbose_name = "abstract"
168 description = """"""
169
170 # use _ == gettext_lazy!
171 registration_button_text = _("Sign up")
172 signup_success_message = _("You have successfully signed up for {shift}.")
173 signup_error_message = _("Signing up failed: {error}")
174 decline_success_message = _("You have successfully declined {shift}.")
175 decline_error_message = _("Declining failed: {error}")
176
177 def __init__(self, shift):
178 self.shift = shift
179 self.configuration = Namespace(
180 **{name: config["default"] for name, config in self.get_configuration_fields().items()}
181 )
182 if shift is not None:
183 for key, value in shift.signup_configuration.items():
184 setattr(self.configuration, key, value)
185
186 @property
187 def signup_view_class(self):
188 return BaseSignupView
189
190 @cached_property
191 def signup_view(self):
192 return self.signup_view_class.as_view(method=self, shift=self.shift)
193
194 @property
195 def signup_checkers(self):
196 return [
197 check_event_is_active,
198 check_participation_state_for_signup,
199 check_inside_signup_timeframe,
200 check_participant_age,
201 ]
202
203 @property
204 def decline_checkers(self):
205 return [
206 check_event_is_active,
207 check_participation_state_for_decline,
208 check_inside_signup_timeframe,
209 ]
210
211 @functools.lru_cache()
212 def get_signup_errors(self, participant) -> List[ParticipationError]:
213 return [
214 error
215 for checker in self.signup_checkers
216 if (error := checker(self, participant)) is not None
217 ]
218
219 @functools.lru_cache()
220 def get_decline_errors(self, participant):
221 return [
222 error
223 for checker in self.decline_checkers
224 if (error := checker(self, participant)) is not None
225 ]
226
227 def can_decline(self, participant):
228 return not self.get_decline_errors(participant)
229
230 def can_sign_up(self, participant):
231 return not self.get_signup_errors(participant)
232
233 def get_participation_for(self, participant):
234 return participant.participation_for(self.shift) or participant.new_participation(
235 self.shift
236 )
237
238 def perform_signup(self, participant: AbstractParticipant, **kwargs):
239 """
240 Configure a participation object for the given participant according to the method's configuration.
241 `kwargs` may contain further instructions from a e.g. a form.
242 """
243 if errors := self.get_signup_errors(participant):
244 raise ParticipationError(errors)
245 return self.get_participation_for(participant)
246
247 def perform_decline(self, participant, **kwargs):
248 """Create and configure a declining participation object for the given participant. `kwargs` may contain further instructions from a e.g. a form."""
249 if errors := self.get_decline_errors(participant):
250 raise ParticipationError(errors)
251 participation = self.get_participation_for(participant)
252 participation.state = AbstractParticipation.States.USER_DECLINED
253 participation.save()
254 return participation
255
256 def get_configuration_fields(self):
257 return {
258 "minimum_age": {
259 "formfield": forms.IntegerField(required=False),
260 "default": 16,
261 "publish_with_label": _("Minimum age"),
262 },
263 "signup_until": {
264 "formfield": forms.SplitDateTimeField(
265 required=False, widget=CustomSplitDateTimeWidget
266 ),
267 "default": None,
268 "publish_with_label": _("Signup until"),
269 "format": functools.partial(formats.date_format, format="SHORT_DATETIME_FORMAT"),
270 },
271 "user_can_decline_confirmed": {
272 "formfield": forms.BooleanField(
273 label=_("Confirmed users can decline by themselves"),
274 required=False,
275 help_text=_("only if the signup timeframe has not ended"),
276 ),
277 "default": False,
278 "publish_with_label": _("Can decline after confirmation"),
279 "format": yesno,
280 },
281 }
282
283 def get_signup_info(self):
284 """
285 Return key/value pairs about the configuration to show in the shift info box.
286 """
287 fields = self.get_configuration_fields()
288 return {
289 label: field.get("format", str)(value)
290 for key, field in fields.items()
291 if (label := field.get("publish_with_label", False))
292 and (value := getattr(self.configuration, key))
293 }
294
295 def render_shift_state(self, request):
296 """
297 Render html that will be shown in the shift info box.
298 Use it to inform about the current state of the shift and participations.
299 """
300 return ""
301
302 def get_configuration_form(self, *args, **kwargs):
303 if self.shift is not None:
304 kwargs.setdefault("initial", self.configuration.__dict__)
305 form = ConfigurationForm(*args, **kwargs)
306 for name, config in self.get_configuration_fields().items():
307 form.fields[name] = config["formfield"]
308 return form
309
310 def render_configuration_form(self, form=None, *args, **kwargs):
311 form = form or self.get_configuration_form(*args, **kwargs)
312 template = Template(
313 template_string="{% load bootstrap4 %}{% bootstrap_form form %}"
314 ).render(Context({"form": form}))
315 return template
316
317
318 class BaseSignupView(View):
319 shift: Shift = ...
320 method: BaseSignupMethod = ...
321
322 def dispatch(self, request, *args, **kwargs):
323 if (choice := request.POST.get("signup_choice")) is not None:
324 if choice == "sign_up":
325 return self.signup_pressed(request, *args, **kwargs)
326 elif choice == "decline":
327 return self.decline_pressed(request, *args, **kwargs)
328 else:
329 raise ValueError(
330 _("'{choice}' is not a valid signup action.").format(choice=choice)
331 )
332 return super().dispatch(request, *args, **kwargs)
333
334 def signup_pressed(self, request, *args, **kwargs):
335 try:
336 with transaction.atomic():
337 self.method.perform_signup(request.user.as_participant())
338 messages.success(
339 request,
340 self.method.signup_success_message.format(shift=self.shift),
341 )
342 except ParticipationError as errors:
343 for error in errors:
344 messages.error(request, self.method.signup_error_message.format(error=error))
345 finally:
346 return redirect(self.shift.event.get_absolute_url())
347
348 def decline_pressed(self, request, *args, **kwargs):
349 try:
350 with transaction.atomic():
351 self.method.perform_decline(request.user.as_participant())
352 messages.info(request, self.method.decline_success_message.format(shift=self.shift))
353 except ParticipationError as errors:
354 for error in errors:
355 messages.error(request, self.method.decline_error_message.format(error=error))
356 finally:
357 return redirect(self.shift.event.get_absolute_url())
358
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ephios/event_management/signup.py b/ephios/event_management/signup.py
--- a/ephios/event_management/signup.py
+++ b/ephios/event_management/signup.py
@@ -47,9 +47,8 @@
date_of_birth: date
email: Optional[str] # if set to None, no notifications are sent
- @property
- def age(self):
- today, born = date.today(), self.date_of_birth
+ def get_age(self, today: date = None):
+ today, born = today or date.today(), self.date_of_birth
return today.year - born.year - ((today.month, today.day) < (born.month, born.day))
def __str__(self):
@@ -156,7 +155,8 @@
def check_participant_age(method, participant):
minimum_age = method.configuration.minimum_age
- if minimum_age is not None and participant.age < minimum_age:
+ day = method.shift.start_time.date()
+ if minimum_age is not None and participant.get_age(day) < minimum_age:
return ParticipationError(
_("You are too young. The minimum age is {age}.").format(age=minimum_age)
)
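
Note on the fix: the old `age` property always measured age against `date.today()`; replacing it with `get_age(today=None)` and passing `shift.start_time.date()` in `check_participant_age` means eligibility is judged on the day the shift starts. A small illustration of the changed calculation (the dates below are made up for the example):

```python
from datetime import date

def get_age(born: date, today: date = None) -> int:
    # Mirrors the patched helper: age on `today`, defaulting to the current date.
    today = today or date.today()
    return today.year - born.year - ((today.month, today.day) < (born.month, born.day))

born = date(2005, 6, 1)
print(get_age(born, date(2021, 5, 31)))  # 15 -> too young for a shift on this day
print(get_age(born, date(2021, 6, 2)))   # 16 -> old enough for a shift two days later
```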
| {"golden_diff": "diff --git a/ephios/event_management/signup.py b/ephios/event_management/signup.py\n--- a/ephios/event_management/signup.py\n+++ b/ephios/event_management/signup.py\n@@ -47,9 +47,8 @@\n date_of_birth: date\n email: Optional[str] # if set to None, no notifications are sent\n \n- @property\n- def age(self):\n- today, born = date.today(), self.date_of_birth\n+ def get_age(self, today: date = None):\n+ today, born = today or date.today(), self.date_of_birth\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n \n def __str__(self):\n@@ -156,7 +155,8 @@\n \n def check_participant_age(method, participant):\n minimum_age = method.configuration.minimum_age\n- if minimum_age is not None and participant.age < minimum_age:\n+ day = method.shift.start_time.date()\n+ if minimum_age is not None and participant.get_age(day) < minimum_age:\n return ParticipationError(\n _(\"You are too young. The minimum age is {age}.\").format(age=minimum_age)\n )\n", "issue": "Age checking for signup consideres wrong date\nWhen checking the participant age, the age at the current date is evaluated instead of the age at the event date\n", "before_files": [{"content": "import functools\nimport json\nfrom argparse import Namespace\nfrom dataclasses import dataclass, field\nfrom datetime import date\nfrom typing import List, Optional\n\nimport django.dispatch\nfrom django import forms\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.db.models import QuerySet\nfrom django.shortcuts import redirect\nfrom django.template import Context, Template\nfrom django.template.defaultfilters import yesno\nfrom django.utils import formats, timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom ephios.event_management.models import AbstractParticipation, LocalParticipation, Shift\nfrom ephios.extra.widgets import CustomSplitDateTimeWidget\nfrom ephios.user_management.models import Qualification\n\nregister_signup_methods = django.dispatch.Signal(providing_args=[])\n\n\ndef all_signup_methods():\n for _, methods in register_signup_methods.send(None):\n yield from methods\n\n\ndef signup_method_from_slug(slug, shift=None):\n for method in all_signup_methods():\n if method.slug == slug:\n return method(shift)\n raise ValueError(_(\"Signup Method '{slug}' was not found.\").format(slug=slug))\n\n\n@dataclass(frozen=True)\nclass AbstractParticipant:\n first_name: str\n last_name: str\n qualifications: QuerySet = field(hash=False)\n date_of_birth: date\n email: Optional[str] # if set to None, no notifications are sent\n\n @property\n def age(self):\n today, born = date.today(), self.date_of_birth\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\n def __str__(self):\n return f\"{self.first_name} {self.last_name}\"\n\n def new_participation(self, shift):\n raise NotImplementedError\n\n def participation_for(self, shift):\n \"\"\"Return the participation object for a shift. 
Return None if it does not exist.\"\"\"\n raise NotImplementedError\n\n def collect_all_qualifications(self):\n \"\"\"We collect using breath first search with one query for every layer of inclusion.\"\"\"\n all_qualifications = set(self.qualifications)\n current = self.qualifications\n while current:\n next = (\n Qualification.objects.filter(included_by__in=current)\n .exclude(id__in=(q.id for q in all_qualifications))\n .distinct()\n )\n all_qualifications |= set(next)\n current = next\n return all_qualifications\n\n def has_qualifications(self, qualifications):\n return set(qualifications) <= self.collect_all_qualifications()\n\n\n@dataclass(frozen=True)\nclass LocalUserParticipant(AbstractParticipant):\n user: get_user_model()\n\n def new_participation(self, shift):\n return LocalParticipation(shift=shift, user=self.user)\n\n def participation_for(self, shift):\n try:\n return LocalParticipation.objects.get(shift=shift, user=self.user)\n except LocalParticipation.DoesNotExist:\n return None\n\n\nclass ParticipationError(ValidationError):\n pass\n\n\nclass ConfigurationForm(forms.Form):\n pass\n\n\ndef check_event_is_active(method, participant):\n if not method.shift.event.active:\n return ParticipationError(_(\"The event is not active.\"))\n\n\ndef check_participation_state_for_signup(method, participant):\n participation = participant.participation_for(method.shift)\n if participation is not None:\n if participation.state == AbstractParticipation.States.REQUESTED:\n return ParticipationError(\n _(\"You have already requested your participation for {shift}\").format(\n shift=method.shift\n )\n )\n elif participation.state == AbstractParticipation.States.CONFIRMED:\n return ParticipationError(\n _(\"You are already signed up for {shift}.\").format(shift=method.shift)\n )\n elif participation.state == AbstractParticipation.States.RESPONSIBLE_REJECTED:\n return ParticipationError(\n _(\"You are rejected from {shift}.\").format(shift=method.shift)\n )\n\n\ndef check_participation_state_for_decline(method, participant):\n participation = participant.participation_for(method.shift)\n if participation is not None:\n if (\n participation.state == AbstractParticipation.States.CONFIRMED\n and not method.configuration.user_can_decline_confirmed\n ):\n return ParticipationError(\n _(\"You are bindingly signed up for {shift}.\").format(shift=method.shift)\n )\n elif participation.state == AbstractParticipation.States.RESPONSIBLE_REJECTED:\n return ParticipationError(\n _(\"You are rejected from {shift}.\").format(shift=method.shift)\n )\n elif participation.state == AbstractParticipation.States.USER_DECLINED:\n return ParticipationError(\n _(\"You have already declined participating in {shift}.\").format(shift=method.shift)\n )\n\n\ndef check_inside_signup_timeframe(method, participant):\n last_time = method.shift.end_time\n if method.configuration.signup_until is not None:\n last_time = min(last_time, method.configuration.signup_until)\n if timezone.now() > last_time:\n return ParticipationError(_(\"The signup period is over.\"))\n\n\ndef check_participant_age(method, participant):\n minimum_age = method.configuration.minimum_age\n if minimum_age is not None and participant.age < minimum_age:\n return ParticipationError(\n _(\"You are too young. 
The minimum age is {age}.\").format(age=minimum_age)\n )\n\n\nclass BaseSignupMethod:\n slug = \"abstract\"\n verbose_name = \"abstract\"\n description = \"\"\"\"\"\"\n\n # use _ == gettext_lazy!\n registration_button_text = _(\"Sign up\")\n signup_success_message = _(\"You have successfully signed up for {shift}.\")\n signup_error_message = _(\"Signing up failed: {error}\")\n decline_success_message = _(\"You have successfully declined {shift}.\")\n decline_error_message = _(\"Declining failed: {error}\")\n\n def __init__(self, shift):\n self.shift = shift\n self.configuration = Namespace(\n **{name: config[\"default\"] for name, config in self.get_configuration_fields().items()}\n )\n if shift is not None:\n for key, value in shift.signup_configuration.items():\n setattr(self.configuration, key, value)\n\n @property\n def signup_view_class(self):\n return BaseSignupView\n\n @cached_property\n def signup_view(self):\n return self.signup_view_class.as_view(method=self, shift=self.shift)\n\n @property\n def signup_checkers(self):\n return [\n check_event_is_active,\n check_participation_state_for_signup,\n check_inside_signup_timeframe,\n check_participant_age,\n ]\n\n @property\n def decline_checkers(self):\n return [\n check_event_is_active,\n check_participation_state_for_decline,\n check_inside_signup_timeframe,\n ]\n\n @functools.lru_cache()\n def get_signup_errors(self, participant) -> List[ParticipationError]:\n return [\n error\n for checker in self.signup_checkers\n if (error := checker(self, participant)) is not None\n ]\n\n @functools.lru_cache()\n def get_decline_errors(self, participant):\n return [\n error\n for checker in self.decline_checkers\n if (error := checker(self, participant)) is not None\n ]\n\n def can_decline(self, participant):\n return not self.get_decline_errors(participant)\n\n def can_sign_up(self, participant):\n return not self.get_signup_errors(participant)\n\n def get_participation_for(self, participant):\n return participant.participation_for(self.shift) or participant.new_participation(\n self.shift\n )\n\n def perform_signup(self, participant: AbstractParticipant, **kwargs):\n \"\"\"\n Configure a participation object for the given participant according to the method's configuration.\n `kwargs` may contain further instructions from a e.g. a form.\n \"\"\"\n if errors := self.get_signup_errors(participant):\n raise ParticipationError(errors)\n return self.get_participation_for(participant)\n\n def perform_decline(self, participant, **kwargs):\n \"\"\"Create and configure a declining participation object for the given participant. `kwargs` may contain further instructions from a e.g. 
a form.\"\"\"\n if errors := self.get_decline_errors(participant):\n raise ParticipationError(errors)\n participation = self.get_participation_for(participant)\n participation.state = AbstractParticipation.States.USER_DECLINED\n participation.save()\n return participation\n\n def get_configuration_fields(self):\n return {\n \"minimum_age\": {\n \"formfield\": forms.IntegerField(required=False),\n \"default\": 16,\n \"publish_with_label\": _(\"Minimum age\"),\n },\n \"signup_until\": {\n \"formfield\": forms.SplitDateTimeField(\n required=False, widget=CustomSplitDateTimeWidget\n ),\n \"default\": None,\n \"publish_with_label\": _(\"Signup until\"),\n \"format\": functools.partial(formats.date_format, format=\"SHORT_DATETIME_FORMAT\"),\n },\n \"user_can_decline_confirmed\": {\n \"formfield\": forms.BooleanField(\n label=_(\"Confirmed users can decline by themselves\"),\n required=False,\n help_text=_(\"only if the signup timeframe has not ended\"),\n ),\n \"default\": False,\n \"publish_with_label\": _(\"Can decline after confirmation\"),\n \"format\": yesno,\n },\n }\n\n def get_signup_info(self):\n \"\"\"\n Return key/value pairs about the configuration to show in the shift info box.\n \"\"\"\n fields = self.get_configuration_fields()\n return {\n label: field.get(\"format\", str)(value)\n for key, field in fields.items()\n if (label := field.get(\"publish_with_label\", False))\n and (value := getattr(self.configuration, key))\n }\n\n def render_shift_state(self, request):\n \"\"\"\n Render html that will be shown in the shift info box.\n Use it to inform about the current state of the shift and participations.\n \"\"\"\n return \"\"\n\n def get_configuration_form(self, *args, **kwargs):\n if self.shift is not None:\n kwargs.setdefault(\"initial\", self.configuration.__dict__)\n form = ConfigurationForm(*args, **kwargs)\n for name, config in self.get_configuration_fields().items():\n form.fields[name] = config[\"formfield\"]\n return form\n\n def render_configuration_form(self, form=None, *args, **kwargs):\n form = form or self.get_configuration_form(*args, **kwargs)\n template = Template(\n template_string=\"{% load bootstrap4 %}{% bootstrap_form form %}\"\n ).render(Context({\"form\": form}))\n return template\n\n\nclass BaseSignupView(View):\n shift: Shift = ...\n method: BaseSignupMethod = ...\n\n def dispatch(self, request, *args, **kwargs):\n if (choice := request.POST.get(\"signup_choice\")) is not None:\n if choice == \"sign_up\":\n return self.signup_pressed(request, *args, **kwargs)\n elif choice == \"decline\":\n return self.decline_pressed(request, *args, **kwargs)\n else:\n raise ValueError(\n _(\"'{choice}' is not a valid signup action.\").format(choice=choice)\n )\n return super().dispatch(request, *args, **kwargs)\n\n def signup_pressed(self, request, *args, **kwargs):\n try:\n with transaction.atomic():\n self.method.perform_signup(request.user.as_participant())\n messages.success(\n request,\n self.method.signup_success_message.format(shift=self.shift),\n )\n except ParticipationError as errors:\n for error in errors:\n messages.error(request, self.method.signup_error_message.format(error=error))\n finally:\n return redirect(self.shift.event.get_absolute_url())\n\n def decline_pressed(self, request, *args, **kwargs):\n try:\n with transaction.atomic():\n self.method.perform_decline(request.user.as_participant())\n messages.info(request, self.method.decline_success_message.format(shift=self.shift))\n except ParticipationError as errors:\n for error in errors:\n 
messages.error(request, self.method.decline_error_message.format(error=error))\n finally:\n return redirect(self.shift.event.get_absolute_url())\n", "path": "ephios/event_management/signup.py"}], "after_files": [{"content": "import functools\nimport json\nfrom argparse import Namespace\nfrom dataclasses import dataclass, field\nfrom datetime import date\nfrom typing import List, Optional\n\nimport django.dispatch\nfrom django import forms\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.db.models import QuerySet\nfrom django.shortcuts import redirect\nfrom django.template import Context, Template\nfrom django.template.defaultfilters import yesno\nfrom django.utils import formats, timezone\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import View\n\nfrom ephios.event_management.models import AbstractParticipation, LocalParticipation, Shift\nfrom ephios.extra.widgets import CustomSplitDateTimeWidget\nfrom ephios.user_management.models import Qualification\n\nregister_signup_methods = django.dispatch.Signal(providing_args=[])\n\n\ndef all_signup_methods():\n for _, methods in register_signup_methods.send(None):\n yield from methods\n\n\ndef signup_method_from_slug(slug, shift=None):\n for method in all_signup_methods():\n if method.slug == slug:\n return method(shift)\n raise ValueError(_(\"Signup Method '{slug}' was not found.\").format(slug=slug))\n\n\n@dataclass(frozen=True)\nclass AbstractParticipant:\n first_name: str\n last_name: str\n qualifications: QuerySet = field(hash=False)\n date_of_birth: date\n email: Optional[str] # if set to None, no notifications are sent\n\n def get_age(self, today: date = None):\n today, born = today or date.today(), self.date_of_birth\n return today.year - born.year - ((today.month, today.day) < (born.month, born.day))\n\n def __str__(self):\n return f\"{self.first_name} {self.last_name}\"\n\n def new_participation(self, shift):\n raise NotImplementedError\n\n def participation_for(self, shift):\n \"\"\"Return the participation object for a shift. 
Return None if it does not exist.\"\"\"\n raise NotImplementedError\n\n def collect_all_qualifications(self):\n \"\"\"We collect using breath first search with one query for every layer of inclusion.\"\"\"\n all_qualifications = set(self.qualifications)\n current = self.qualifications\n while current:\n next = (\n Qualification.objects.filter(included_by__in=current)\n .exclude(id__in=(q.id for q in all_qualifications))\n .distinct()\n )\n all_qualifications |= set(next)\n current = next\n return all_qualifications\n\n def has_qualifications(self, qualifications):\n return set(qualifications) <= self.collect_all_qualifications()\n\n\n@dataclass(frozen=True)\nclass LocalUserParticipant(AbstractParticipant):\n user: get_user_model()\n\n def new_participation(self, shift):\n return LocalParticipation(shift=shift, user=self.user)\n\n def participation_for(self, shift):\n try:\n return LocalParticipation.objects.get(shift=shift, user=self.user)\n except LocalParticipation.DoesNotExist:\n return None\n\n\nclass ParticipationError(ValidationError):\n pass\n\n\nclass ConfigurationForm(forms.Form):\n pass\n\n\ndef check_event_is_active(method, participant):\n if not method.shift.event.active:\n return ParticipationError(_(\"The event is not active.\"))\n\n\ndef check_participation_state_for_signup(method, participant):\n participation = participant.participation_for(method.shift)\n if participation is not None:\n if participation.state == AbstractParticipation.States.REQUESTED:\n return ParticipationError(\n _(\"You have already requested your participation for {shift}\").format(\n shift=method.shift\n )\n )\n elif participation.state == AbstractParticipation.States.CONFIRMED:\n return ParticipationError(\n _(\"You are already signed up for {shift}.\").format(shift=method.shift)\n )\n elif participation.state == AbstractParticipation.States.RESPONSIBLE_REJECTED:\n return ParticipationError(\n _(\"You are rejected from {shift}.\").format(shift=method.shift)\n )\n\n\ndef check_participation_state_for_decline(method, participant):\n participation = participant.participation_for(method.shift)\n if participation is not None:\n if (\n participation.state == AbstractParticipation.States.CONFIRMED\n and not method.configuration.user_can_decline_confirmed\n ):\n return ParticipationError(\n _(\"You are bindingly signed up for {shift}.\").format(shift=method.shift)\n )\n elif participation.state == AbstractParticipation.States.RESPONSIBLE_REJECTED:\n return ParticipationError(\n _(\"You are rejected from {shift}.\").format(shift=method.shift)\n )\n elif participation.state == AbstractParticipation.States.USER_DECLINED:\n return ParticipationError(\n _(\"You have already declined participating in {shift}.\").format(shift=method.shift)\n )\n\n\ndef check_inside_signup_timeframe(method, participant):\n last_time = method.shift.end_time\n if method.configuration.signup_until is not None:\n last_time = min(last_time, method.configuration.signup_until)\n if timezone.now() > last_time:\n return ParticipationError(_(\"The signup period is over.\"))\n\n\ndef check_participant_age(method, participant):\n minimum_age = method.configuration.minimum_age\n day = method.shift.start_time.date()\n if minimum_age is not None and participant.get_age(day) < minimum_age:\n return ParticipationError(\n _(\"You are too young. 
The minimum age is {age}.\").format(age=minimum_age)\n )\n\n\nclass BaseSignupMethod:\n slug = \"abstract\"\n verbose_name = \"abstract\"\n description = \"\"\"\"\"\"\n\n # use _ == gettext_lazy!\n registration_button_text = _(\"Sign up\")\n signup_success_message = _(\"You have successfully signed up for {shift}.\")\n signup_error_message = _(\"Signing up failed: {error}\")\n decline_success_message = _(\"You have successfully declined {shift}.\")\n decline_error_message = _(\"Declining failed: {error}\")\n\n def __init__(self, shift):\n self.shift = shift\n self.configuration = Namespace(\n **{name: config[\"default\"] for name, config in self.get_configuration_fields().items()}\n )\n if shift is not None:\n for key, value in shift.signup_configuration.items():\n setattr(self.configuration, key, value)\n\n @property\n def signup_view_class(self):\n return BaseSignupView\n\n @cached_property\n def signup_view(self):\n return self.signup_view_class.as_view(method=self, shift=self.shift)\n\n @property\n def signup_checkers(self):\n return [\n check_event_is_active,\n check_participation_state_for_signup,\n check_inside_signup_timeframe,\n check_participant_age,\n ]\n\n @property\n def decline_checkers(self):\n return [\n check_event_is_active,\n check_participation_state_for_decline,\n check_inside_signup_timeframe,\n ]\n\n @functools.lru_cache()\n def get_signup_errors(self, participant) -> List[ParticipationError]:\n return [\n error\n for checker in self.signup_checkers\n if (error := checker(self, participant)) is not None\n ]\n\n @functools.lru_cache()\n def get_decline_errors(self, participant):\n return [\n error\n for checker in self.decline_checkers\n if (error := checker(self, participant)) is not None\n ]\n\n def can_decline(self, participant):\n return not self.get_decline_errors(participant)\n\n def can_sign_up(self, participant):\n return not self.get_signup_errors(participant)\n\n def get_participation_for(self, participant):\n return participant.participation_for(self.shift) or participant.new_participation(\n self.shift\n )\n\n def perform_signup(self, participant: AbstractParticipant, **kwargs):\n \"\"\"\n Configure a participation object for the given participant according to the method's configuration.\n `kwargs` may contain further instructions from a e.g. a form.\n \"\"\"\n if errors := self.get_signup_errors(participant):\n raise ParticipationError(errors)\n return self.get_participation_for(participant)\n\n def perform_decline(self, participant, **kwargs):\n \"\"\"Create and configure a declining participation object for the given participant. `kwargs` may contain further instructions from a e.g. 
a form.\"\"\"\n if errors := self.get_decline_errors(participant):\n raise ParticipationError(errors)\n participation = self.get_participation_for(participant)\n participation.state = AbstractParticipation.States.USER_DECLINED\n participation.save()\n return participation\n\n def get_configuration_fields(self):\n return {\n \"minimum_age\": {\n \"formfield\": forms.IntegerField(required=False),\n \"default\": 16,\n \"publish_with_label\": _(\"Minimum age\"),\n },\n \"signup_until\": {\n \"formfield\": forms.SplitDateTimeField(\n required=False, widget=CustomSplitDateTimeWidget\n ),\n \"default\": None,\n \"publish_with_label\": _(\"Signup until\"),\n \"format\": functools.partial(formats.date_format, format=\"SHORT_DATETIME_FORMAT\"),\n },\n \"user_can_decline_confirmed\": {\n \"formfield\": forms.BooleanField(\n label=_(\"Confirmed users can decline by themselves\"),\n required=False,\n help_text=_(\"only if the signup timeframe has not ended\"),\n ),\n \"default\": False,\n \"publish_with_label\": _(\"Can decline after confirmation\"),\n \"format\": yesno,\n },\n }\n\n def get_signup_info(self):\n \"\"\"\n Return key/value pairs about the configuration to show in the shift info box.\n \"\"\"\n fields = self.get_configuration_fields()\n return {\n label: field.get(\"format\", str)(value)\n for key, field in fields.items()\n if (label := field.get(\"publish_with_label\", False))\n and (value := getattr(self.configuration, key))\n }\n\n def render_shift_state(self, request):\n \"\"\"\n Render html that will be shown in the shift info box.\n Use it to inform about the current state of the shift and participations.\n \"\"\"\n return \"\"\n\n def get_configuration_form(self, *args, **kwargs):\n if self.shift is not None:\n kwargs.setdefault(\"initial\", self.configuration.__dict__)\n form = ConfigurationForm(*args, **kwargs)\n for name, config in self.get_configuration_fields().items():\n form.fields[name] = config[\"formfield\"]\n return form\n\n def render_configuration_form(self, form=None, *args, **kwargs):\n form = form or self.get_configuration_form(*args, **kwargs)\n template = Template(\n template_string=\"{% load bootstrap4 %}{% bootstrap_form form %}\"\n ).render(Context({\"form\": form}))\n return template\n\n\nclass BaseSignupView(View):\n shift: Shift = ...\n method: BaseSignupMethod = ...\n\n def dispatch(self, request, *args, **kwargs):\n if (choice := request.POST.get(\"signup_choice\")) is not None:\n if choice == \"sign_up\":\n return self.signup_pressed(request, *args, **kwargs)\n elif choice == \"decline\":\n return self.decline_pressed(request, *args, **kwargs)\n else:\n raise ValueError(\n _(\"'{choice}' is not a valid signup action.\").format(choice=choice)\n )\n return super().dispatch(request, *args, **kwargs)\n\n def signup_pressed(self, request, *args, **kwargs):\n try:\n with transaction.atomic():\n self.method.perform_signup(request.user.as_participant())\n messages.success(\n request,\n self.method.signup_success_message.format(shift=self.shift),\n )\n except ParticipationError as errors:\n for error in errors:\n messages.error(request, self.method.signup_error_message.format(error=error))\n finally:\n return redirect(self.shift.event.get_absolute_url())\n\n def decline_pressed(self, request, *args, **kwargs):\n try:\n with transaction.atomic():\n self.method.perform_decline(request.user.as_participant())\n messages.info(request, self.method.decline_success_message.format(shift=self.shift))\n except ParticipationError as errors:\n for error in errors:\n 
messages.error(request, self.method.decline_error_message.format(error=error))\n finally:\n return redirect(self.shift.event.get_absolute_url())\n", "path": "ephios/event_management/signup.py"}]} | 3,836 | 266 |
gh_patches_debug_58737 | rasdani/github-patches | git_diff | goauthentik__authentik-9182 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OIDC Well-known URL won't configure endpoint URLs
**Describe the bug**
When entering the OIDC Well-known URL for an OpenID OAuth source, authentik won't automatically configure the endpoint URLs. Instead it returns a "Bad Request" error and reports "This field may not be blank." for every endpoint.
**To Reproduce**
Steps to reproduce the behavior:
1. Go to 'Directory > Federation and Social login'
2. Click on 'Create > OpenID OAuth Source > Next'
3. Enter your credentials (Name, Slug, Key, Secret and the Well-known URL)
4. Click on 'Finish'
5. See error
**Expected behavior**
Upon clicking 'Finish', authentik should configure the missing endpoints using the Well-known URL.
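
For reference, this auto-configuration is expected to come from the provider's OIDC discovery document. A hedged sketch of the relevant keys follows; every URL below is a placeholder rather than a real endpoint, and the mapping is inferred from the source file shown further down.

```python
# Abridged shape of a /.well-known/openid-configuration response (invented values).
discovery = {
    "issuer": "https://idp.example.com",
    "authorization_endpoint": "https://idp.example.com/o/authorize",
    "token_endpoint": "https://idp.example.com/o/token",
    "userinfo_endpoint": "https://idp.example.com/o/userinfo",
    "jwks_uri": "https://idp.example.com/o/jwks",
}
# These are expected to fill authorization_url, access_token_url, profile_url
# and the JWKS URL when the corresponding form fields are left empty.
```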
**Screenshots**

**Version and Deployment:**
- authentik version: 2024.2.2
- Deployment: docker-compose
**Additional context**
I thought it might have something to do with connectivity and the proxy, since we are running authentik behind a corporate proxy. But I can curl the Well-known URL from inside the containers.
--- END ISSUE ---
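"This field may not be blank." is the stock Django REST Framework message for a CharField that receives an empty string while `allow_blank` is False, which matches the behavior described in the issue. The following is a minimal, self-contained sketch of that failure mode using a generic serializer, not authentik's actual one:

```python
from rest_framework import serializers

class EndpointSerializer(serializers.Serializer):
    # The default CharField rejects "" with "This field may not be blank."
    authorization_url = serializers.CharField()
    # allow_blank=True accepts "", so later validation code could fill the value in.
    access_token_url = serializers.CharField(allow_blank=True)

s = EndpointSerializer(data={"authorization_url": "", "access_token_url": ""})
s.is_valid()
print(s.errors)  # only authorization_url is reported as blank
```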
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `authentik/sources/oauth/api/source.py`
Content:
```
1 """OAuth Source Serializer"""
2
3 from django.urls.base import reverse_lazy
4 from django_filters.filters import BooleanFilter
5 from django_filters.filterset import FilterSet
6 from drf_spectacular.types import OpenApiTypes
7 from drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field
8 from requests import RequestException
9 from rest_framework.decorators import action
10 from rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField
11 from rest_framework.request import Request
12 from rest_framework.response import Response
13 from rest_framework.serializers import ValidationError
14 from rest_framework.viewsets import ModelViewSet
15
16 from authentik.core.api.sources import SourceSerializer
17 from authentik.core.api.used_by import UsedByMixin
18 from authentik.core.api.utils import PassiveSerializer
19 from authentik.lib.utils.http import get_http_session
20 from authentik.sources.oauth.models import OAuthSource
21 from authentik.sources.oauth.types.registry import SourceType, registry
22
23
24 class SourceTypeSerializer(PassiveSerializer):
25 """Serializer for SourceType"""
26
27 name = CharField(required=True)
28 verbose_name = CharField(required=True)
29 urls_customizable = BooleanField()
30 request_token_url = CharField(read_only=True, allow_null=True)
31 authorization_url = CharField(read_only=True, allow_null=True)
32 access_token_url = CharField(read_only=True, allow_null=True)
33 profile_url = CharField(read_only=True, allow_null=True)
34 oidc_well_known_url = CharField(read_only=True, allow_null=True)
35 oidc_jwks_url = CharField(read_only=True, allow_null=True)
36
37
38 class OAuthSourceSerializer(SourceSerializer):
39 """OAuth Source Serializer"""
40
41 provider_type = ChoiceField(choices=registry.get_name_tuple())
42 callback_url = SerializerMethodField()
43 type = SerializerMethodField()
44
45 def get_callback_url(self, instance: OAuthSource) -> str:
46 """Get OAuth Callback URL"""
47 relative_url = reverse_lazy(
48 "authentik_sources_oauth:oauth-client-callback",
49 kwargs={"source_slug": instance.slug},
50 )
51 if "request" not in self.context:
52 return relative_url
53 return self.context["request"].build_absolute_uri(relative_url)
54
55 @extend_schema_field(SourceTypeSerializer)
56 def get_type(self, instance: OAuthSource) -> SourceTypeSerializer:
57 """Get source's type configuration"""
58 return SourceTypeSerializer(instance.source_type).data
59
60 def validate(self, attrs: dict) -> dict:
61 session = get_http_session()
62 source_type = registry.find_type(attrs["provider_type"])
63
64 well_known = attrs.get("oidc_well_known_url") or source_type.oidc_well_known_url
65 inferred_oidc_jwks_url = None
66
67 if well_known and well_known != "":
68 try:
69 well_known_config = session.get(well_known)
70 well_known_config.raise_for_status()
71 except RequestException as exc:
72 text = exc.response.text if exc.response else str(exc)
73 raise ValidationError({"oidc_well_known_url": text}) from None
74 config = well_known_config.json()
75 if "issuer" not in config:
76 raise ValidationError({"oidc_well_known_url": "Invalid well-known configuration"})
77 field_map = {
78 # authentik field to oidc field
79 "authorization_url": "authorization_endpoint",
80 "access_token_url": "token_endpoint",
81 "profile_url": "userinfo_endpoint",
82 }
83 for ak_key, oidc_key in field_map.items():
84 # Don't overwrite user-set values
85 if ak_key in attrs and attrs[ak_key]:
86 continue
87 attrs[ak_key] = config.get(oidc_key, "")
88 inferred_oidc_jwks_url = config.get("jwks_uri", "")
89
90 # Prefer user-entered URL to inferred URL to default URL
91 jwks_url = attrs.get("oidc_jwks_url") or inferred_oidc_jwks_url or source_type.oidc_jwks_url
92 if jwks_url and jwks_url != "":
93 attrs["oidc_jwks_url"] = jwks_url
94 try:
95 jwks_config = session.get(jwks_url)
96 jwks_config.raise_for_status()
97 except RequestException as exc:
98 text = exc.response.text if exc.response else str(exc)
99 raise ValidationError({"oidc_jwks_url": text}) from None
100 config = jwks_config.json()
101 attrs["oidc_jwks"] = config
102
103 provider_type = registry.find_type(attrs.get("provider_type", ""))
104 for url in [
105 "authorization_url",
106 "access_token_url",
107 "profile_url",
108 ]:
109 if getattr(provider_type, url, None) is None:
110 if url not in attrs:
111 raise ValidationError(
112 f"{url} is required for provider {provider_type.verbose_name}"
113 )
114 return attrs
115
116 class Meta:
117 model = OAuthSource
118 fields = SourceSerializer.Meta.fields + [
119 "provider_type",
120 "request_token_url",
121 "authorization_url",
122 "access_token_url",
123 "profile_url",
124 "consumer_key",
125 "consumer_secret",
126 "callback_url",
127 "additional_scopes",
128 "type",
129 "oidc_well_known_url",
130 "oidc_jwks_url",
131 "oidc_jwks",
132 ]
133 extra_kwargs = {"consumer_secret": {"write_only": True}}
134
135
136 class OAuthSourceFilter(FilterSet):
137 """OAuth Source filter set"""
138
139 has_jwks = BooleanFilter(label="Only return sources with JWKS data", method="filter_has_jwks")
140
141 def filter_has_jwks(self, queryset, name, value): # pragma: no cover
142 """Only return sources with JWKS data"""
143 return queryset.exclude(oidc_jwks__iexact="{}")
144
145 class Meta:
146 model = OAuthSource
147 fields = [
148 "name",
149 "slug",
150 "enabled",
151 "authentication_flow",
152 "enrollment_flow",
153 "policy_engine_mode",
154 "user_matching_mode",
155 "provider_type",
156 "request_token_url",
157 "authorization_url",
158 "access_token_url",
159 "profile_url",
160 "consumer_key",
161 "additional_scopes",
162 ]
163
164
165 class OAuthSourceViewSet(UsedByMixin, ModelViewSet):
166 """Source Viewset"""
167
168 queryset = OAuthSource.objects.all()
169 serializer_class = OAuthSourceSerializer
170 lookup_field = "slug"
171 filterset_class = OAuthSourceFilter
172 search_fields = ["name", "slug"]
173 ordering = ["name"]
174
175 @extend_schema(
176 responses={200: SourceTypeSerializer(many=True)},
177 parameters=[
178 OpenApiParameter(
179 name="name",
180 location=OpenApiParameter.QUERY,
181 type=OpenApiTypes.STR,
182 )
183 ],
184 )
185 @action(detail=False, pagination_class=None, filter_backends=[])
186 def source_types(self, request: Request) -> Response:
187 """Get all creatable source types. If ?name is set, only returns the type for <name>.
188 If <name> isn't found, returns the default type."""
189 data = []
190 if "name" in request.query_params:
191 source_type = registry.find_type(request.query_params.get("name"))
192 if source_type.__class__ != SourceType:
193 data.append(SourceTypeSerializer(source_type).data)
194 else:
195 for source_type in registry.get():
196 data.append(SourceTypeSerializer(source_type).data)
197 return Response(data)
198
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/authentik/sources/oauth/api/source.py b/authentik/sources/oauth/api/source.py
--- a/authentik/sources/oauth/api/source.py
+++ b/authentik/sources/oauth/api/source.py
@@ -130,7 +130,13 @@
"oidc_jwks_url",
"oidc_jwks",
]
- extra_kwargs = {"consumer_secret": {"write_only": True}}
+ extra_kwargs = {
+ "consumer_secret": {"write_only": True},
+ "request_token_url": {"allow_blank": True},
+ "authorization_url": {"allow_blank": True},
+ "access_token_url": {"allow_blank": True},
+ "profile_url": {"allow_blank": True},
+ }
class OAuthSourceFilter(FilterSet):
| {"golden_diff": "diff --git a/authentik/sources/oauth/api/source.py b/authentik/sources/oauth/api/source.py\n--- a/authentik/sources/oauth/api/source.py\n+++ b/authentik/sources/oauth/api/source.py\n@@ -130,7 +130,13 @@\n \"oidc_jwks_url\",\n \"oidc_jwks\",\n ]\n- extra_kwargs = {\"consumer_secret\": {\"write_only\": True}}\n+ extra_kwargs = {\n+ \"consumer_secret\": {\"write_only\": True},\n+ \"request_token_url\": {\"allow_blank\": True},\n+ \"authorization_url\": {\"allow_blank\": True},\n+ \"access_token_url\": {\"allow_blank\": True},\n+ \"profile_url\": {\"allow_blank\": True},\n+ }\n \n \n class OAuthSourceFilter(FilterSet):\n", "issue": "OIDC Well-known URL won't configure endpoint URLs\n**Describe the bug**\r\nWhen entering the OIDC Well-known URL for an OpenID OAuth source, it won't automatically configure the endpoint URLs. Instead it prompts a \"Bad Request\" error and says \"This field may not be blank.\" for every endpoint.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Go to 'Directory > Federation and Social login'\r\n2. Click on 'Create > OpenID OAuth Source > Next'\r\n3. Enter your credentials (Name, Slug, Key, Secret and the Well-known URL)\r\n4. Click on 'Finish'\r\n5. See error\r\n\r\n**Expected behavior**\r\nUppon clicking on 'Finish' authentik should configure the missing endpoints using the Well-known URL.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Version and Deployment:**\r\n\r\n- authentik version: 2024.2.2 \r\n- Deployment: docker-compose\r\n\r\n**Additional context**\r\nI thought, it might have something to do with connectivity and proxy, since we are running authentik behind a corporate proxy. But I can curl the Well-known URL from inside the containers.\r\n\n", "before_files": [{"content": "\"\"\"OAuth Source Serializer\"\"\"\n\nfrom django.urls.base import reverse_lazy\nfrom django_filters.filters import BooleanFilter\nfrom django_filters.filterset import FilterSet\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field\nfrom requests import RequestException\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.core.api.sources import SourceSerializer\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.sources.oauth.models import OAuthSource\nfrom authentik.sources.oauth.types.registry import SourceType, registry\n\n\nclass SourceTypeSerializer(PassiveSerializer):\n \"\"\"Serializer for SourceType\"\"\"\n\n name = CharField(required=True)\n verbose_name = CharField(required=True)\n urls_customizable = BooleanField()\n request_token_url = CharField(read_only=True, allow_null=True)\n authorization_url = CharField(read_only=True, allow_null=True)\n access_token_url = CharField(read_only=True, allow_null=True)\n profile_url = CharField(read_only=True, allow_null=True)\n oidc_well_known_url = CharField(read_only=True, allow_null=True)\n oidc_jwks_url = CharField(read_only=True, allow_null=True)\n\n\nclass OAuthSourceSerializer(SourceSerializer):\n \"\"\"OAuth Source Serializer\"\"\"\n\n provider_type = 
ChoiceField(choices=registry.get_name_tuple())\n callback_url = SerializerMethodField()\n type = SerializerMethodField()\n\n def get_callback_url(self, instance: OAuthSource) -> str:\n \"\"\"Get OAuth Callback URL\"\"\"\n relative_url = reverse_lazy(\n \"authentik_sources_oauth:oauth-client-callback\",\n kwargs={\"source_slug\": instance.slug},\n )\n if \"request\" not in self.context:\n return relative_url\n return self.context[\"request\"].build_absolute_uri(relative_url)\n\n @extend_schema_field(SourceTypeSerializer)\n def get_type(self, instance: OAuthSource) -> SourceTypeSerializer:\n \"\"\"Get source's type configuration\"\"\"\n return SourceTypeSerializer(instance.source_type).data\n\n def validate(self, attrs: dict) -> dict:\n session = get_http_session()\n source_type = registry.find_type(attrs[\"provider_type\"])\n\n well_known = attrs.get(\"oidc_well_known_url\") or source_type.oidc_well_known_url\n inferred_oidc_jwks_url = None\n\n if well_known and well_known != \"\":\n try:\n well_known_config = session.get(well_known)\n well_known_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_well_known_url\": text}) from None\n config = well_known_config.json()\n if \"issuer\" not in config:\n raise ValidationError({\"oidc_well_known_url\": \"Invalid well-known configuration\"})\n field_map = {\n # authentik field to oidc field\n \"authorization_url\": \"authorization_endpoint\",\n \"access_token_url\": \"token_endpoint\",\n \"profile_url\": \"userinfo_endpoint\",\n }\n for ak_key, oidc_key in field_map.items():\n # Don't overwrite user-set values\n if ak_key in attrs and attrs[ak_key]:\n continue\n attrs[ak_key] = config.get(oidc_key, \"\")\n inferred_oidc_jwks_url = config.get(\"jwks_uri\", \"\")\n\n # Prefer user-entered URL to inferred URL to default URL\n jwks_url = attrs.get(\"oidc_jwks_url\") or inferred_oidc_jwks_url or source_type.oidc_jwks_url\n if jwks_url and jwks_url != \"\":\n attrs[\"oidc_jwks_url\"] = jwks_url\n try:\n jwks_config = session.get(jwks_url)\n jwks_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_jwks_url\": text}) from None\n config = jwks_config.json()\n attrs[\"oidc_jwks\"] = config\n\n provider_type = registry.find_type(attrs.get(\"provider_type\", \"\"))\n for url in [\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n ]:\n if getattr(provider_type, url, None) is None:\n if url not in attrs:\n raise ValidationError(\n f\"{url} is required for provider {provider_type.verbose_name}\"\n )\n return attrs\n\n class Meta:\n model = OAuthSource\n fields = SourceSerializer.Meta.fields + [\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"consumer_secret\",\n \"callback_url\",\n \"additional_scopes\",\n \"type\",\n \"oidc_well_known_url\",\n \"oidc_jwks_url\",\n \"oidc_jwks\",\n ]\n extra_kwargs = {\"consumer_secret\": {\"write_only\": True}}\n\n\nclass OAuthSourceFilter(FilterSet):\n \"\"\"OAuth Source filter set\"\"\"\n\n has_jwks = BooleanFilter(label=\"Only return sources with JWKS data\", method=\"filter_has_jwks\")\n\n def filter_has_jwks(self, queryset, name, value): # pragma: no cover\n \"\"\"Only return sources with JWKS data\"\"\"\n return queryset.exclude(oidc_jwks__iexact=\"{}\")\n\n class Meta:\n model = OAuthSource\n fields = [\n \"name\",\n 
\"slug\",\n \"enabled\",\n \"authentication_flow\",\n \"enrollment_flow\",\n \"policy_engine_mode\",\n \"user_matching_mode\",\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"additional_scopes\",\n ]\n\n\nclass OAuthSourceViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Source Viewset\"\"\"\n\n queryset = OAuthSource.objects.all()\n serializer_class = OAuthSourceSerializer\n lookup_field = \"slug\"\n filterset_class = OAuthSourceFilter\n search_fields = [\"name\", \"slug\"]\n ordering = [\"name\"]\n\n @extend_schema(\n responses={200: SourceTypeSerializer(many=True)},\n parameters=[\n OpenApiParameter(\n name=\"name\",\n location=OpenApiParameter.QUERY,\n type=OpenApiTypes.STR,\n )\n ],\n )\n @action(detail=False, pagination_class=None, filter_backends=[])\n def source_types(self, request: Request) -> Response:\n \"\"\"Get all creatable source types. If ?name is set, only returns the type for <name>.\n If <name> isn't found, returns the default type.\"\"\"\n data = []\n if \"name\" in request.query_params:\n source_type = registry.find_type(request.query_params.get(\"name\"))\n if source_type.__class__ != SourceType:\n data.append(SourceTypeSerializer(source_type).data)\n else:\n for source_type in registry.get():\n data.append(SourceTypeSerializer(source_type).data)\n return Response(data)\n", "path": "authentik/sources/oauth/api/source.py"}], "after_files": [{"content": "\"\"\"OAuth Source Serializer\"\"\"\n\nfrom django.urls.base import reverse_lazy\nfrom django_filters.filters import BooleanFilter\nfrom django_filters.filterset import FilterSet\nfrom drf_spectacular.types import OpenApiTypes\nfrom drf_spectacular.utils import OpenApiParameter, extend_schema, extend_schema_field\nfrom requests import RequestException\nfrom rest_framework.decorators import action\nfrom rest_framework.fields import BooleanField, CharField, ChoiceField, SerializerMethodField\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import ValidationError\nfrom rest_framework.viewsets import ModelViewSet\n\nfrom authentik.core.api.sources import SourceSerializer\nfrom authentik.core.api.used_by import UsedByMixin\nfrom authentik.core.api.utils import PassiveSerializer\nfrom authentik.lib.utils.http import get_http_session\nfrom authentik.sources.oauth.models import OAuthSource\nfrom authentik.sources.oauth.types.registry import SourceType, registry\n\n\nclass SourceTypeSerializer(PassiveSerializer):\n \"\"\"Serializer for SourceType\"\"\"\n\n name = CharField(required=True)\n verbose_name = CharField(required=True)\n urls_customizable = BooleanField()\n request_token_url = CharField(read_only=True, allow_null=True)\n authorization_url = CharField(read_only=True, allow_null=True)\n access_token_url = CharField(read_only=True, allow_null=True)\n profile_url = CharField(read_only=True, allow_null=True)\n oidc_well_known_url = CharField(read_only=True, allow_null=True)\n oidc_jwks_url = CharField(read_only=True, allow_null=True)\n\n\nclass OAuthSourceSerializer(SourceSerializer):\n \"\"\"OAuth Source Serializer\"\"\"\n\n provider_type = ChoiceField(choices=registry.get_name_tuple())\n callback_url = SerializerMethodField()\n type = SerializerMethodField()\n\n def get_callback_url(self, instance: OAuthSource) -> str:\n \"\"\"Get OAuth Callback URL\"\"\"\n relative_url = reverse_lazy(\n \"authentik_sources_oauth:oauth-client-callback\",\n kwargs={\"source_slug\": 
instance.slug},\n )\n if \"request\" not in self.context:\n return relative_url\n return self.context[\"request\"].build_absolute_uri(relative_url)\n\n @extend_schema_field(SourceTypeSerializer)\n def get_type(self, instance: OAuthSource) -> SourceTypeSerializer:\n \"\"\"Get source's type configuration\"\"\"\n return SourceTypeSerializer(instance.source_type).data\n\n def validate(self, attrs: dict) -> dict:\n session = get_http_session()\n source_type = registry.find_type(attrs[\"provider_type\"])\n\n well_known = attrs.get(\"oidc_well_known_url\") or source_type.oidc_well_known_url\n inferred_oidc_jwks_url = None\n\n if well_known and well_known != \"\":\n try:\n well_known_config = session.get(well_known)\n well_known_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_well_known_url\": text}) from None\n config = well_known_config.json()\n if \"issuer\" not in config:\n raise ValidationError({\"oidc_well_known_url\": \"Invalid well-known configuration\"})\n field_map = {\n # authentik field to oidc field\n \"authorization_url\": \"authorization_endpoint\",\n \"access_token_url\": \"token_endpoint\",\n \"profile_url\": \"userinfo_endpoint\",\n }\n for ak_key, oidc_key in field_map.items():\n # Don't overwrite user-set values\n if ak_key in attrs and attrs[ak_key]:\n continue\n attrs[ak_key] = config.get(oidc_key, \"\")\n inferred_oidc_jwks_url = config.get(\"jwks_uri\", \"\")\n\n # Prefer user-entered URL to inferred URL to default URL\n jwks_url = attrs.get(\"oidc_jwks_url\") or inferred_oidc_jwks_url or source_type.oidc_jwks_url\n if jwks_url and jwks_url != \"\":\n attrs[\"oidc_jwks_url\"] = jwks_url\n try:\n jwks_config = session.get(jwks_url)\n jwks_config.raise_for_status()\n except RequestException as exc:\n text = exc.response.text if exc.response else str(exc)\n raise ValidationError({\"oidc_jwks_url\": text}) from None\n config = jwks_config.json()\n attrs[\"oidc_jwks\"] = config\n\n provider_type = registry.find_type(attrs.get(\"provider_type\", \"\"))\n for url in [\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n ]:\n if getattr(provider_type, url, None) is None:\n if url not in attrs:\n raise ValidationError(\n f\"{url} is required for provider {provider_type.verbose_name}\"\n )\n return attrs\n\n class Meta:\n model = OAuthSource\n fields = SourceSerializer.Meta.fields + [\n \"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"consumer_secret\",\n \"callback_url\",\n \"additional_scopes\",\n \"type\",\n \"oidc_well_known_url\",\n \"oidc_jwks_url\",\n \"oidc_jwks\",\n ]\n extra_kwargs = {\n \"consumer_secret\": {\"write_only\": True},\n \"request_token_url\": {\"allow_blank\": True},\n \"authorization_url\": {\"allow_blank\": True},\n \"access_token_url\": {\"allow_blank\": True},\n \"profile_url\": {\"allow_blank\": True},\n }\n\n\nclass OAuthSourceFilter(FilterSet):\n \"\"\"OAuth Source filter set\"\"\"\n\n has_jwks = BooleanFilter(label=\"Only return sources with JWKS data\", method=\"filter_has_jwks\")\n\n def filter_has_jwks(self, queryset, name, value): # pragma: no cover\n \"\"\"Only return sources with JWKS data\"\"\"\n return queryset.exclude(oidc_jwks__iexact=\"{}\")\n\n class Meta:\n model = OAuthSource\n fields = [\n \"name\",\n \"slug\",\n \"enabled\",\n \"authentication_flow\",\n \"enrollment_flow\",\n \"policy_engine_mode\",\n \"user_matching_mode\",\n 
\"provider_type\",\n \"request_token_url\",\n \"authorization_url\",\n \"access_token_url\",\n \"profile_url\",\n \"consumer_key\",\n \"additional_scopes\",\n ]\n\n\nclass OAuthSourceViewSet(UsedByMixin, ModelViewSet):\n \"\"\"Source Viewset\"\"\"\n\n queryset = OAuthSource.objects.all()\n serializer_class = OAuthSourceSerializer\n lookup_field = \"slug\"\n filterset_class = OAuthSourceFilter\n search_fields = [\"name\", \"slug\"]\n ordering = [\"name\"]\n\n @extend_schema(\n responses={200: SourceTypeSerializer(many=True)},\n parameters=[\n OpenApiParameter(\n name=\"name\",\n location=OpenApiParameter.QUERY,\n type=OpenApiTypes.STR,\n )\n ],\n )\n @action(detail=False, pagination_class=None, filter_backends=[])\n def source_types(self, request: Request) -> Response:\n \"\"\"Get all creatable source types. If ?name is set, only returns the type for <name>.\n If <name> isn't found, returns the default type.\"\"\"\n data = []\n if \"name\" in request.query_params:\n source_type = registry.find_type(request.query_params.get(\"name\"))\n if source_type.__class__ != SourceType:\n data.append(SourceTypeSerializer(source_type).data)\n else:\n for source_type in registry.get():\n data.append(SourceTypeSerializer(source_type).data)\n return Response(data)\n", "path": "authentik/sources/oauth/api/source.py"}]} | 2,646 | 172 |
gh_patches_debug_12806 | rasdani/github-patches | git_diff | bokeh__bokeh-4677 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
undeprecate .from_df
With server apps it is necessary to update `.data` all "at once", so a method to construct a dictionary for `.data` is actually useful and needed. We are going to un-deprecate it for now; we can discuss other ideas for this functionality later.
--- END ISSUE ---
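A minimal sketch of the server-app pattern described above, assuming a Bokeh server callback that recomputes a DataFrame and replaces every column of a `ColumnDataSource` in a single assignment; the column names and values are made up.

```python
import pandas as pd
from bokeh.models import ColumnDataSource

df = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
source = ColumnDataSource(data=df)  # a DataFrame is accepted directly as "data"

def update(new_df: pd.DataFrame) -> None:
    # from_df returns a plain dict (including the frame's index column)
    # that can be assigned to .data so all columns change at once.
    source.data = ColumnDataSource.from_df(new_df)
```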
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bokeh/models/sources.py`
Content:
```
1 from __future__ import absolute_import
2
3 from ..core import validation
4 from ..core.validation.errors import COLUMN_LENGTHS
5 from ..core.properties import abstract
6 from ..core.properties import Any, Int, String, Instance, List, Dict, Bool, Enum, JSON
7 from ..model import Model
8 from ..util.dependencies import import_optional
9 from ..util.deprecate import deprecated
10 from ..util.serialization import transform_column_source_data
11 from .callbacks import Callback
12
13 pd = import_optional('pandas')
14
15 @abstract
16 class DataSource(Model):
17 """ A base class for data source types. ``DataSource`` is
18 not generally useful to instantiate on its own.
19
20 """
21
22 selected = Dict(String, Dict(String, Any), default={
23 '0d': {'glyph': None, 'indices': []},
24 '1d': {'indices': []},
25 '2d': {'indices': []}
26 }, help="""
27 A dict to indicate selected indices on different dimensions on this DataSource. Keys are:
28
29 - 0d: indicates whether a Line or Patch glyphs have been hit. Value is a
30 dict with the following keys:
31
32 - flag (boolean): true if glyph was with false otherwise
33 - indices (list): indices hit (if applicable)
34
35 - 1d: indicates whether any of all other glyph (except [multi]line or
36 patches) was hit:
37
38 - indices (list): indices that were hit/selected
39
40 - 2d: indicates whether a [multi]line or patches) were hit:
41
42 - indices (list(list)): indices of the lines/patches that were
43 hit/selected
44 """)
45
46 callback = Instance(Callback, help="""
47 A callback to run in the browser whenever the selection is changed.
48 """)
49
50 class ColumnDataSource(DataSource):
51 """ Maps names of columns to sequences or arrays.
52
53 If the ColumnDataSource initializer is called with a single argument that
54 is a dict or pandas.DataFrame, that argument is used as the value for the
55 "data" attribute. For example::
56
57 ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)
58 ColumnDataSource(df) # same as ColumnDataSource(data=df)
59
60 .. note::
61 There is an implicit assumption that all the columns in a
62 a given ColumnDataSource have the same length.
63
64 """
65
66 data = Dict(String, Any, help="""
67 Mapping of column names to sequences of data. The data can be, e.g,
68 Python lists or tuples, NumPy arrays, etc.
69 """)
70
71 column_names = List(String, help="""
72 An list of names for all the columns in this DataSource.
73 """)
74
75 def __init__(self, *args, **kw):
76 """ If called with a single argument that is a dict or
77 pandas.DataFrame, treat that implicitly as the "data" attribute.
78 """
79 if len(args) == 1 and "data" not in kw:
80 kw["data"] = args[0]
81 # TODO (bev) invalid to pass args and "data", check and raise exception
82 raw_data = kw.pop("data", {})
83 if not isinstance(raw_data, dict):
84 if pd and isinstance(raw_data, pd.DataFrame):
85 raw_data = self._data_from_df(raw_data)
86 else:
87 raise ValueError("expected a dict or pandas.DataFrame, got %s" % raw_data)
88 super(ColumnDataSource, self).__init__(**kw)
89 for name, data in raw_data.items():
90 self.add(data, name)
91
92 @staticmethod
93 def _data_from_df(df):
94 """ Create a ``dict`` of columns from a Pandas DataFrame,
95 suitable for creating a ColumnDataSource.
96
97 Args:
98 df (DataFrame) : data to convert
99
100 Returns:
101 dict(str, list)
102
103 """
104 index = df.index
105 new_data = {}
106 for colname in df:
107 new_data[colname] = df[colname].tolist()
108 if index.name:
109 new_data[index.name] = index.tolist()
110 elif index.names and not all([x is None for x in index.names]):
111 new_data["_".join(index.names)] = index.tolist()
112 else:
113 new_data["index"] = index.tolist()
114 return new_data
115
116 @classmethod
117 @deprecated("Bokeh 0.9.3", "ColumnDataSource initializer")
118 def from_df(cls, data):
119 """ Create a ``dict`` of columns from a Pandas DataFrame,
120 suitable for creating a ColumnDataSource.
121
122 Args:
123 data (DataFrame) : data to convert
124
125 Returns:
126 dict(str, list)
127
128 """
129 import warnings
130 warnings.warn("Method deprecated in Bokeh 0.9.3")
131 return cls._data_from_df(data)
132
133 def to_df(self):
134 """ Convert this data source to pandas dataframe.
135
136 If ``column_names`` is set, use those. Otherwise let Pandas
137 infer the column names. The ``column_names`` property can be
138 used both to order and filter the columns.
139
140 Returns:
141 DataFrame
142
143 """
144 if not pd:
145 raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')
146 if self.column_names:
147 return pd.DataFrame(self.data, columns=self.column_names)
148 else:
149 return pd.DataFrame(self.data)
150
151 def add(self, data, name=None):
152 """ Appends a new column of data to the data source.
153
154 Args:
155 data (seq) : new data to add
156 name (str, optional) : column name to use.
157 If not supplied, generate a name go the form "Series ####"
158
159 Returns:
160 str: the column name used
161
162 """
163 if name is None:
164 n = len(self.data)
165 while "Series %d"%n in self.data:
166 n += 1
167 name = "Series %d"%n
168 self.column_names.append(name)
169 self.data[name] = data
170 return name
171
172 def _to_json_like(self, include_defaults):
173 attrs = super(ColumnDataSource, self)._to_json_like(include_defaults=include_defaults)
174 if 'data' in attrs:
175 attrs['data'] = transform_column_source_data(attrs['data'])
176 return attrs
177
178 def remove(self, name):
179 """ Remove a column of data.
180
181 Args:
182 name (str) : name of the column to remove
183
184 Returns:
185 None
186
187 .. note::
188 If the column name does not exist, a warning is issued.
189
190 """
191 try:
192 self.column_names.remove(name)
193 del self.data[name]
194 except (ValueError, KeyError):
195 import warnings
196 warnings.warn("Unable to find column '%s' in data source" % name)
197
198 @deprecated("Bokeh 0.11.0", "bokeh.io.push_notebook")
199 def push_notebook(self):
200 """ Update a data source for a plot in a Jupyter notebook.
201
202 This function can be be used to update data in plot data sources
203 in the Jupyter notebook, without having to use the Bokeh server.
204
205 .. warning::
206 This function has been deprecated. Please use
207 ``bokeh.io.push_notebook()`` which will push all changes
208 (not just data sources) to the last shown plot in a Jupyter
209 notebook.
210
211 Returns:
212 None
213
214 """
215 from bokeh.io import push_notebook
216 push_notebook()
217
218 @validation.error(COLUMN_LENGTHS)
219 def _check_column_lengths(self):
220 lengths = set(len(x) for x in self.data.values())
221 if len(lengths) > 1:
222 return str(self)
223
224
225 def stream(self, new_data, rollover=None):
226 import numpy as np
227
228 newkeys = set(new_data.keys())
229 oldkeys = set(self.data.keys())
230 if newkeys != oldkeys:
231 missing = oldkeys - newkeys
232 extra = newkeys - oldkeys
233 if missing and extra:
234 raise ValueError("Must stream updates to all existing columns (missing: %s, extra: %s)" % (", ".join(sorted(missing)), ", ".join(sorted(extra))))
235 elif missing:
236 raise ValueError("Must stream updates to all existing columns (missing: %s)" % ", ".join(sorted(missing)))
237 else:
238 raise ValueError("Must stream updates to all existing columns (extra: %s)" % ", ".join(sorted(extra)))
239
240 lengths = set()
241 for x in new_data.values():
242 if isinstance(x, np.ndarray):
243 if len(x.shape) != 1:
244 raise ValueError("stream(...) only supports 1d sequences, got ndarray with size %r" % (x.shape,))
245 lengths.add(x.shape[0])
246 else:
247 lengths.add(len(x))
248
249 if len(lengths) > 1:
250 raise ValueError("All streaming column updates must be the same length")
251
252 self.data._stream(self.document, self, new_data, rollover)
253
254 class GeoJSONDataSource(ColumnDataSource):
255
256 geojson = JSON(help="""
257 GeoJSON that contains features for plotting. Currently GeoJSONDataSource can
258 only process a FeatureCollection or GeometryCollection.
259 """)
260
261
262 @abstract
263 class RemoteSource(ColumnDataSource):
264
265 data_url = String(help="""
266 The URL to the endpoint for the data.
267 """)
268
269 polling_interval = Int(help="""
270 polling interval for updating data source in milliseconds
271 """)
272
273 class AjaxDataSource(RemoteSource):
274
275 method = Enum('POST', 'GET', help="http method - GET or POST")
276
277 mode = Enum("replace", "append", help="""
278 Whether to append new data to existing data (up to ``max_size``),
279 or to replace existing data entirely.
280 """)
281
282 max_size = Int(help="""
283 Maximum size of the data array being kept after each pull requests.
284 Larger than that size, the data will be right shifted.
285 """)
286
287 if_modified = Bool(False, help="""
288 Whether to include an ``If-Modified-Since`` header in AJAX requests
289 to the server. If this header is supported by the server, then only
290 new data since the last request will be returned.
291 """)
292 content_type = String(default='application/json', help="""
293 Set the "contentType" parameter for the Ajax request.
294 """)
295 http_headers = Dict(String, String, help="""
296 HTTP headers to set for the Ajax request.
297 """)
298
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bokeh/models/sources.py b/bokeh/models/sources.py
--- a/bokeh/models/sources.py
+++ b/bokeh/models/sources.py
@@ -114,7 +114,6 @@
return new_data
@classmethod
- @deprecated("Bokeh 0.9.3", "ColumnDataSource initializer")
def from_df(cls, data):
""" Create a ``dict`` of columns from a Pandas DataFrame,
suitable for creating a ColumnDataSource.
@@ -126,8 +125,6 @@
dict(str, list)
"""
- import warnings
- warnings.warn("Method deprecated in Bokeh 0.9.3")
return cls._data_from_df(data)
def to_df(self):
| {"golden_diff": "diff --git a/bokeh/models/sources.py b/bokeh/models/sources.py\n--- a/bokeh/models/sources.py\n+++ b/bokeh/models/sources.py\n@@ -114,7 +114,6 @@\n return new_data\n \n @classmethod\n- @deprecated(\"Bokeh 0.9.3\", \"ColumnDataSource initializer\")\n def from_df(cls, data):\n \"\"\" Create a ``dict`` of columns from a Pandas DataFrame,\n suitable for creating a ColumnDataSource.\n@@ -126,8 +125,6 @@\n dict(str, list)\n \n \"\"\"\n- import warnings\n- warnings.warn(\"Method deprecated in Bokeh 0.9.3\")\n return cls._data_from_df(data)\n \n def to_df(self):\n", "issue": "undeprecate .from_df\nWith server apps it is necessary to update `.data` all \"at once\" so a method to construct a dictionary for `.data` is actually useful and needed. Going to un-deprecate for now, can discuss other ideas for this functionality later. \n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom ..core import validation\nfrom ..core.validation.errors import COLUMN_LENGTHS\nfrom ..core.properties import abstract\nfrom ..core.properties import Any, Int, String, Instance, List, Dict, Bool, Enum, JSON\nfrom ..model import Model\nfrom ..util.dependencies import import_optional\nfrom ..util.deprecate import deprecated\nfrom ..util.serialization import transform_column_source_data\nfrom .callbacks import Callback\n\npd = import_optional('pandas')\n\n@abstract\nclass DataSource(Model):\n \"\"\" A base class for data source types. ``DataSource`` is\n not generally useful to instantiate on its own.\n\n \"\"\"\n\n selected = Dict(String, Dict(String, Any), default={\n '0d': {'glyph': None, 'indices': []},\n '1d': {'indices': []},\n '2d': {'indices': []}\n }, help=\"\"\"\n A dict to indicate selected indices on different dimensions on this DataSource. Keys are:\n\n - 0d: indicates whether a Line or Patch glyphs have been hit. Value is a\n dict with the following keys:\n\n - flag (boolean): true if glyph was with false otherwise\n - indices (list): indices hit (if applicable)\n\n - 1d: indicates whether any of all other glyph (except [multi]line or\n patches) was hit:\n\n - indices (list): indices that were hit/selected\n\n - 2d: indicates whether a [multi]line or patches) were hit:\n\n - indices (list(list)): indices of the lines/patches that were\n hit/selected\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the selection is changed.\n \"\"\")\n\nclass ColumnDataSource(DataSource):\n \"\"\" Maps names of columns to sequences or arrays.\n\n If the ColumnDataSource initializer is called with a single argument that\n is a dict or pandas.DataFrame, that argument is used as the value for the\n \"data\" attribute. For example::\n\n ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)\n ColumnDataSource(df) # same as ColumnDataSource(data=df)\n\n .. note::\n There is an implicit assumption that all the columns in a\n a given ColumnDataSource have the same length.\n\n \"\"\"\n\n data = Dict(String, Any, help=\"\"\"\n Mapping of column names to sequences of data. 
The data can be, e.g,\n Python lists or tuples, NumPy arrays, etc.\n \"\"\")\n\n column_names = List(String, help=\"\"\"\n An list of names for all the columns in this DataSource.\n \"\"\")\n\n def __init__(self, *args, **kw):\n \"\"\" If called with a single argument that is a dict or\n pandas.DataFrame, treat that implicitly as the \"data\" attribute.\n \"\"\"\n if len(args) == 1 and \"data\" not in kw:\n kw[\"data\"] = args[0]\n # TODO (bev) invalid to pass args and \"data\", check and raise exception\n raw_data = kw.pop(\"data\", {})\n if not isinstance(raw_data, dict):\n if pd and isinstance(raw_data, pd.DataFrame):\n raw_data = self._data_from_df(raw_data)\n else:\n raise ValueError(\"expected a dict or pandas.DataFrame, got %s\" % raw_data)\n super(ColumnDataSource, self).__init__(**kw)\n for name, data in raw_data.items():\n self.add(data, name)\n\n @staticmethod\n def _data_from_df(df):\n \"\"\" Create a ``dict`` of columns from a Pandas DataFrame,\n suitable for creating a ColumnDataSource.\n\n Args:\n df (DataFrame) : data to convert\n\n Returns:\n dict(str, list)\n\n \"\"\"\n index = df.index\n new_data = {}\n for colname in df:\n new_data[colname] = df[colname].tolist()\n if index.name:\n new_data[index.name] = index.tolist()\n elif index.names and not all([x is None for x in index.names]):\n new_data[\"_\".join(index.names)] = index.tolist()\n else:\n new_data[\"index\"] = index.tolist()\n return new_data\n\n @classmethod\n @deprecated(\"Bokeh 0.9.3\", \"ColumnDataSource initializer\")\n def from_df(cls, data):\n \"\"\" Create a ``dict`` of columns from a Pandas DataFrame,\n suitable for creating a ColumnDataSource.\n\n Args:\n data (DataFrame) : data to convert\n\n Returns:\n dict(str, list)\n\n \"\"\"\n import warnings\n warnings.warn(\"Method deprecated in Bokeh 0.9.3\")\n return cls._data_from_df(data)\n\n def to_df(self):\n \"\"\" Convert this data source to pandas dataframe.\n\n If ``column_names`` is set, use those. Otherwise let Pandas\n infer the column names. The ``column_names`` property can be\n used both to order and filter the columns.\n\n Returns:\n DataFrame\n\n \"\"\"\n if not pd:\n raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')\n if self.column_names:\n return pd.DataFrame(self.data, columns=self.column_names)\n else:\n return pd.DataFrame(self.data)\n\n def add(self, data, name=None):\n \"\"\" Appends a new column of data to the data source.\n\n Args:\n data (seq) : new data to add\n name (str, optional) : column name to use.\n If not supplied, generate a name go the form \"Series ####\"\n\n Returns:\n str: the column name used\n\n \"\"\"\n if name is None:\n n = len(self.data)\n while \"Series %d\"%n in self.data:\n n += 1\n name = \"Series %d\"%n\n self.column_names.append(name)\n self.data[name] = data\n return name\n\n def _to_json_like(self, include_defaults):\n attrs = super(ColumnDataSource, self)._to_json_like(include_defaults=include_defaults)\n if 'data' in attrs:\n attrs['data'] = transform_column_source_data(attrs['data'])\n return attrs\n\n def remove(self, name):\n \"\"\" Remove a column of data.\n\n Args:\n name (str) : name of the column to remove\n\n Returns:\n None\n\n .. 
note::\n If the column name does not exist, a warning is issued.\n\n \"\"\"\n try:\n self.column_names.remove(name)\n del self.data[name]\n except (ValueError, KeyError):\n import warnings\n warnings.warn(\"Unable to find column '%s' in data source\" % name)\n\n @deprecated(\"Bokeh 0.11.0\", \"bokeh.io.push_notebook\")\n def push_notebook(self):\n \"\"\" Update a data source for a plot in a Jupyter notebook.\n\n This function can be be used to update data in plot data sources\n in the Jupyter notebook, without having to use the Bokeh server.\n\n .. warning::\n This function has been deprecated. Please use\n ``bokeh.io.push_notebook()`` which will push all changes\n (not just data sources) to the last shown plot in a Jupyter\n notebook.\n\n Returns:\n None\n\n \"\"\"\n from bokeh.io import push_notebook\n push_notebook()\n\n @validation.error(COLUMN_LENGTHS)\n def _check_column_lengths(self):\n lengths = set(len(x) for x in self.data.values())\n if len(lengths) > 1:\n return str(self)\n\n\n def stream(self, new_data, rollover=None):\n import numpy as np\n\n newkeys = set(new_data.keys())\n oldkeys = set(self.data.keys())\n if newkeys != oldkeys:\n missing = oldkeys - newkeys\n extra = newkeys - oldkeys\n if missing and extra:\n raise ValueError(\"Must stream updates to all existing columns (missing: %s, extra: %s)\" % (\", \".join(sorted(missing)), \", \".join(sorted(extra))))\n elif missing:\n raise ValueError(\"Must stream updates to all existing columns (missing: %s)\" % \", \".join(sorted(missing)))\n else:\n raise ValueError(\"Must stream updates to all existing columns (extra: %s)\" % \", \".join(sorted(extra)))\n\n lengths = set()\n for x in new_data.values():\n if isinstance(x, np.ndarray):\n if len(x.shape) != 1:\n raise ValueError(\"stream(...) only supports 1d sequences, got ndarray with size %r\" % (x.shape,))\n lengths.add(x.shape[0])\n else:\n lengths.add(len(x))\n\n if len(lengths) > 1:\n raise ValueError(\"All streaming column updates must be the same length\")\n\n self.data._stream(self.document, self, new_data, rollover)\n\nclass GeoJSONDataSource(ColumnDataSource):\n\n geojson = JSON(help=\"\"\"\n GeoJSON that contains features for plotting. Currently GeoJSONDataSource can\n only process a FeatureCollection or GeometryCollection.\n \"\"\")\n\n\n@abstract\nclass RemoteSource(ColumnDataSource):\n\n data_url = String(help=\"\"\"\n The URL to the endpoint for the data.\n \"\"\")\n\n polling_interval = Int(help=\"\"\"\n polling interval for updating data source in milliseconds\n \"\"\")\n\nclass AjaxDataSource(RemoteSource):\n\n method = Enum('POST', 'GET', help=\"http method - GET or POST\")\n\n mode = Enum(\"replace\", \"append\", help=\"\"\"\n Whether to append new data to existing data (up to ``max_size``),\n or to replace existing data entirely.\n \"\"\")\n\n max_size = Int(help=\"\"\"\n Maximum size of the data array being kept after each pull requests.\n Larger than that size, the data will be right shifted.\n \"\"\")\n\n if_modified = Bool(False, help=\"\"\"\n Whether to include an ``If-Modified-Since`` header in AJAX requests\n to the server. 
If this header is supported by the server, then only\n new data since the last request will be returned.\n \"\"\")\n content_type = String(default='application/json', help=\"\"\"\n Set the \"contentType\" parameter for the Ajax request.\n \"\"\")\n http_headers = Dict(String, String, help=\"\"\"\n HTTP headers to set for the Ajax request.\n \"\"\")\n", "path": "bokeh/models/sources.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom ..core import validation\nfrom ..core.validation.errors import COLUMN_LENGTHS\nfrom ..core.properties import abstract\nfrom ..core.properties import Any, Int, String, Instance, List, Dict, Bool, Enum, JSON\nfrom ..model import Model\nfrom ..util.dependencies import import_optional\nfrom ..util.deprecate import deprecated\nfrom ..util.serialization import transform_column_source_data\nfrom .callbacks import Callback\n\npd = import_optional('pandas')\n\n@abstract\nclass DataSource(Model):\n \"\"\" A base class for data source types. ``DataSource`` is\n not generally useful to instantiate on its own.\n\n \"\"\"\n\n selected = Dict(String, Dict(String, Any), default={\n '0d': {'glyph': None, 'indices': []},\n '1d': {'indices': []},\n '2d': {'indices': []}\n }, help=\"\"\"\n A dict to indicate selected indices on different dimensions on this DataSource. Keys are:\n\n - 0d: indicates whether a Line or Patch glyphs have been hit. Value is a\n dict with the following keys:\n\n - flag (boolean): true if glyph was with false otherwise\n - indices (list): indices hit (if applicable)\n\n - 1d: indicates whether any of all other glyph (except [multi]line or\n patches) was hit:\n\n - indices (list): indices that were hit/selected\n\n - 2d: indicates whether a [multi]line or patches) were hit:\n\n - indices (list(list)): indices of the lines/patches that were\n hit/selected\n \"\"\")\n\n callback = Instance(Callback, help=\"\"\"\n A callback to run in the browser whenever the selection is changed.\n \"\"\")\n\nclass ColumnDataSource(DataSource):\n \"\"\" Maps names of columns to sequences or arrays.\n\n If the ColumnDataSource initializer is called with a single argument that\n is a dict or pandas.DataFrame, that argument is used as the value for the\n \"data\" attribute. For example::\n\n ColumnDataSource(mydict) # same as ColumnDataSource(data=mydict)\n ColumnDataSource(df) # same as ColumnDataSource(data=df)\n\n .. note::\n There is an implicit assumption that all the columns in a\n a given ColumnDataSource have the same length.\n\n \"\"\"\n\n data = Dict(String, Any, help=\"\"\"\n Mapping of column names to sequences of data. 
The data can be, e.g,\n Python lists or tuples, NumPy arrays, etc.\n \"\"\")\n\n column_names = List(String, help=\"\"\"\n An list of names for all the columns in this DataSource.\n \"\"\")\n\n def __init__(self, *args, **kw):\n \"\"\" If called with a single argument that is a dict or\n pandas.DataFrame, treat that implicitly as the \"data\" attribute.\n \"\"\"\n if len(args) == 1 and \"data\" not in kw:\n kw[\"data\"] = args[0]\n # TODO (bev) invalid to pass args and \"data\", check and raise exception\n raw_data = kw.pop(\"data\", {})\n if not isinstance(raw_data, dict):\n if pd and isinstance(raw_data, pd.DataFrame):\n raw_data = self._data_from_df(raw_data)\n else:\n raise ValueError(\"expected a dict or pandas.DataFrame, got %s\" % raw_data)\n super(ColumnDataSource, self).__init__(**kw)\n for name, data in raw_data.items():\n self.add(data, name)\n\n @staticmethod\n def _data_from_df(df):\n \"\"\" Create a ``dict`` of columns from a Pandas DataFrame,\n suitable for creating a ColumnDataSource.\n\n Args:\n df (DataFrame) : data to convert\n\n Returns:\n dict(str, list)\n\n \"\"\"\n index = df.index\n new_data = {}\n for colname in df:\n new_data[colname] = df[colname].tolist()\n if index.name:\n new_data[index.name] = index.tolist()\n elif index.names and not all([x is None for x in index.names]):\n new_data[\"_\".join(index.names)] = index.tolist()\n else:\n new_data[\"index\"] = index.tolist()\n return new_data\n\n @classmethod\n def from_df(cls, data):\n \"\"\" Create a ``dict`` of columns from a Pandas DataFrame,\n suitable for creating a ColumnDataSource.\n\n Args:\n data (DataFrame) : data to convert\n\n Returns:\n dict(str, list)\n\n \"\"\"\n return cls._data_from_df(data)\n\n def to_df(self):\n \"\"\" Convert this data source to pandas dataframe.\n\n If ``column_names`` is set, use those. Otherwise let Pandas\n infer the column names. The ``column_names`` property can be\n used both to order and filter the columns.\n\n Returns:\n DataFrame\n\n \"\"\"\n if not pd:\n raise RuntimeError('Pandas must be installed to convert to a Pandas Dataframe')\n if self.column_names:\n return pd.DataFrame(self.data, columns=self.column_names)\n else:\n return pd.DataFrame(self.data)\n\n def add(self, data, name=None):\n \"\"\" Appends a new column of data to the data source.\n\n Args:\n data (seq) : new data to add\n name (str, optional) : column name to use.\n If not supplied, generate a name go the form \"Series ####\"\n\n Returns:\n str: the column name used\n\n \"\"\"\n if name is None:\n n = len(self.data)\n while \"Series %d\"%n in self.data:\n n += 1\n name = \"Series %d\"%n\n self.column_names.append(name)\n self.data[name] = data\n return name\n\n def _to_json_like(self, include_defaults):\n attrs = super(ColumnDataSource, self)._to_json_like(include_defaults=include_defaults)\n if 'data' in attrs:\n attrs['data'] = transform_column_source_data(attrs['data'])\n return attrs\n\n def remove(self, name):\n \"\"\" Remove a column of data.\n\n Args:\n name (str) : name of the column to remove\n\n Returns:\n None\n\n .. 
note::\n If the column name does not exist, a warning is issued.\n\n \"\"\"\n try:\n self.column_names.remove(name)\n del self.data[name]\n except (ValueError, KeyError):\n import warnings\n warnings.warn(\"Unable to find column '%s' in data source\" % name)\n\n @deprecated(\"Bokeh 0.11.0\", \"bokeh.io.push_notebook\")\n def push_notebook(self):\n \"\"\" Update a data source for a plot in a Jupyter notebook.\n\n This function can be be used to update data in plot data sources\n in the Jupyter notebook, without having to use the Bokeh server.\n\n .. warning::\n This function has been deprecated. Please use\n ``bokeh.io.push_notebook()`` which will push all changes\n (not just data sources) to the last shown plot in a Jupyter\n notebook.\n\n Returns:\n None\n\n \"\"\"\n from bokeh.io import push_notebook\n push_notebook()\n\n @validation.error(COLUMN_LENGTHS)\n def _check_column_lengths(self):\n lengths = set(len(x) for x in self.data.values())\n if len(lengths) > 1:\n return str(self)\n\n\n def stream(self, new_data, rollover=None):\n import numpy as np\n\n newkeys = set(new_data.keys())\n oldkeys = set(self.data.keys())\n if newkeys != oldkeys:\n missing = oldkeys - newkeys\n extra = newkeys - oldkeys\n if missing and extra:\n raise ValueError(\"Must stream updates to all existing columns (missing: %s, extra: %s)\" % (\", \".join(sorted(missing)), \", \".join(sorted(extra))))\n elif missing:\n raise ValueError(\"Must stream updates to all existing columns (missing: %s)\" % \", \".join(sorted(missing)))\n else:\n raise ValueError(\"Must stream updates to all existing columns (extra: %s)\" % \", \".join(sorted(extra)))\n\n lengths = set()\n for x in new_data.values():\n if isinstance(x, np.ndarray):\n if len(x.shape) != 1:\n raise ValueError(\"stream(...) only supports 1d sequences, got ndarray with size %r\" % (x.shape,))\n lengths.add(x.shape[0])\n else:\n lengths.add(len(x))\n\n if len(lengths) > 1:\n raise ValueError(\"All streaming column updates must be the same length\")\n\n self.data._stream(self.document, self, new_data, rollover)\n\nclass GeoJSONDataSource(ColumnDataSource):\n\n geojson = JSON(help=\"\"\"\n GeoJSON that contains features for plotting. Currently GeoJSONDataSource can\n only process a FeatureCollection or GeometryCollection.\n \"\"\")\n\n\n@abstract\nclass RemoteSource(ColumnDataSource):\n\n data_url = String(help=\"\"\"\n The URL to the endpoint for the data.\n \"\"\")\n\n polling_interval = Int(help=\"\"\"\n polling interval for updating data source in milliseconds\n \"\"\")\n\nclass AjaxDataSource(RemoteSource):\n\n method = Enum('POST', 'GET', help=\"http method - GET or POST\")\n\n mode = Enum(\"replace\", \"append\", help=\"\"\"\n Whether to append new data to existing data (up to ``max_size``),\n or to replace existing data entirely.\n \"\"\")\n\n max_size = Int(help=\"\"\"\n Maximum size of the data array being kept after each pull requests.\n Larger than that size, the data will be right shifted.\n \"\"\")\n\n if_modified = Bool(False, help=\"\"\"\n Whether to include an ``If-Modified-Since`` header in AJAX requests\n to the server. If this header is supported by the server, then only\n new data since the last request will be returned.\n \"\"\")\n content_type = String(default='application/json', help=\"\"\"\n Set the \"contentType\" parameter for the Ajax request.\n \"\"\")\n http_headers = Dict(String, String, help=\"\"\"\n HTTP headers to set for the Ajax request.\n \"\"\")\n", "path": "bokeh/models/sources.py"}]} | 3,359 | 170 |
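Aside on the record above (illustrative only, not part of the dataset row; assumes pandas and the bokeh `ColumnDataSource` API shown in the snippet): once `from_df` is no longer deprecated, it can be called directly to get a plain dict of columns, or the DataFrame can be handed straight to the initializer.

```python
import pandas as pd
from bokeh.models import ColumnDataSource

df = pd.DataFrame({"x": [1, 2, 3], "y": [4.0, 5.0, 6.0]})

data = ColumnDataSource.from_df(df)   # plain dict of columns (index included)
source = ColumnDataSource(df)         # or hand the DataFrame to the initializer
```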
gh_patches_debug_27442 | rasdani/github-patches | git_diff | aio-libs-abandoned__aioredis-py-839 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop Python 3.5 support
Python 3.5 was a famous release (async/await syntax was introduced) but the version has reached end-of-life.
We can (and should) drop it.
Speaking as the maintainer of other libraries, I can say that it simplifies the code base a little
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 import os.path
3 import sys
4 import platform
5 from setuptools import setup, find_packages
6
7
8 install_requires = ['async-timeout']
9 if platform.python_implementation() == 'CPython':
10 install_requires.append('hiredis')
11
12 PY_VER = sys.version_info
13
14 if PY_VER < (3, 5):
15 raise RuntimeError("aioredis doesn't support Python version prior 3.5")
16
17
18 def read(*parts):
19 with open(os.path.join(*parts), 'rt') as f:
20 return f.read().strip()
21
22
23 def read_version():
24 regexp = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
25 init_py = os.path.join(os.path.dirname(__file__),
26 'aioredis', '__init__.py')
27 with open(init_py) as f:
28 for line in f:
29 match = regexp.match(line)
30 if match is not None:
31 return match.group(1)
32 raise RuntimeError('Cannot find version in {}'.format(init_py))
33
34
35 classifiers = [
36 'License :: OSI Approved :: MIT License',
37 'Development Status :: 4 - Beta',
38 'Programming Language :: Python',
39 'Programming Language :: Python :: 3',
40 'Programming Language :: Python :: 3.5',
41 'Programming Language :: Python :: 3.6',
42 'Programming Language :: Python :: 3.7',
43 'Programming Language :: Python :: 3 :: Only',
44 'Operating System :: POSIX',
45 'Environment :: Web Environment',
46 'Intended Audience :: Developers',
47 'Topic :: Software Development',
48 'Topic :: Software Development :: Libraries',
49 'Framework :: AsyncIO',
50 ]
51
52 setup(name='aioredis',
53 version=read_version(),
54 description=("asyncio (PEP 3156) Redis support"),
55 long_description="\n\n".join((read('README.rst'), read('CHANGES.txt'))),
56 classifiers=classifiers,
57 platforms=["POSIX"],
58 author="Alexey Popravka",
59 author_email="[email protected]",
60 url="https://github.com/aio-libs/aioredis",
61 license="MIT",
62 packages=find_packages(exclude=["tests"]),
63 install_requires=install_requires,
64 include_package_data=True,
65 )
66
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,20 +1,8 @@
import re
import os.path
-import sys
-import platform
from setuptools import setup, find_packages
-install_requires = ['async-timeout']
-if platform.python_implementation() == 'CPython':
- install_requires.append('hiredis')
-
-PY_VER = sys.version_info
-
-if PY_VER < (3, 5):
- raise RuntimeError("aioredis doesn't support Python version prior 3.5")
-
-
def read(*parts):
with open(os.path.join(*parts), 'rt') as f:
return f.read().strip()
@@ -37,7 +25,6 @@
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3 :: Only',
@@ -60,6 +47,10 @@
url="https://github.com/aio-libs/aioredis",
license="MIT",
packages=find_packages(exclude=["tests"]),
- install_requires=install_requires,
+ install_requires=[
+ 'async-timeout',
+ 'hiredis; implementation_name=="cpython"'
+ ],
+ python_requires=">=3.6",
include_package_data=True,
)
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,20 +1,8 @@\n import re\n import os.path\n-import sys\n-import platform\n from setuptools import setup, find_packages\n \n \n-install_requires = ['async-timeout']\n-if platform.python_implementation() == 'CPython':\n- install_requires.append('hiredis')\n-\n-PY_VER = sys.version_info\n-\n-if PY_VER < (3, 5):\n- raise RuntimeError(\"aioredis doesn't support Python version prior 3.5\")\n-\n-\n def read(*parts):\n with open(os.path.join(*parts), 'rt') as f:\n return f.read().strip()\n@@ -37,7 +25,6 @@\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n@@ -60,6 +47,10 @@\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n- install_requires=install_requires,\n+ install_requires=[\n+ 'async-timeout',\n+ 'hiredis; implementation_name==\"cpython\"'\n+ ],\n+ python_requires=\">=3.6\",\n include_package_data=True,\n )\n", "issue": "Drop Python 3.5 support\nPython 3.5 was a famous release (async/await syntax was introduced) but the version has reached end-of-life.\r\nWe can (and should) drop it.\r\nSpeaking as the maintainer of other libraries, I can say that it simplifies the code base a little\n", "before_files": [{"content": "import re\nimport os.path\nimport sys\nimport platform\nfrom setuptools import setup, find_packages\n\n\ninstall_requires = ['async-timeout']\nif platform.python_implementation() == 'CPython':\n install_requires.append('hiredis')\n\nPY_VER = sys.version_info\n\nif PY_VER < (3, 5):\n raise RuntimeError(\"aioredis doesn't support Python version prior 3.5\")\n\n\ndef read(*parts):\n with open(os.path.join(*parts), 'rt') as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*'([\\d.abrc]+)'\")\n init_py = os.path.join(os.path.dirname(__file__),\n 'aioredis', '__init__.py')\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n raise RuntimeError('Cannot find version in {}'.format(init_py))\n\n\nclassifiers = [\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Framework :: AsyncIO',\n]\n\nsetup(name='aioredis',\n version=read_version(),\n description=(\"asyncio (PEP 3156) Redis support\"),\n long_description=\"\\n\\n\".join((read('README.rst'), read('CHANGES.txt'))),\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n author=\"Alexey Popravka\",\n author_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=install_requires,\n include_package_data=True,\n )\n", "path": "setup.py"}], "after_files": [{"content": "import re\nimport os.path\nfrom setuptools import setup, find_packages\n\n\ndef read(*parts):\n 
with open(os.path.join(*parts), 'rt') as f:\n return f.read().strip()\n\n\ndef read_version():\n regexp = re.compile(r\"^__version__\\W*=\\W*'([\\d.abrc]+)'\")\n init_py = os.path.join(os.path.dirname(__file__),\n 'aioredis', '__init__.py')\n with open(init_py) as f:\n for line in f:\n match = regexp.match(line)\n if match is not None:\n return match.group(1)\n raise RuntimeError('Cannot find version in {}'.format(init_py))\n\n\nclassifiers = [\n 'License :: OSI Approved :: MIT License',\n 'Development Status :: 4 - Beta',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Operating System :: POSIX',\n 'Environment :: Web Environment',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Framework :: AsyncIO',\n]\n\nsetup(name='aioredis',\n version=read_version(),\n description=(\"asyncio (PEP 3156) Redis support\"),\n long_description=\"\\n\\n\".join((read('README.rst'), read('CHANGES.txt'))),\n classifiers=classifiers,\n platforms=[\"POSIX\"],\n author=\"Alexey Popravka\",\n author_email=\"[email protected]\",\n url=\"https://github.com/aio-libs/aioredis\",\n license=\"MIT\",\n packages=find_packages(exclude=[\"tests\"]),\n install_requires=[\n 'async-timeout',\n 'hiredis; implementation_name==\"cpython\"'\n ],\n python_requires=\">=3.6\",\n include_package_data=True,\n )\n", "path": "setup.py"}]} | 936 | 335 |
gh_patches_debug_17186 | rasdani/github-patches | git_diff | microsoft__Qcodes-5008 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Parsing version string error with Keysight_34465A
When I try to connect a Keysight_34465A digital multimeter with
```
from qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A
dmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')
```
I get "InvalidVersion: Invalid version '65.02.17-02.40-02.17-00.52-04-01'", whereas the actual firmware version is 'A.02.17-02.40-02.17-00.52-04-01':
```
---------------------------------------------------------------------------
InvalidVersion Traceback (most recent call last)
Cell In[1], line 2
1 from qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A
----> 2 dmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')
File ~\.conda\envs\qcodes\lib\site-packages\qcodes\instrument\instrument_meta.py:36, in InstrumentMeta.__call__(cls, *args, **kwargs)
31 def __call__(cls, *args: Any, **kwargs: Any) -> Any:
32 """
33 Overloads `type.__call__` to add code that runs only if __init__ completes
34 successfully.
35 """
---> 36 new_inst = super().__call__(*args, **kwargs)
37 is_abstract = new_inst._is_abstract()
38 if is_abstract:
File ~\.conda\envs\qcodes\lib\site-packages\qcodes\instrument_drivers\Keysight\Keysight_34465A_submodules.py:12, in Keysight34465A.__init__(self, name, address, silent, **kwargs)
10 def __init__(self, name: str, address: str, silent: bool = False,
11 **kwargs: Any):
---> 12 super().__init__(name, address, silent, **kwargs)
File ~\.conda\envs\qcodes\lib\site-packages\qcodes\instrument_drivers\Keysight\private\Keysight_344xxA_submodules.py:470, in _Keysight_344xxA.__init__(self, name, address, silent, **kwargs)
463 ####################################
464 # Instrument specifications
466 options = self._options()
467 self.has_DIG = self.is_34465A_34470A and (
468 "DIG" in options
469 or version.parse(convert_legacy_version_to_supported_version("A.03"))
--> 470 <= version.parse(
471 convert_legacy_version_to_supported_version(idn["firmware"])
472 )
473 )
474 # Note that the firmware version check is still needed because
475 # ``_options`` (the ``*OPT?`` command) returns 'DIG' option for
476 # firmware 3.0 only if it has been purchased before
477 self.has_MEM = self.is_34465A_34470A and 'MEM' in options
File ~\.conda\envs\qcodes\lib\site-packages\packaging\version.py:52, in parse(version)
43 def parse(version: str) -> "Version":
44 """Parse the given version string.
45
46 >>> parse('1.0.dev1')
(...)
50 :raises InvalidVersion: When the version string is not a valid version.
51 """
---> 52 return Version(version)
File ~\.conda\envs\qcodes\lib\site-packages\packaging\version.py:197, in Version.__init__(self, version)
195 match = self._regex.search(version)
196 if not match:
--> 197 raise InvalidVersion(f"Invalid version: '{version}'")
199 # Store the parsed out pieces of the version
200 self._version = _Version(
201 epoch=int(match.group("epoch")) if match.group("epoch") else 0,
202 release=tuple(int(i) for i in match.group("release").split(".")),
(...)
208 local=_parse_local_version(match.group("local")),
209 )
```
OS Windows 11
qcodes 0.37.0
Pyvisa 1.13.0
pyserial 3.5
Parsing version string error with Keysight_34465A
When I try to connect a Keysight_34465A digital multimeter with
```
from qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A
dmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')
```
I get "InvalidVersion: Invalid version '65.02.17-02.40-02.17-00.52-04-01'", whereas the actual firmware version is 'A.02.17-02.40-02.17-00.52-04-01':
```
---------------------------------------------------------------------------
InvalidVersion Traceback (most recent call last)
Cell In[1], line 2
1 from qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A
----> 2 dmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')
File ~\.conda\envs\qcodes\lib\site-packages\qcodes\instrument\instrument_meta.py:36, in InstrumentMeta.__call__(cls, *args, **kwargs)
31 def __call__(cls, *args: Any, **kwargs: Any) -> Any:
32 """
33 Overloads `type.__call__` to add code that runs only if __init__ completes
34 successfully.
35 """
---> 36 new_inst = super().__call__(*args, **kwargs)
37 is_abstract = new_inst._is_abstract()
38 if is_abstract:
File ~\.conda\envs\qcodes\lib\site-packages\qcodes\instrument_drivers\Keysight\Keysight_34465A_submodules.py:12, in Keysight34465A.__init__(self, name, address, silent, **kwargs)
10 def __init__(self, name: str, address: str, silent: bool = False,
11 **kwargs: Any):
---> 12 super().__init__(name, address, silent, **kwargs)
File ~\.conda\envs\qcodes\lib\site-packages\qcodes\instrument_drivers\Keysight\private\Keysight_344xxA_submodules.py:470, in _Keysight_344xxA.__init__(self, name, address, silent, **kwargs)
463 ####################################
464 # Instrument specifications
466 options = self._options()
467 self.has_DIG = self.is_34465A_34470A and (
468 "DIG" in options
469 or version.parse(convert_legacy_version_to_supported_version("A.03"))
--> 470 <= version.parse(
471 convert_legacy_version_to_supported_version(idn["firmware"])
472 )
473 )
474 # Note that the firmware version check is still needed because
475 # ``_options`` (the ``*OPT?`` command) returns 'DIG' option for
476 # firmware 3.0 only if it has been purchased before
477 self.has_MEM = self.is_34465A_34470A and 'MEM' in options
File ~\.conda\envs\qcodes\lib\site-packages\packaging\version.py:52, in parse(version)
43 def parse(version: str) -> "Version":
44 """Parse the given version string.
45
46 >>> parse('1.0.dev1')
(...)
50 :raises InvalidVersion: When the version string is not a valid version.
51 """
---> 52 return Version(version)
File ~\.conda\envs\qcodes\lib\site-packages\packaging\version.py:197, in Version.__init__(self, version)
195 match = self._regex.search(version)
196 if not match:
--> 197 raise InvalidVersion(f"Invalid version: '{version}'")
199 # Store the parsed out pieces of the version
200 self._version = _Version(
201 epoch=int(match.group("epoch")) if match.group("epoch") else 0,
202 release=tuple(int(i) for i in match.group("release").split(".")),
(...)
208 local=_parse_local_version(match.group("local")),
209 )
```
OS Windows 11
qcodes 0.37.0
Pyvisa 1.13.0
pyserial 3.5
--- END ISSUE ---
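To make the failure mode concrete, a minimal sketch (assuming the `packaging` library and the firmware string from the traceback above; the conversion step is inferred from the reported error, not quoted from the driver):

```python
from packaging import version

raw = "A.02.17-02.40-02.17-00.52-04-01"   # firmware reported by the instrument
converted = raw.replace("A", "65", 1)      # legacy conversion: 'A' -> str(ord('A'))
# version.parse(converted) raises InvalidVersion: the trailing '-...' build
# suffix is not valid PEP 440, matching the traceback shown above.

trimmed = converted.split("-")[0]          # keep only the leading release part
print(version.parse(trimmed))              # 65.2.17 -- now comparable to '65.03'
```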
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/utils/installation_info.py`
Content:
```
1 """
2 This module contains helper functions that provide information about how
3 QCoDeS is installed and about what other packages are installed along with
4 QCoDeS
5 """
6 import json
7 import logging
8 import subprocess
9 import sys
10 from typing import Dict, Optional
11
12 if sys.version_info >= (3, 10):
13 # distribution.name used below became part of the
14 # official api in 3.10
15 from importlib.metadata import distributions
16 else:
17 # 3.9 and earlier
18 from importlib_metadata import distributions
19
20 from qcodes.utils.deprecate import deprecate
21
22 log = logging.getLogger(__name__)
23
24
25 def is_qcodes_installed_editably() -> Optional[bool]:
26 """
27 Try to ask pip whether QCoDeS is installed in editable mode and return
28 the answer a boolean. Returns None if pip somehow did not respond as
29 expected.
30 """
31
32 answer: Optional[bool]
33
34 try:
35 pipproc = subprocess.run(['python', '-m', 'pip', 'list', '-e', '--no-index',
36 '--format=json'],
37 check=True,
38 stdout=subprocess.PIPE)
39 e_pkgs = json.loads(pipproc.stdout.decode('utf-8'))
40 answer = any([d["name"] == 'qcodes' for d in e_pkgs])
41 except Exception as e: # we actually do want a catch-all here
42 log.warning(f'{type(e)}: {str(e)}')
43 answer = None
44
45 return answer
46
47
48 @deprecate("function 'get_qcodes_version'", alternative="qcodes.__version__")
49 def get_qcodes_version() -> str:
50 """
51 Get the version of the currently installed QCoDeS
52 """
53 from qcodes._version import __version__
54 return __version__
55
56
57 def get_all_installed_package_versions() -> Dict[str, str]:
58 """
59 Return a dictionary of the currently installed packages and their versions.
60 """
61 return {d.name: d.version for d in distributions()}
62
63
64 def convert_legacy_version_to_supported_version(ver: str) -> str:
65 """
66 Convert a legacy version str containing single chars rather than
67 numbers to a regular version string. This is done by replacing a char
68 by its ASCII code (using ``ord``). This assumes that the version number
69 only uses at most a single char per level and only ASCII chars.
70 """
71
72 temp_list = []
73 for v in ver:
74 if v.isalpha():
75 temp_list.append(str(ord(v.upper())))
76 else:
77 temp_list.append(v)
78 return "".join(temp_list)
79
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qcodes/utils/installation_info.py b/qcodes/utils/installation_info.py
--- a/qcodes/utils/installation_info.py
+++ b/qcodes/utils/installation_info.py
@@ -67,6 +67,11 @@
numbers to a regular version string. This is done by replacing a char
by its ASCII code (using ``ord``). This assumes that the version number
only uses at most a single char per level and only ASCII chars.
+
+ It also splits off anything that comes after the first ``-`` in the version str.
+
+ This is meant to pass versions like ``'A.02.17-02.40-02.17-00.52-04-01'``
+ primarily used by Keysight instruments.
"""
temp_list = []
@@ -75,4 +80,5 @@
temp_list.append(str(ord(v.upper())))
else:
temp_list.append(v)
- return "".join(temp_list)
+ temp_str = "".join(temp_list)
+ return temp_str.split("-")[0]
| {"golden_diff": "diff --git a/qcodes/utils/installation_info.py b/qcodes/utils/installation_info.py\n--- a/qcodes/utils/installation_info.py\n+++ b/qcodes/utils/installation_info.py\n@@ -67,6 +67,11 @@\n numbers to a regular version string. This is done by replacing a char\n by its ASCII code (using ``ord``). This assumes that the version number\n only uses at most a single char per level and only ASCII chars.\n+\n+ It also splits off anything that comes after the first ``-`` in the version str.\n+\n+ This is meant to pass versions like ``'A.02.17-02.40-02.17-00.52-04-01'``\n+ primarily used by Keysight instruments.\n \"\"\"\n \n temp_list = []\n@@ -75,4 +80,5 @@\n temp_list.append(str(ord(v.upper())))\n else:\n temp_list.append(v)\n- return \"\".join(temp_list)\n+ temp_str = \"\".join(temp_list)\n+ return temp_str.split(\"-\")[0]\n", "issue": "Parsing version string error with Keysight_34465A\nWhen I try to connect a Keysight_34465A digital multimeter with \r\n\r\n```\r\nfrom qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A\r\ndmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')\r\n```\r\n\r\nI get \"InvalidVersion: Invalid version '65.02.17-02.40-02.17-00.52-04-01'\", whereas the actual firmware version is 'A.02.17-02.40-02.17-00.52-04-01':\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nInvalidVersion Traceback (most recent call last)\r\nCell In[1], line 2\r\n 1 from qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A\r\n----> 2 dmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\qcodes\\instrument\\instrument_meta.py:36, in InstrumentMeta.__call__(cls, *args, **kwargs)\r\n 31 def __call__(cls, *args: Any, **kwargs: Any) -> Any:\r\n 32 \"\"\"\r\n 33 Overloads `type.__call__` to add code that runs only if __init__ completes\r\n 34 successfully.\r\n 35 \"\"\"\r\n---> 36 new_inst = super().__call__(*args, **kwargs)\r\n 37 is_abstract = new_inst._is_abstract()\r\n 38 if is_abstract:\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\qcodes\\instrument_drivers\\Keysight\\Keysight_34465A_submodules.py:12, in Keysight34465A.__init__(self, name, address, silent, **kwargs)\r\n 10 def __init__(self, name: str, address: str, silent: bool = False,\r\n 11 **kwargs: Any):\r\n---> 12 super().__init__(name, address, silent, **kwargs)\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\qcodes\\instrument_drivers\\Keysight\\private\\Keysight_344xxA_submodules.py:470, in _Keysight_344xxA.__init__(self, name, address, silent, **kwargs)\r\n 463 ####################################\r\n 464 # Instrument specifications\r\n 466 options = self._options()\r\n 467 self.has_DIG = self.is_34465A_34470A and (\r\n 468 \"DIG\" in options\r\n 469 or version.parse(convert_legacy_version_to_supported_version(\"A.03\"))\r\n--> 470 <= version.parse(\r\n 471 convert_legacy_version_to_supported_version(idn[\"firmware\"])\r\n 472 )\r\n 473 )\r\n 474 # Note that the firmware version check is still needed because\r\n 475 # ``_options`` (the ``*OPT?`` command) returns 'DIG' option for\r\n 476 # firmware 3.0 only if it has been purchased before\r\n 477 self.has_MEM = self.is_34465A_34470A and 'MEM' in options\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\packaging\\version.py:52, in parse(version)\r\n 43 def parse(version: str) -> \"Version\":\r\n 44 \"\"\"Parse the given version string.\r\n 45 \r\n 46 >>> 
parse('1.0.dev1')\r\n (...)\r\n 50 :raises InvalidVersion: When the version string is not a valid version.\r\n 51 \"\"\"\r\n---> 52 return Version(version)\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\packaging\\version.py:197, in Version.__init__(self, version)\r\n 195 match = self._regex.search(version)\r\n 196 if not match:\r\n--> 197 raise InvalidVersion(f\"Invalid version: '{version}'\")\r\n 199 # Store the parsed out pieces of the version\r\n 200 self._version = _Version(\r\n 201 epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\r\n 202 release=tuple(int(i) for i in match.group(\"release\").split(\".\")),\r\n (...)\r\n 208 local=_parse_local_version(match.group(\"local\")),\r\n 209 )\r\n```\r\n\r\nOS Windows 11\r\nqcodes 0.37.0\r\nPyvisa 1.13.0\r\npyserial 3.5\nParsing version string error with Keysight_34465A\nWhen I try to connect a Keysight_34465A digital multimeter with \r\n\r\n```\r\nfrom qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A\r\ndmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')\r\n```\r\n\r\nI get \"InvalidVersion: Invalid version '65.02.17-02.40-02.17-00.52-04-01'\", whereas the actual firmware version is 'A.02.17-02.40-02.17-00.52-04-01':\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nInvalidVersion Traceback (most recent call last)\r\nCell In[1], line 2\r\n 1 from qcodes.instrument_drivers.Keysight.Keysight_34465A_submodules import Keysight_34465A\r\n----> 2 dmm = Keysight_34465A('dmm', 'TCPIP0::10.21.64.130::inst0::INSTR')\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\qcodes\\instrument\\instrument_meta.py:36, in InstrumentMeta.__call__(cls, *args, **kwargs)\r\n 31 def __call__(cls, *args: Any, **kwargs: Any) -> Any:\r\n 32 \"\"\"\r\n 33 Overloads `type.__call__` to add code that runs only if __init__ completes\r\n 34 successfully.\r\n 35 \"\"\"\r\n---> 36 new_inst = super().__call__(*args, **kwargs)\r\n 37 is_abstract = new_inst._is_abstract()\r\n 38 if is_abstract:\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\qcodes\\instrument_drivers\\Keysight\\Keysight_34465A_submodules.py:12, in Keysight34465A.__init__(self, name, address, silent, **kwargs)\r\n 10 def __init__(self, name: str, address: str, silent: bool = False,\r\n 11 **kwargs: Any):\r\n---> 12 super().__init__(name, address, silent, **kwargs)\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\qcodes\\instrument_drivers\\Keysight\\private\\Keysight_344xxA_submodules.py:470, in _Keysight_344xxA.__init__(self, name, address, silent, **kwargs)\r\n 463 ####################################\r\n 464 # Instrument specifications\r\n 466 options = self._options()\r\n 467 self.has_DIG = self.is_34465A_34470A and (\r\n 468 \"DIG\" in options\r\n 469 or version.parse(convert_legacy_version_to_supported_version(\"A.03\"))\r\n--> 470 <= version.parse(\r\n 471 convert_legacy_version_to_supported_version(idn[\"firmware\"])\r\n 472 )\r\n 473 )\r\n 474 # Note that the firmware version check is still needed because\r\n 475 # ``_options`` (the ``*OPT?`` command) returns 'DIG' option for\r\n 476 # firmware 3.0 only if it has been purchased before\r\n 477 self.has_MEM = self.is_34465A_34470A and 'MEM' in options\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\packaging\\version.py:52, in parse(version)\r\n 43 def parse(version: str) -> \"Version\":\r\n 44 \"\"\"Parse the given version string.\r\n 45 \r\n 46 >>> parse('1.0.dev1')\r\n (...)\r\n 50 :raises 
InvalidVersion: When the version string is not a valid version.\r\n 51 \"\"\"\r\n---> 52 return Version(version)\r\n\r\nFile ~\\.conda\\envs\\qcodes\\lib\\site-packages\\packaging\\version.py:197, in Version.__init__(self, version)\r\n 195 match = self._regex.search(version)\r\n 196 if not match:\r\n--> 197 raise InvalidVersion(f\"Invalid version: '{version}'\")\r\n 199 # Store the parsed out pieces of the version\r\n 200 self._version = _Version(\r\n 201 epoch=int(match.group(\"epoch\")) if match.group(\"epoch\") else 0,\r\n 202 release=tuple(int(i) for i in match.group(\"release\").split(\".\")),\r\n (...)\r\n 208 local=_parse_local_version(match.group(\"local\")),\r\n 209 )\r\n```\r\n\r\nOS Windows 11\r\nqcodes 0.37.0\r\nPyvisa 1.13.0\r\npyserial 3.5\n", "before_files": [{"content": "\"\"\"\nThis module contains helper functions that provide information about how\nQCoDeS is installed and about what other packages are installed along with\nQCoDeS\n\"\"\"\nimport json\nimport logging\nimport subprocess\nimport sys\nfrom typing import Dict, Optional\n\nif sys.version_info >= (3, 10):\n # distribution.name used below became part of the\n # official api in 3.10\n from importlib.metadata import distributions\nelse:\n # 3.9 and earlier\n from importlib_metadata import distributions\n\nfrom qcodes.utils.deprecate import deprecate\n\nlog = logging.getLogger(__name__)\n\n\ndef is_qcodes_installed_editably() -> Optional[bool]:\n \"\"\"\n Try to ask pip whether QCoDeS is installed in editable mode and return\n the answer a boolean. Returns None if pip somehow did not respond as\n expected.\n \"\"\"\n\n answer: Optional[bool]\n\n try:\n pipproc = subprocess.run(['python', '-m', 'pip', 'list', '-e', '--no-index',\n '--format=json'],\n check=True,\n stdout=subprocess.PIPE)\n e_pkgs = json.loads(pipproc.stdout.decode('utf-8'))\n answer = any([d[\"name\"] == 'qcodes' for d in e_pkgs])\n except Exception as e: # we actually do want a catch-all here\n log.warning(f'{type(e)}: {str(e)}')\n answer = None\n\n return answer\n\n\n@deprecate(\"function 'get_qcodes_version'\", alternative=\"qcodes.__version__\")\ndef get_qcodes_version() -> str:\n \"\"\"\n Get the version of the currently installed QCoDeS\n \"\"\"\n from qcodes._version import __version__\n return __version__\n\n\ndef get_all_installed_package_versions() -> Dict[str, str]:\n \"\"\"\n Return a dictionary of the currently installed packages and their versions.\n \"\"\"\n return {d.name: d.version for d in distributions()}\n\n\ndef convert_legacy_version_to_supported_version(ver: str) -> str:\n \"\"\"\n Convert a legacy version str containing single chars rather than\n numbers to a regular version string. This is done by replacing a char\n by its ASCII code (using ``ord``). 
This assumes that the version number\n only uses at most a single char per level and only ASCII chars.\n \"\"\"\n\n temp_list = []\n for v in ver:\n if v.isalpha():\n temp_list.append(str(ord(v.upper())))\n else:\n temp_list.append(v)\n return \"\".join(temp_list)\n", "path": "qcodes/utils/installation_info.py"}], "after_files": [{"content": "\"\"\"\nThis module contains helper functions that provide information about how\nQCoDeS is installed and about what other packages are installed along with\nQCoDeS\n\"\"\"\nimport json\nimport logging\nimport subprocess\nimport sys\nfrom typing import Dict, Optional\n\nif sys.version_info >= (3, 10):\n # distribution.name used below became part of the\n # official api in 3.10\n from importlib.metadata import distributions\nelse:\n # 3.9 and earlier\n from importlib_metadata import distributions\n\nfrom qcodes.utils.deprecate import deprecate\n\nlog = logging.getLogger(__name__)\n\n\ndef is_qcodes_installed_editably() -> Optional[bool]:\n \"\"\"\n Try to ask pip whether QCoDeS is installed in editable mode and return\n the answer a boolean. Returns None if pip somehow did not respond as\n expected.\n \"\"\"\n\n answer: Optional[bool]\n\n try:\n pipproc = subprocess.run(['python', '-m', 'pip', 'list', '-e', '--no-index',\n '--format=json'],\n check=True,\n stdout=subprocess.PIPE)\n e_pkgs = json.loads(pipproc.stdout.decode('utf-8'))\n answer = any([d[\"name\"] == 'qcodes' for d in e_pkgs])\n except Exception as e: # we actually do want a catch-all here\n log.warning(f'{type(e)}: {str(e)}')\n answer = None\n\n return answer\n\n\n@deprecate(\"function 'get_qcodes_version'\", alternative=\"qcodes.__version__\")\ndef get_qcodes_version() -> str:\n \"\"\"\n Get the version of the currently installed QCoDeS\n \"\"\"\n from qcodes._version import __version__\n return __version__\n\n\ndef get_all_installed_package_versions() -> Dict[str, str]:\n \"\"\"\n Return a dictionary of the currently installed packages and their versions.\n \"\"\"\n return {d.name: d.version for d in distributions()}\n\n\ndef convert_legacy_version_to_supported_version(ver: str) -> str:\n \"\"\"\n Convert a legacy version str containing single chars rather than\n numbers to a regular version string. This is done by replacing a char\n by its ASCII code (using ``ord``). This assumes that the version number\n only uses at most a single char per level and only ASCII chars.\n\n It also splits off anything that comes after the first ``-`` in the version str.\n\n This is meant to pass versions like ``'A.02.17-02.40-02.17-00.52-04-01'``\n primarily used by Keysight instruments.\n \"\"\"\n\n temp_list = []\n for v in ver:\n if v.isalpha():\n temp_list.append(str(ord(v.upper())))\n else:\n temp_list.append(v)\n temp_str = \"\".join(temp_list)\n return temp_str.split(\"-\")[0]\n", "path": "qcodes/utils/installation_info.py"}]} | 3,310 | 243 |
gh_patches_debug_34889 | rasdani/github-patches | git_diff | scikit-hep__pyhf-1818 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add contextlib support to the pyhf.schema API
> instead of having users overwriting the value by assignment, do you think there is value in offering a `pyhf.utils.update_schema_path` or something that performs this operation through the API?
>
> Thanks @kratsg for taking up my suggestion!
>
> I second @matthewfeickert's suggestion to change schemas via function call rather than assignment. It is much simpler to add functionality to a function if it ever becomes necessary, than to replace an entire existing module with a class instance with a property.
>
> I'd even go so far to say that a context manager doubling as an update function would be ideal IMO:
> ```python
> # in pyhf.utils
> _SCHEMAS = Path(...)
> class use_schema_path: # snake_case to remind of function-like usage
> def __init__(self, path):
> global _SCHEMAS
> self._old_schemas = _SCHEMAS
> _SCHEMAS = pathlib.Path(path)
> def __enter__(self):
> pass
> def __exit__(self, *args, **kwargs):
> global _SCHEMAS
> _SCHEMAS = self._old_schemas
> ```
> which can still be called as a function (only executing `__init__`), so short scripts etc. are not forced to use `with` blocks.
> But it can also be used like so:
> ```python
> def make_my_workspace(spec):
> with pyhf.utils.use_schema_path('/my/very/special/schemas'):
> return pyhf.Workspace(spec)
> ```
> So as a user writing code on top of pyhf, I don't have to worry about resetting the global variable: the CM does it for me, and there are fewer mistakes to make.
>
_Originally posted by @lhenkelm in https://github.com/scikit-hep/pyhf/issues/1753#issuecomment-1026678066_
--- END ISSUE ---
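To make the requested behaviour concrete before looking at the files, here is a minimal, self-contained sketch of the path-swapping context manager idea from the quoted comment (generic `pathlib` stand-ins; the real pyhf names and module layout may differ):

```python
import pathlib

_SCHEMAS = pathlib.Path("/default/schemas")   # stand-in for the module-level default

class use_schema_path:
    """Swap the schema search path; restore it on exit (also usable as a plain call)."""

    def __init__(self, path):
        global _SCHEMAS
        self._old = _SCHEMAS
        _SCHEMAS = pathlib.Path(path)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        global _SCHEMAS
        _SCHEMAS = self._old

with use_schema_path("/my/very/special/schemas"):
    print(_SCHEMAS)    # /my/very/special/schemas
print(_SCHEMAS)        # /default/schemas (restored on exit)
```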
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pyhf/schema/__init__.py`
Content:
```
1 """
2 See :class:`~pyhf.schema.Schema` for documentation.
3 """
4 import pathlib
5 import sys
6 from pyhf.schema.loader import load_schema
7 from pyhf.schema.validator import validate
8 from pyhf.schema import variables
9
10 __all__ = [
11 "load_schema",
12 "validate",
13 "path",
14 "version",
15 ]
16
17
18 def __dir__():
19 return __all__
20
21
22 class Schema(sys.modules[__name__].__class__):
23 """
24 A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.
25
26 Example:
27 >>> import pyhf.schema
28 >>> import pathlib
29 >>> curr_path = pyhf.schema.path
30 >>> curr_path # doctest: +ELLIPSIS
31 PosixPath('.../pyhf/schemas')
32 >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))
33 >>> pyhf.schema.path
34 PosixPath('/home/root/my/new/path')
35 >>> pyhf.schema(curr_path)
36 >>> pyhf.schema.path # doctest: +ELLIPSIS
37 PosixPath('.../pyhf/schemas')
38
39 """
40
41 def __call__(self, new_path: pathlib.Path):
42 """
43 Change the local search path for finding schemas locally.
44
45 Args:
46 new_path (pathlib.Path): Path to folder containing the schemas
47
48 Returns:
49 None
50 """
51 variables.schemas = new_path
52
53 @property
54 def path(self):
55 """
56 The local path for schemas.
57 """
58 return variables.schemas
59
60 @property
61 def version(self):
62 """
63 The default version used for finding schemas.
64 """
65 return variables.SCHEMA_VERSION
66
67
68 sys.modules[__name__].__class__ = Schema
69
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/pyhf/schema/__init__.py b/src/pyhf/schema/__init__.py
--- a/src/pyhf/schema/__init__.py
+++ b/src/pyhf/schema/__init__.py
@@ -23,17 +23,40 @@
"""
A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.
- Example:
+ .. rubric:: Example (callable)
+
+ .. code-block:: pycon
+
>>> import pyhf.schema
>>> import pathlib
>>> curr_path = pyhf.schema.path
- >>> curr_path # doctest: +ELLIPSIS
+ >>> curr_path # doctest: +ELLIPSIS
PosixPath('.../pyhf/schemas')
- >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))
+ >>> new_path = pathlib.Path("/home/root/my/new/path")
+ >>> pyhf.schema(new_path) # doctest: +ELLIPSIS
+ <module 'pyhf.schema' from ...>
>>> pyhf.schema.path
PosixPath('/home/root/my/new/path')
- >>> pyhf.schema(curr_path)
- >>> pyhf.schema.path # doctest: +ELLIPSIS
+ >>> pyhf.schema(curr_path) # doctest: +ELLIPSIS
+ <module 'pyhf.schema' from ...>
+ >>> pyhf.schema.path # doctest: +ELLIPSIS
+ PosixPath('.../pyhf/schemas')
+
+ .. rubric:: Example (context-manager)
+
+ .. code-block:: pycon
+
+ >>> import pyhf.schema
+ >>> import pathlib
+ >>> curr_path = pyhf.schema.path
+ >>> curr_path # doctest: +ELLIPSIS
+ PosixPath('.../pyhf/schemas')
+ >>> new_path = pathlib.Path("/home/root/my/new/path")
+ >>> with pyhf.schema(new_path):
+ ... print(repr(pyhf.schema.path))
+ ...
+ PosixPath('/home/root/my/new/path')
+ >>> pyhf.schema.path # doctest: +ELLIPSIS
PosixPath('.../pyhf/schemas')
"""
@@ -45,10 +68,23 @@
Args:
new_path (pathlib.Path): Path to folder containing the schemas
+ Returns:
+ self (pyhf.schema.Schema): Returns itself (for contextlib management)
+ """
+ self.orig_path, variables.schemas = variables.schemas, new_path
+ return self
+
+ def __enter__(self):
+ pass
+
+ def __exit__(self, *args, **kwargs):
+ """
+ Reset the local search path for finding schemas locally.
+
Returns:
None
"""
- variables.schemas = new_path
+ variables.schemas = self.orig_path
@property
def path(self):
| {"golden_diff": "diff --git a/src/pyhf/schema/__init__.py b/src/pyhf/schema/__init__.py\n--- a/src/pyhf/schema/__init__.py\n+++ b/src/pyhf/schema/__init__.py\n@@ -23,17 +23,40 @@\n \"\"\"\n A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.\n \n- Example:\n+ .. rubric:: Example (callable)\n+\n+ .. code-block:: pycon\n+\n >>> import pyhf.schema\n >>> import pathlib\n >>> curr_path = pyhf.schema.path\n- >>> curr_path # doctest: +ELLIPSIS\n+ >>> curr_path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n- >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))\n+ >>> new_path = pathlib.Path(\"/home/root/my/new/path\")\n+ >>> pyhf.schema(new_path) # doctest: +ELLIPSIS\n+ <module 'pyhf.schema' from ...>\n >>> pyhf.schema.path\n PosixPath('/home/root/my/new/path')\n- >>> pyhf.schema(curr_path)\n- >>> pyhf.schema.path # doctest: +ELLIPSIS\n+ >>> pyhf.schema(curr_path) # doctest: +ELLIPSIS\n+ <module 'pyhf.schema' from ...>\n+ >>> pyhf.schema.path # doctest: +ELLIPSIS\n+ PosixPath('.../pyhf/schemas')\n+\n+ .. rubric:: Example (context-manager)\n+\n+ .. code-block:: pycon\n+\n+ >>> import pyhf.schema\n+ >>> import pathlib\n+ >>> curr_path = pyhf.schema.path\n+ >>> curr_path # doctest: +ELLIPSIS\n+ PosixPath('.../pyhf/schemas')\n+ >>> new_path = pathlib.Path(\"/home/root/my/new/path\")\n+ >>> with pyhf.schema(new_path):\n+ ... print(repr(pyhf.schema.path))\n+ ...\n+ PosixPath('/home/root/my/new/path')\n+ >>> pyhf.schema.path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n \n \"\"\"\n@@ -45,10 +68,23 @@\n Args:\n new_path (pathlib.Path): Path to folder containing the schemas\n \n+ Returns:\n+ self (pyhf.schema.Schema): Returns itself (for contextlib management)\n+ \"\"\"\n+ self.orig_path, variables.schemas = variables.schemas, new_path\n+ return self\n+\n+ def __enter__(self):\n+ pass\n+\n+ def __exit__(self, *args, **kwargs):\n+ \"\"\"\n+ Reset the local search path for finding schemas locally.\n+\n Returns:\n None\n \"\"\"\n- variables.schemas = new_path\n+ variables.schemas = self.orig_path\n \n @property\n def path(self):\n", "issue": "Add contextlib support to the pyhf.schema API\n> instead of having users overwriting the value by assignment, do you think there is value in offering a `pyhf.utils.update_schema_path` or something that performs this operation through the API?\r\n>\r\n> Thanks @kratsg for taking up my suggestion!\r\n> \r\n> I second @matthewfeickert's suggestion to change schemas via function call rather than assignment. It is much simpler to add functionality to a function if it ever becomes necessary, than to replace an entire existing module with a class instance with a property.\r\n> \r\n> I'd even go so far to say that a context manager doubling as an update function would be ideal IMO:\r\n> ```python\r\n> # in pyhf.utils\r\n> _SCHEMAS = Path(...)\r\n> class use_schema_path: # snake_case to remind of function-like usage\r\n> def __init__(self, path):\r\n> global _SCHEMAS\r\n> self._old_schemas = _SCHEMAS\r\n> _SCHEMAS = pathlib.Path(path)\r\n> def __enter__(self):\r\n> pass\r\n> def __exit__(self, *args, **kwargs):\r\n> global _SCHEMAS\r\n> _SCHEMAS = self._old_schemas\r\n> ```\r\n> which can still be called as a function (only executing `__init__`), so short scripts etc. 
are not forced to use `with` blocks.\r\n> But it can also be used like so:\r\n> ```python\r\n> def make_my_workspace(spec):\r\n> with pyhf.utils.use_schema_path('/my/very/special/schemas'):\r\n> return pyhf.Workspace(spec)\r\n> ```\r\n> So as a user writing code on top of pyhf, I don't have to worry about resesetting the global variable, the CM does it for me, and there are fewer mistakes to make.\r\n> \r\n_Originally posted by @lhenkelm in https://github.com/scikit-hep/pyhf/issues/1753#issuecomment-1026678066_\n", "before_files": [{"content": "\"\"\"\nSee :class:`~pyhf.schema.Schema` for documentation.\n\"\"\"\nimport pathlib\nimport sys\nfrom pyhf.schema.loader import load_schema\nfrom pyhf.schema.validator import validate\nfrom pyhf.schema import variables\n\n__all__ = [\n \"load_schema\",\n \"validate\",\n \"path\",\n \"version\",\n]\n\n\ndef __dir__():\n return __all__\n\n\nclass Schema(sys.modules[__name__].__class__):\n \"\"\"\n A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.\n\n Example:\n >>> import pyhf.schema\n >>> import pathlib\n >>> curr_path = pyhf.schema.path\n >>> curr_path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n >>> pyhf.schema(pathlib.Path('/home/root/my/new/path'))\n >>> pyhf.schema.path\n PosixPath('/home/root/my/new/path')\n >>> pyhf.schema(curr_path)\n >>> pyhf.schema.path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n\n \"\"\"\n\n def __call__(self, new_path: pathlib.Path):\n \"\"\"\n Change the local search path for finding schemas locally.\n\n Args:\n new_path (pathlib.Path): Path to folder containing the schemas\n\n Returns:\n None\n \"\"\"\n variables.schemas = new_path\n\n @property\n def path(self):\n \"\"\"\n The local path for schemas.\n \"\"\"\n return variables.schemas\n\n @property\n def version(self):\n \"\"\"\n The default version used for finding schemas.\n \"\"\"\n return variables.SCHEMA_VERSION\n\n\nsys.modules[__name__].__class__ = Schema\n", "path": "src/pyhf/schema/__init__.py"}], "after_files": [{"content": "\"\"\"\nSee :class:`~pyhf.schema.Schema` for documentation.\n\"\"\"\nimport pathlib\nimport sys\nfrom pyhf.schema.loader import load_schema\nfrom pyhf.schema.validator import validate\nfrom pyhf.schema import variables\n\n__all__ = [\n \"load_schema\",\n \"validate\",\n \"path\",\n \"version\",\n]\n\n\ndef __dir__():\n return __all__\n\n\nclass Schema(sys.modules[__name__].__class__):\n \"\"\"\n A module-level wrapper around :mod:`pyhf.schema` which will provide additional functionality for interacting with schemas.\n\n .. rubric:: Example (callable)\n\n .. code-block:: pycon\n\n >>> import pyhf.schema\n >>> import pathlib\n >>> curr_path = pyhf.schema.path\n >>> curr_path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n >>> new_path = pathlib.Path(\"/home/root/my/new/path\")\n >>> pyhf.schema(new_path) # doctest: +ELLIPSIS\n <module 'pyhf.schema' from ...>\n >>> pyhf.schema.path\n PosixPath('/home/root/my/new/path')\n >>> pyhf.schema(curr_path) # doctest: +ELLIPSIS\n <module 'pyhf.schema' from ...>\n >>> pyhf.schema.path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n\n .. rubric:: Example (context-manager)\n\n .. code-block:: pycon\n\n >>> import pyhf.schema\n >>> import pathlib\n >>> curr_path = pyhf.schema.path\n >>> curr_path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n >>> new_path = pathlib.Path(\"/home/root/my/new/path\")\n >>> with pyhf.schema(new_path):\n ... 
print(repr(pyhf.schema.path))\n ...\n PosixPath('/home/root/my/new/path')\n >>> pyhf.schema.path # doctest: +ELLIPSIS\n PosixPath('.../pyhf/schemas')\n\n \"\"\"\n\n def __call__(self, new_path: pathlib.Path):\n \"\"\"\n Change the local search path for finding schemas locally.\n\n Args:\n new_path (pathlib.Path): Path to folder containing the schemas\n\n Returns:\n self (pyhf.schema.Schema): Returns itself (for contextlib management)\n \"\"\"\n self.orig_path, variables.schemas = variables.schemas, new_path\n return self\n\n def __enter__(self):\n pass\n\n def __exit__(self, *args, **kwargs):\n \"\"\"\n Reset the local search path for finding schemas locally.\n\n Returns:\n None\n \"\"\"\n variables.schemas = self.orig_path\n\n @property\n def path(self):\n \"\"\"\n The local path for schemas.\n \"\"\"\n return variables.schemas\n\n @property\n def version(self):\n \"\"\"\n The default version used for finding schemas.\n \"\"\"\n return variables.SCHEMA_VERSION\n\n\nsys.modules[__name__].__class__ = Schema\n", "path": "src/pyhf/schema/__init__.py"}]} | 1,206 | 656 |
gh_patches_debug_3498 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-1530 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Social Network Login Failure
When clicking on "connect GitHub" on the [social accounts page](https://readthedocs.org/accounts/social/connections/?), I get a message:
> An error occurred while attempting to login via your social network account.
There's a `?` in the url. Could that be a hint? Is it missing some request arguments? If I omit it, the bug persists.
Cheers!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/settings/postgres.py`
Content:
```
1 import os
2
3 from .base import * # noqa
4
5
6 DATABASES = {
7 'default': {
8 'ENGINE': 'django.db.backends.postgresql_psycopg2',
9 'NAME': 'docs',
10 'USER': 'postgres', # Not used with sqlite3.
11 'PASSWORD': '',
12 'HOST': '10.177.73.97',
13 'PORT': '',
14 }
15 }
16
17 DEBUG = False
18 TEMPLATE_DEBUG = False
19 CELERY_ALWAYS_EAGER = False
20
21 MEDIA_URL = 'https://media.readthedocs.org/'
22 STATIC_URL = 'https://media.readthedocs.org/static/'
23 ADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/'
24 SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
25
26 HAYSTACK_CONNECTIONS = {
27 'default': {
28 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
29 'URL': 'http://odin:8983/solr',
30 }
31 }
32
33 CACHES = {
34 'default': {
35 'BACKEND': 'redis_cache.RedisCache',
36 'LOCATION': 'localhost:6379',
37 'PREFIX': 'docs',
38 'OPTIONS': {
39 'DB': 1,
40 'PARSER_CLASS': 'redis.connection.HiredisParser'
41 },
42 },
43 }
44
45 # Elasticsearch settings.
46 ES_HOSTS = ['backup:9200', 'db:9200']
47 ES_DEFAULT_NUM_REPLICAS = 1
48 ES_DEFAULT_NUM_SHARDS = 5
49
50 SLUMBER_API_HOST = 'https://readthedocs.org'
51 WEBSOCKET_HOST = 'websocket.readthedocs.org:8088'
52
53 PRODUCTION_DOMAIN = 'readthedocs.org'
54 USE_SUBDOMAIN = True
55 NGINX_X_ACCEL_REDIRECT = True
56
57 SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
58
59 # Lock builds for 10 minutes
60 REPO_LOCK_SECONDS = 300
61
62 # Don't re-confirm existing accounts
63 ACCOUNT_EMAIL_VERIFICATION = 'none'
64
65 FILE_SYNCER = 'privacy.backends.syncers.DoubleRemotePuller'
66
67 # set GitHub scope
68 SOCIALACCOUNT_PROVIDERS = {
69 'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}
70 }
71
72 if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
73 try:
74 from local_settings import * # noqa
75 except ImportError:
76 pass
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/settings/postgres.py b/readthedocs/settings/postgres.py
--- a/readthedocs/settings/postgres.py
+++ b/readthedocs/settings/postgres.py
@@ -69,6 +69,9 @@
'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}
}
+# allauth settings
+ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'
+
if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
try:
from local_settings import * # noqa
| {"golden_diff": "diff --git a/readthedocs/settings/postgres.py b/readthedocs/settings/postgres.py\n--- a/readthedocs/settings/postgres.py\n+++ b/readthedocs/settings/postgres.py\n@@ -69,6 +69,9 @@\n 'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}\n }\n \n+# allauth settings\n+ACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\n+\n if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):\n try:\n from local_settings import * # noqa\n", "issue": "Social Network Login Failure\nWhen clicking on connect GitHub on the [social accounts page](https://readthedocs.org/accounts/social/connections/?) I get a message:\n\n> An error occurred while attempting to login via your social network account.\n\nThere's a `?` in the url. Could that be a hint? Is it missing some request arguments? If I omit it, the bug persists.\n\nCheers!\n\n", "before_files": [{"content": "import os\n\nfrom .base import * # noqa\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'docs',\n 'USER': 'postgres', # Not used with sqlite3.\n 'PASSWORD': '',\n 'HOST': '10.177.73.97',\n 'PORT': '',\n }\n}\n\nDEBUG = False\nTEMPLATE_DEBUG = False\nCELERY_ALWAYS_EAGER = False\n\nMEDIA_URL = 'https://media.readthedocs.org/'\nSTATIC_URL = 'https://media.readthedocs.org/static/'\nADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/'\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://odin:8983/solr',\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'localhost:6379',\n 'PREFIX': 'docs',\n 'OPTIONS': {\n 'DB': 1,\n 'PARSER_CLASS': 'redis.connection.HiredisParser'\n },\n },\n}\n\n# Elasticsearch settings.\nES_HOSTS = ['backup:9200', 'db:9200']\nES_DEFAULT_NUM_REPLICAS = 1\nES_DEFAULT_NUM_SHARDS = 5\n\nSLUMBER_API_HOST = 'https://readthedocs.org'\nWEBSOCKET_HOST = 'websocket.readthedocs.org:8088'\n\nPRODUCTION_DOMAIN = 'readthedocs.org'\nUSE_SUBDOMAIN = True\nNGINX_X_ACCEL_REDIRECT = True\n\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Lock builds for 10 minutes\nREPO_LOCK_SECONDS = 300\n\n# Don't re-confirm existing accounts\nACCOUNT_EMAIL_VERIFICATION = 'none'\n\nFILE_SYNCER = 'privacy.backends.syncers.DoubleRemotePuller'\n\n# set GitHub scope\nSOCIALACCOUNT_PROVIDERS = {\n 'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}\n}\n\nif not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):\n try:\n from local_settings import * # noqa\n except ImportError:\n pass\n", "path": "readthedocs/settings/postgres.py"}], "after_files": [{"content": "import os\n\nfrom .base import * # noqa\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 'docs',\n 'USER': 'postgres', # Not used with sqlite3.\n 'PASSWORD': '',\n 'HOST': '10.177.73.97',\n 'PORT': '',\n }\n}\n\nDEBUG = False\nTEMPLATE_DEBUG = False\nCELERY_ALWAYS_EAGER = False\n\nMEDIA_URL = 'https://media.readthedocs.org/'\nSTATIC_URL = 'https://media.readthedocs.org/static/'\nADMIN_MEDIA_PREFIX = MEDIA_URL + 'admin/'\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\n\nHAYSTACK_CONNECTIONS = {\n 'default': {\n 'ENGINE': 'haystack.backends.solr_backend.SolrEngine',\n 'URL': 'http://odin:8983/solr',\n }\n}\n\nCACHES = {\n 'default': {\n 'BACKEND': 'redis_cache.RedisCache',\n 'LOCATION': 'localhost:6379',\n 'PREFIX': 'docs',\n 'OPTIONS': {\n 'DB': 1,\n 
'PARSER_CLASS': 'redis.connection.HiredisParser'\n },\n },\n}\n\n# Elasticsearch settings.\nES_HOSTS = ['backup:9200', 'db:9200']\nES_DEFAULT_NUM_REPLICAS = 1\nES_DEFAULT_NUM_SHARDS = 5\n\nSLUMBER_API_HOST = 'https://readthedocs.org'\nWEBSOCKET_HOST = 'websocket.readthedocs.org:8088'\n\nPRODUCTION_DOMAIN = 'readthedocs.org'\nUSE_SUBDOMAIN = True\nNGINX_X_ACCEL_REDIRECT = True\n\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n\n# Lock builds for 10 minutes\nREPO_LOCK_SECONDS = 300\n\n# Don't re-confirm existing accounts\nACCOUNT_EMAIL_VERIFICATION = 'none'\n\nFILE_SYNCER = 'privacy.backends.syncers.DoubleRemotePuller'\n\n# set GitHub scope\nSOCIALACCOUNT_PROVIDERS = {\n 'github': {'SCOPE': ['user:email', 'read:org', 'admin:repo_hook', 'repo:status']}\n}\n\n# allauth settings\nACCOUNT_DEFAULT_HTTP_PROTOCOL = 'https'\n\nif not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):\n try:\n from local_settings import * # noqa\n except ImportError:\n pass\n", "path": "readthedocs/settings/postgres.py"}]} | 1,016 | 125 |
gh_patches_debug_16013 | rasdani/github-patches | git_diff | pyro-ppl__pyro-3167 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug with OneCycleLR on Apple Silicon
### Guidelines
**NOTE:** Issues are for bugs and feature requests only. If you have a question about using Pyro or general modeling questions, please post it on the [forum](https://forum.pyro.ai/).
If you would like to address any minor bugs in the documentation or source, please feel free to contribute a Pull Request without creating an issue first.
Please tag the issue appropriately in the title e.g. [bug], [feature request], [discussion], etc.
Please provide the following details:
--------------------------------------------------------------------------------------------------
### Issue Description
Provide a brief description of the issue.
### Environment
For any bugs, please provide the following:
- OS and python version.
- PyTorch version, or if relevant, output of `pip freeze`.
- Pyro version: output of `python -c 'import pyro; print pyro.__version__'`
### Code Snippet
Provide any relevant code snippets and commands run to replicate the issue.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pyro/optim/pytorch_optimizers.py`
Content:
```
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import torch
5
6 from pyro.optim import PyroOptim
7 from pyro.optim.lr_scheduler import PyroLRScheduler
8
9 __all__ = []
10 # Programmatically load all optimizers from PyTorch.
11 for _name, _Optim in torch.optim.__dict__.items():
12 if not isinstance(_Optim, type):
13 continue
14 if not issubclass(_Optim, torch.optim.Optimizer):
15 continue
16 if _Optim is torch.optim.Optimizer:
17 continue
18 if _Optim is torch.optim.LBFGS:
19 # XXX LBFGS is not supported for SVI yet
20 continue
21
22 _PyroOptim = (
23 lambda _Optim: lambda optim_args, clip_args=None: PyroOptim(
24 _Optim, optim_args, clip_args
25 )
26 )(_Optim)
27 _PyroOptim.__name__ = _name
28 _PyroOptim.__doc__ = "Wraps :class:`torch.optim.{}` with :class:`~pyro.optim.optim.PyroOptim`.".format(
29 _name
30 )
31
32 locals()[_name] = _PyroOptim
33 __all__.append(_name)
34 del _PyroOptim
35
36 # Load all schedulers from PyTorch
37 for _name, _Optim in torch.optim.lr_scheduler.__dict__.items():
38 if not isinstance(_Optim, type):
39 continue
40 if (
41 not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)
42 and _name != "ReduceLROnPlateau"
43 ):
44 continue
45 if _Optim is torch.optim.Optimizer:
46 continue
47
48 _PyroOptim = (
49 lambda _Optim: lambda optim_args, clip_args=None: PyroLRScheduler(
50 _Optim, optim_args, clip_args
51 )
52 )(_Optim)
53 _PyroOptim.__name__ = _name
54 _PyroOptim.__doc__ = (
55 "Wraps :class:`torch.optim.{}` with ".format(_name)
56 + ":class:`~pyro.optim.lr_scheduler.PyroLRScheduler`."
57 )
58
59 locals()[_name] = _PyroOptim
60 __all__.append(_name)
61 del _PyroOptim
62
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pyro/optim/pytorch_optimizers.py b/pyro/optim/pytorch_optimizers.py
--- a/pyro/optim/pytorch_optimizers.py
+++ b/pyro/optim/pytorch_optimizers.py
@@ -34,13 +34,16 @@
del _PyroOptim
# Load all schedulers from PyTorch
+# breaking change in torch >= 1.14: LRScheduler is new base class
+if hasattr(torch.optim.lr_scheduler, "LRScheduler"):
+ _torch_scheduler_base = torch.optim.lr_scheduler.LRScheduler
+else: # for torch < 1.13, _LRScheduler is base class
+ _torch_scheduler_base = torch.optim.lr_scheduler._LRScheduler
+
for _name, _Optim in torch.optim.lr_scheduler.__dict__.items():
if not isinstance(_Optim, type):
continue
- if (
- not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)
- and _name != "ReduceLROnPlateau"
- ):
+ if not issubclass(_Optim, _torch_scheduler_base) and _name != "ReduceLROnPlateau":
continue
if _Optim is torch.optim.Optimizer:
continue
| {"golden_diff": "diff --git a/pyro/optim/pytorch_optimizers.py b/pyro/optim/pytorch_optimizers.py\n--- a/pyro/optim/pytorch_optimizers.py\n+++ b/pyro/optim/pytorch_optimizers.py\n@@ -34,13 +34,16 @@\n del _PyroOptim\n \n # Load all schedulers from PyTorch\n+# breaking change in torch >= 1.14: LRScheduler is new base class\n+if hasattr(torch.optim.lr_scheduler, \"LRScheduler\"):\n+ _torch_scheduler_base = torch.optim.lr_scheduler.LRScheduler\n+else: # for torch < 1.13, _LRScheduler is base class\n+ _torch_scheduler_base = torch.optim.lr_scheduler._LRScheduler\n+\n for _name, _Optim in torch.optim.lr_scheduler.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n- if (\n- not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)\n- and _name != \"ReduceLROnPlateau\"\n- ):\n+ if not issubclass(_Optim, _torch_scheduler_base) and _name != \"ReduceLROnPlateau\":\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n", "issue": "bug with OneCycleLR on Apple Silicone \n### Guidelines\r\n\r\n**NOTE:** Issues are for bugs and feature requests only. If you have a question about using Pyro or general modeling questions, please post it on the [forum](https://forum.pyro.ai/).\r\n\r\nIf you would like to address any minor bugs in the documentation or source, please feel free to contribute a Pull Request without creating an issue first. \r\n\r\nPlease tag the issue appropriately in the title e.g. [bug], [feature request], [discussion], etc.\r\n\r\nPlease provide the following details:\r\n--------------------------------------------------------------------------------------------------\r\n### Issue Description\r\nProvide a brief description of the issue.\r\n\r\n### Environment\r\nFor any bugs, please provide the following:\r\n - OS and python version.\r\n - PyTorch version, or if relevant, output of `pip freeze`.\r\n - Pyro version: output of `python -c 'import pyro; print pyro.__version__'`\r\n\r\n### Code Snippet\r\nProvide any relevant code snippets and commands run to replicate the issue.\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\n\nfrom pyro.optim import PyroOptim\nfrom pyro.optim.lr_scheduler import PyroLRScheduler\n\n__all__ = []\n# Programmatically load all optimizers from PyTorch.\nfor _name, _Optim in torch.optim.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n if not issubclass(_Optim, torch.optim.Optimizer):\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n if _Optim is torch.optim.LBFGS:\n # XXX LBFGS is not supported for SVI yet\n continue\n\n _PyroOptim = (\n lambda _Optim: lambda optim_args, clip_args=None: PyroOptim(\n _Optim, optim_args, clip_args\n )\n )(_Optim)\n _PyroOptim.__name__ = _name\n _PyroOptim.__doc__ = \"Wraps :class:`torch.optim.{}` with :class:`~pyro.optim.optim.PyroOptim`.\".format(\n _name\n )\n\n locals()[_name] = _PyroOptim\n __all__.append(_name)\n del _PyroOptim\n\n# Load all schedulers from PyTorch\nfor _name, _Optim in torch.optim.lr_scheduler.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n if (\n not issubclass(_Optim, torch.optim.lr_scheduler._LRScheduler)\n and _name != \"ReduceLROnPlateau\"\n ):\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n\n _PyroOptim = (\n lambda _Optim: lambda optim_args, clip_args=None: PyroLRScheduler(\n _Optim, optim_args, clip_args\n )\n )(_Optim)\n _PyroOptim.__name__ = _name\n _PyroOptim.__doc__ = (\n \"Wraps :class:`torch.optim.{}` with 
\".format(_name)\n + \":class:`~pyro.optim.lr_scheduler.PyroLRScheduler`.\"\n )\n\n locals()[_name] = _PyroOptim\n __all__.append(_name)\n del _PyroOptim\n", "path": "pyro/optim/pytorch_optimizers.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport torch\n\nfrom pyro.optim import PyroOptim\nfrom pyro.optim.lr_scheduler import PyroLRScheduler\n\n__all__ = []\n# Programmatically load all optimizers from PyTorch.\nfor _name, _Optim in torch.optim.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n if not issubclass(_Optim, torch.optim.Optimizer):\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n if _Optim is torch.optim.LBFGS:\n # XXX LBFGS is not supported for SVI yet\n continue\n\n _PyroOptim = (\n lambda _Optim: lambda optim_args, clip_args=None: PyroOptim(\n _Optim, optim_args, clip_args\n )\n )(_Optim)\n _PyroOptim.__name__ = _name\n _PyroOptim.__doc__ = \"Wraps :class:`torch.optim.{}` with :class:`~pyro.optim.optim.PyroOptim`.\".format(\n _name\n )\n\n locals()[_name] = _PyroOptim\n __all__.append(_name)\n del _PyroOptim\n\n# Load all schedulers from PyTorch\n# breaking change in torch >= 1.14: LRScheduler is new base class\nif hasattr(torch.optim.lr_scheduler, \"LRScheduler\"):\n _torch_scheduler_base = torch.optim.lr_scheduler.LRScheduler\nelse: # for torch < 1.13, _LRScheduler is base class\n _torch_scheduler_base = torch.optim.lr_scheduler._LRScheduler\n\nfor _name, _Optim in torch.optim.lr_scheduler.__dict__.items():\n if not isinstance(_Optim, type):\n continue\n if not issubclass(_Optim, _torch_scheduler_base) and _name != \"ReduceLROnPlateau\":\n continue\n if _Optim is torch.optim.Optimizer:\n continue\n\n _PyroOptim = (\n lambda _Optim: lambda optim_args, clip_args=None: PyroLRScheduler(\n _Optim, optim_args, clip_args\n )\n )(_Optim)\n _PyroOptim.__name__ = _name\n _PyroOptim.__doc__ = (\n \"Wraps :class:`torch.optim.{}` with \".format(_name)\n + \":class:`~pyro.optim.lr_scheduler.PyroLRScheduler`.\"\n )\n\n locals()[_name] = _PyroOptim\n __all__.append(_name)\n del _PyroOptim\n", "path": "pyro/optim/pytorch_optimizers.py"}]} | 1,119 | 285 |
gh_patches_debug_33404 | rasdani/github-patches | git_diff | aws__aws-sam-cli-1348 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sam 0.19.0 crashes on trying to create .aws-sam directory when not permitted
### Description
While running `sam validate` from a freshly built docker image, it crashes while trying to create the `.aws-sam` config directory.
### Steps to reproduce
In our CI pipeline, we run `sam` from within a docker container built according to this `Dockerfile`:
```
FROM alpine:3.9
RUN apk add --no-cache \
build-base \
python \
python-dev \
py-pip \
groff \
docker && \
pip install awscli aws-sam-cli
```
AWS credentials are passed in as environment variables, volumes are mounted from the Jenkins workspace into the docker image, and then we simply run:
`sam validate -t ./.../template.yaml`
### Observed result
Logs from the Jenkins build:
```
$ docker run -t -d -u 3000:100 --network=host -w /data/hudson/workspace/loper-portal_feature_jenkinstest -v /data/hudson/workspace/loper-portal_feature_jenkinstest:/data/hudson/workspace/loper-portal_feature_jenkinstest:rw,z -v /data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:/data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** xxxx-aws-sam-cli cat
$ docker top 04fa30b17ceb0ae6d45b66190f32f4bad8dedd57386352a31e61f0da6ac18aa2 -eo pid,comm
[Pipeline] {
[Pipeline] withEnv
[Pipeline] {
[Pipeline] withCredentials
Masking supported pattern matches of $AWS_ACCESS_KEY_ID or $AWS_SECRET_ACCESS_KEY
[Pipeline] {
[Pipeline] stage
[Pipeline] { (Validate CloudFormation template)
[Pipeline] sh
+ sam validate --debug -t cloudformation/template.yaml
Traceback (most recent call last):
File "/usr/bin/sam", line 11, in <module>
sys.exit(cli())
File "/usr/lib/python2.7/site-packages/click/core.py", line 722, in __call__
return self.main(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 697, in main
rv = self.invoke(ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 1063, in invoke
Command.invoke(self, ctx)
File "/usr/lib/python2.7/site-packages/click/core.py", line 895, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/click/decorators.py", line 64, in new_func
return ctx.invoke(f, obj, *args[1:], **kwargs)
File "/usr/lib/python2.7/site-packages/click/core.py", line 535, in invoke
return callback(*args, **kwargs)
File "/usr/lib/python2.7/site-packages/samcli/cli/main.py", line 83, in cli
if global_cfg.telemetry_enabled is None:
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 113, in telemetry_enabled
self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 145, in _get_value
cfg_path = self._get_config_file_path(CONFIG_FILENAME)
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 170, in _get_config_file_path
self._create_dir()
File "/usr/lib/python2.7/site-packages/samcli/cli/global_config.py", line 167, in _create_dir
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1540, in mkdir
_try_except_filenotfounderror(_try_func, _exc_func)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 113, in _try_except_filenotfounderror
try_func()
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 1531, in _try_func
self._accessor.mkdir(self, mode)
File "/usr/lib/python2.7/site-packages/pathlib2/__init__.py", line 585, in wrapped
return strfunc(str(pathobj), *args)
OSError: [Errno 13] Permission denied: '/.aws-sam'
[Pipeline] }
[Pipeline] // stage
[Pipeline] }
[Pipeline] // withCredentials
[Pipeline] }
[Pipeline] // withEnv
[Pipeline] }
```
### Expected result
A non-crashing sam :)
### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)
1. OS: alpine linux, docker container
2. `sam --version`: 0.19.0
### Others
This was introduced in the 0.19.0 Telemetry release, in which commit dbd534a added `samcli/cli/global_config.py`, which tries to create the config dir if it doesn't exist.
Actually we should've pinned the versions used in our `Dockerfile`. Not pinning it had us miss the addition of the Telemetry feature in 0.19.0. Although I'd love to contribute, enabling Telemetry would undoubtedly go against some company policy. There _is_ the message in the first run of the new version (if it was able to write the config dir), but in our setup that's quite easy to miss. Basically I'm advocating an opt-in instead
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `samcli/cli/global_config.py`
Content:
```
1 """
2 Provides global configuration helpers.
3 """
4
5 import json
6 import logging
7 import uuid
8 import os
9
10 import click
11
12 try:
13 from pathlib import Path
14 except ImportError: # pragma: no cover
15 from pathlib2 import Path # pragma: no cover
16
17 LOG = logging.getLogger(__name__)
18
19 CONFIG_FILENAME = "metadata.json"
20 INSTALLATION_ID_KEY = "installationId"
21 TELEMETRY_ENABLED_KEY = "telemetryEnabled"
22
23
24 class GlobalConfig(object):
25 """
26 Contains helper methods for global configuration files and values. Handles
27 configuration file creation, updates, and fetching in a platform-neutral way.
28
29 Generally uses '~/.aws-sam/' or 'C:\\Users\\<user>\\AppData\\Roaming\\AWS SAM' as
30 the base directory, depending on platform.
31 """
32
33 def __init__(self, config_dir=None, installation_id=None, telemetry_enabled=None):
34 """
35 Initializes the class, with options provided to assist with testing.
36
37 :param config_dir: Optional, overrides the default config directory path.
38 :param installation_id: Optional, will use this installation id rather than checking config values.
39 """
40 self._config_dir = config_dir
41 self._installation_id = installation_id
42 self._telemetry_enabled = telemetry_enabled
43
44 @property
45 def config_dir(self):
46 if not self._config_dir:
47 # Internal Environment variable to customize SAM CLI App Dir. Currently used only by integ tests.
48 app_dir = os.getenv("__SAM_CLI_APP_DIR")
49 self._config_dir = Path(app_dir) if app_dir else Path(click.get_app_dir('AWS SAM', force_posix=True))
50
51 return Path(self._config_dir)
52
53 @property
54 def installation_id(self):
55 """
56 Returns the installation UUID for this AWS SAM CLI installation. If the
57 installation id has not yet been set, it will be set before returning.
58
59 Examples
60 --------
61
62 >>> gc = GlobalConfig()
63 >>> gc.installation_id
64 "7b7d4db7-2f54-45ba-bf2f-a2cbc9e74a34"
65
66 >>> gc = GlobalConfig()
67 >>> gc.installation_id
68 None
69
70 Returns
71 -------
72 A string containing the installation UUID, or None in case of an error.
73 """
74 if self._installation_id:
75 return self._installation_id
76 try:
77 self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)
78 return self._installation_id
79 except (ValueError, IOError):
80 return None
81
82 @property
83 def telemetry_enabled(self):
84 """
85 Check if telemetry is enabled for this installation. Default value of
86 False. It first tries to get value from SAM_CLI_TELEMETRY environment variable. If its not set,
87 then it fetches the value from config file.
88
89 To enable telemetry, set SAM_CLI_TELEMETRY environment variable equal to integer 1 or string '1'.
90 All other values including words like 'True', 'true', 'false', 'False', 'abcd' etc will disable Telemetry
91
92 Examples
93 --------
94
95 >>> gc = GlobalConfig()
96 >>> gc.telemetry_enabled
97 True
98
99 Returns
100 -------
101 Boolean flag value. True if telemetry is enabled for this installation,
102 False otherwise.
103 """
104 if self._telemetry_enabled is not None:
105 return self._telemetry_enabled
106
107 # If environment variable is set, its value takes precedence over the value from config file.
108 env_name = "SAM_CLI_TELEMETRY"
109 if env_name in os.environ:
110 return os.getenv(env_name) in ('1', 1)
111
112 try:
113 self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)
114 return self._telemetry_enabled
115 except (ValueError, IOError) as ex:
116 LOG.debug("Error when retrieving telemetry_enabled flag", exc_info=ex)
117 return False
118
119 @telemetry_enabled.setter
120 def telemetry_enabled(self, value):
121 """
122 Sets the telemetry_enabled flag to the provided boolean value.
123
124 Examples
125 --------
126 >>> gc = GlobalConfig()
127 >>> gc.telemetry_enabled
128 False
129 >>> gc.telemetry_enabled = True
130 >>> gc.telemetry_enabled
131 True
132
133 Raises
134 ------
135 IOError
136 If there are errors opening or writing to the global config file.
137
138 JSONDecodeError
139 If the config file exists, and is not valid JSON.
140 """
141 self._set_value("telemetryEnabled", value)
142 self._telemetry_enabled = value
143
144 def _get_value(self, key):
145 cfg_path = self._get_config_file_path(CONFIG_FILENAME)
146 if not cfg_path.exists():
147 return None
148 with open(str(cfg_path)) as fp:
149 body = fp.read()
150 json_body = json.loads(body)
151 return json_body.get(key)
152
153 def _set_value(self, key, value):
154 cfg_path = self._get_config_file_path(CONFIG_FILENAME)
155 if not cfg_path.exists():
156 return self._set_json_cfg(cfg_path, key, value)
157 with open(str(cfg_path)) as fp:
158 body = fp.read()
159 try:
160 json_body = json.loads(body)
161 except ValueError as ex:
162 LOG.debug("Failed to decode JSON in {cfg_path}", exc_info=ex)
163 raise ex
164 return self._set_json_cfg(cfg_path, key, value, json_body)
165
166 def _create_dir(self):
167 self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
168
169 def _get_config_file_path(self, filename):
170 self._create_dir()
171 filepath = self.config_dir.joinpath(filename)
172 return filepath
173
174 def _get_or_set_uuid(self, key):
175 """
176 Special logic method for when we want a UUID to always be present, this
177 method behaves as a getter with side effects. Essentially, if the value
178 is not present, we will set it with a generated UUID.
179
180 If we have multiple such values in the future, a possible refactor is
181 to just be _get_or_set_value, where we also take a default value as a
182 parameter.
183 """
184 cfg_value = self._get_value(key)
185 if cfg_value is not None:
186 return cfg_value
187 return self._set_value(key, str(uuid.uuid4()))
188
189 def _set_json_cfg(self, filepath, key, value, json_body=None):
190 """
191 Special logic method to add a value to a JSON configuration file. This
192 method will write a new version of the file in question, so it will
193 either write a new file with only the first config value, or if a JSON
194 body is provided, it will upsert starting from that JSON body.
195 """
196 json_body = json_body or {}
197 json_body[key] = value
198 file_body = json.dumps(json_body, indent=4) + "\n"
199 try:
200 with open(str(filepath), 'w') as f:
201 f.write(file_body)
202 except IOError as ex:
203 LOG.debug("Error writing to {filepath}", exc_info=ex)
204 raise ex
205 return value
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/samcli/cli/global_config.py b/samcli/cli/global_config.py
--- a/samcli/cli/global_config.py
+++ b/samcli/cli/global_config.py
@@ -47,7 +47,6 @@
# Internal Environment variable to customize SAM CLI App Dir. Currently used only by integ tests.
app_dir = os.getenv("__SAM_CLI_APP_DIR")
self._config_dir = Path(app_dir) if app_dir else Path(click.get_app_dir('AWS SAM', force_posix=True))
-
return Path(self._config_dir)
@property
@@ -76,7 +75,7 @@
try:
self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)
return self._installation_id
- except (ValueError, IOError):
+ except (ValueError, IOError, OSError):
return None
@property
@@ -112,7 +111,7 @@
try:
self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)
return self._telemetry_enabled
- except (ValueError, IOError) as ex:
+ except (ValueError, IOError, OSError) as ex:
LOG.debug("Error when retrieving telemetry_enabled flag", exc_info=ex)
return False
@@ -164,6 +163,10 @@
return self._set_json_cfg(cfg_path, key, value, json_body)
def _create_dir(self):
+ """
+ Creates configuration directory if it does not already exist, otherwise does nothing.
+ May raise an OSError if we do not have permissions to create the directory.
+ """
self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)
def _get_config_file_path(self, filename):
| {"golden_diff": "diff --git a/samcli/cli/global_config.py b/samcli/cli/global_config.py\n--- a/samcli/cli/global_config.py\n+++ b/samcli/cli/global_config.py\n@@ -47,7 +47,6 @@\n # Internal Environment variable to customize SAM CLI App Dir. Currently used only by integ tests.\n app_dir = os.getenv(\"__SAM_CLI_APP_DIR\")\n self._config_dir = Path(app_dir) if app_dir else Path(click.get_app_dir('AWS SAM', force_posix=True))\n-\n return Path(self._config_dir)\n \n @property\n@@ -76,7 +75,7 @@\n try:\n self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)\n return self._installation_id\n- except (ValueError, IOError):\n+ except (ValueError, IOError, OSError):\n return None\n \n @property\n@@ -112,7 +111,7 @@\n try:\n self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)\n return self._telemetry_enabled\n- except (ValueError, IOError) as ex:\n+ except (ValueError, IOError, OSError) as ex:\n LOG.debug(\"Error when retrieving telemetry_enabled flag\", exc_info=ex)\n return False\n \n@@ -164,6 +163,10 @@\n return self._set_json_cfg(cfg_path, key, value, json_body)\n \n def _create_dir(self):\n+ \"\"\"\n+ Creates configuration directory if it does not already exist, otherwise does nothing.\n+ May raise an OSError if we do not have permissions to create the directory.\n+ \"\"\"\n self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)\n \n def _get_config_file_path(self, filename):\n", "issue": "sam 0.19.0 crashes on trying to create .aws-sam directory when not permitted\n### Description\r\n\r\nWhile running `sam validate` from a freshly built docker image, it crashes while trying to create the `.aws-sam` config directory.\r\n\r\n### Steps to reproduce\r\n\r\nIn our CI pipeline, we run `sam` from within a docker container built according to this `Dockerfile`:\r\n\r\n```\r\nFROM alpine:3.9\r\n\r\nRUN apk add --no-cache \\\r\n build-base \\\r\n python \\\r\n python-dev \\\r\n py-pip \\\r\n groff \\\r\n docker && \\\r\n pip install awscli aws-sam-cli\r\n```\r\n\r\nAWS credentials are passed in as environment variables, volumes mounted from the Jenkins workspace into the docker image, and then we simply run:\r\n\r\n`sam validate -t ./.../template.yaml`\r\n\r\n### Observed result\r\n\r\nLogs from the Jenkins build:\r\n\r\n```\r\n$ docker run -t -d -u 3000:100 --network=host -w /data/hudson/workspace/loper-portal_feature_jenkinstest -v /data/hudson/workspace/loper-portal_feature_jenkinstest:/data/hudson/workspace/loper-portal_feature_jenkinstest:rw,z -v /data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:/data/hudson/workspace/loper-portal_feature_jenkinstest@tmp:rw,z -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** -e ******** xxxx-aws-sam-cli cat\r\n$ docker top 04fa30b17ceb0ae6d45b66190f32f4bad8dedd57386352a31e61f0da6ac18aa2 -eo pid,comm\r\n[Pipeline] {\r\n[Pipeline] withEnv\r\n[Pipeline] {\r\n[Pipeline] withCredentials\r\nMasking supported pattern matches of $AWS_ACCESS_KEY_ID or $AWS_SECRET_ACCESS_KEY\r\n[Pipeline] {\r\n[Pipeline] stage\r\n[Pipeline] { (Validate CloudFormation template)\r\n[Pipeline] sh\r\n+ sam validate --debug -t cloudformation/template.yaml\r\nTraceback (most recent call last):\r\n File \"/usr/bin/sam\", line 11, in <module>\r\n sys.exit(cli())\r\n File 
\"/usr/lib/python2.7/site-packages/click/core.py\", line 722, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/lib/python2.7/site-packages/click/core.py\", line 697, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/lib/python2.7/site-packages/click/core.py\", line 1063, in invoke\r\n Command.invoke(self, ctx)\r\n File \"/usr/lib/python2.7/site-packages/click/core.py\", line 895, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/lib/python2.7/site-packages/click/decorators.py\", line 64, in new_func\r\n return ctx.invoke(f, obj, *args[1:], **kwargs)\r\n File \"/usr/lib/python2.7/site-packages/click/core.py\", line 535, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/usr/lib/python2.7/site-packages/samcli/cli/main.py\", line 83, in cli\r\n if global_cfg.telemetry_enabled is None:\r\n File \"/usr/lib/python2.7/site-packages/samcli/cli/global_config.py\", line 113, in telemetry_enabled\r\n self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)\r\n File \"/usr/lib/python2.7/site-packages/samcli/cli/global_config.py\", line 145, in _get_value\r\n cfg_path = self._get_config_file_path(CONFIG_FILENAME)\r\n File \"/usr/lib/python2.7/site-packages/samcli/cli/global_config.py\", line 170, in _get_config_file_path\r\n self._create_dir()\r\n File \"/usr/lib/python2.7/site-packages/samcli/cli/global_config.py\", line 167, in _create_dir\r\n self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)\r\n File \"/usr/lib/python2.7/site-packages/pathlib2/__init__.py\", line 1540, in mkdir\r\n _try_except_filenotfounderror(_try_func, _exc_func)\r\n File \"/usr/lib/python2.7/site-packages/pathlib2/__init__.py\", line 113, in _try_except_filenotfounderror\r\n try_func()\r\n File \"/usr/lib/python2.7/site-packages/pathlib2/__init__.py\", line 1531, in _try_func\r\n self._accessor.mkdir(self, mode)\r\n File \"/usr/lib/python2.7/site-packages/pathlib2/__init__.py\", line 585, in wrapped\r\n return strfunc(str(pathobj), *args)\r\nOSError: [Errno 13] Permission denied: '/.aws-sam'\r\n[Pipeline] }\r\n[Pipeline] // stage\r\n[Pipeline] }\r\n[Pipeline] // withCredentials\r\n[Pipeline] }\r\n[Pipeline] // withEnv\r\n[Pipeline] }\r\n```\r\n### Expected result\r\n\r\nA non-crashing sam :)\r\n\r\n### Additional environment details (Ex: Windows, Mac, Amazon Linux etc)\r\n\r\n1. OS: alpine linux, docker container\r\n2. `sam --version`: 0.19.0\r\n\r\n### Others\r\n\r\nThis was added in the 0.19.0 Telemetry release, where in commit dbd534a `samcli/cli/global_config.py` was added, that tries to write the config dir if it doesn't exist. \r\n\r\nActually we should've pinned the versions used in our `Dockerfile`. Not pinning it had us miss the addition of the Telemetry feature in 0.19.0. Although I'd love to contribute, enabling Telemetry would undoubtedly go against some company policy. There _is_ the message in the first run of the new version (if it was able to write the config dir), but in our setup that's quite easy to miss. 
Basically I'm advocating an opt-in instead\r\n\n", "before_files": [{"content": "\"\"\"\nProvides global configuration helpers.\n\"\"\"\n\nimport json\nimport logging\nimport uuid\nimport os\n\nimport click\n\ntry:\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n from pathlib2 import Path # pragma: no cover\n\nLOG = logging.getLogger(__name__)\n\nCONFIG_FILENAME = \"metadata.json\"\nINSTALLATION_ID_KEY = \"installationId\"\nTELEMETRY_ENABLED_KEY = \"telemetryEnabled\"\n\n\nclass GlobalConfig(object):\n \"\"\"\n Contains helper methods for global configuration files and values. Handles\n configuration file creation, updates, and fetching in a platform-neutral way.\n\n Generally uses '~/.aws-sam/' or 'C:\\\\Users\\\\<user>\\\\AppData\\\\Roaming\\\\AWS SAM' as\n the base directory, depending on platform.\n \"\"\"\n\n def __init__(self, config_dir=None, installation_id=None, telemetry_enabled=None):\n \"\"\"\n Initializes the class, with options provided to assist with testing.\n\n :param config_dir: Optional, overrides the default config directory path.\n :param installation_id: Optional, will use this installation id rather than checking config values.\n \"\"\"\n self._config_dir = config_dir\n self._installation_id = installation_id\n self._telemetry_enabled = telemetry_enabled\n\n @property\n def config_dir(self):\n if not self._config_dir:\n # Internal Environment variable to customize SAM CLI App Dir. Currently used only by integ tests.\n app_dir = os.getenv(\"__SAM_CLI_APP_DIR\")\n self._config_dir = Path(app_dir) if app_dir else Path(click.get_app_dir('AWS SAM', force_posix=True))\n\n return Path(self._config_dir)\n\n @property\n def installation_id(self):\n \"\"\"\n Returns the installation UUID for this AWS SAM CLI installation. If the\n installation id has not yet been set, it will be set before returning.\n\n Examples\n --------\n\n >>> gc = GlobalConfig()\n >>> gc.installation_id\n \"7b7d4db7-2f54-45ba-bf2f-a2cbc9e74a34\"\n\n >>> gc = GlobalConfig()\n >>> gc.installation_id\n None\n\n Returns\n -------\n A string containing the installation UUID, or None in case of an error.\n \"\"\"\n if self._installation_id:\n return self._installation_id\n try:\n self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)\n return self._installation_id\n except (ValueError, IOError):\n return None\n\n @property\n def telemetry_enabled(self):\n \"\"\"\n Check if telemetry is enabled for this installation. Default value of\n False. It first tries to get value from SAM_CLI_TELEMETRY environment variable. If its not set,\n then it fetches the value from config file.\n\n To enable telemetry, set SAM_CLI_TELEMETRY environment variable equal to integer 1 or string '1'.\n All other values including words like 'True', 'true', 'false', 'False', 'abcd' etc will disable Telemetry\n\n Examples\n --------\n\n >>> gc = GlobalConfig()\n >>> gc.telemetry_enabled\n True\n\n Returns\n -------\n Boolean flag value. 
True if telemetry is enabled for this installation,\n False otherwise.\n \"\"\"\n if self._telemetry_enabled is not None:\n return self._telemetry_enabled\n\n # If environment variable is set, its value takes precedence over the value from config file.\n env_name = \"SAM_CLI_TELEMETRY\"\n if env_name in os.environ:\n return os.getenv(env_name) in ('1', 1)\n\n try:\n self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)\n return self._telemetry_enabled\n except (ValueError, IOError) as ex:\n LOG.debug(\"Error when retrieving telemetry_enabled flag\", exc_info=ex)\n return False\n\n @telemetry_enabled.setter\n def telemetry_enabled(self, value):\n \"\"\"\n Sets the telemetry_enabled flag to the provided boolean value.\n\n Examples\n --------\n >>> gc = GlobalConfig()\n >>> gc.telemetry_enabled\n False\n >>> gc.telemetry_enabled = True\n >>> gc.telemetry_enabled\n True\n\n Raises\n ------\n IOError\n If there are errors opening or writing to the global config file.\n\n JSONDecodeError\n If the config file exists, and is not valid JSON.\n \"\"\"\n self._set_value(\"telemetryEnabled\", value)\n self._telemetry_enabled = value\n\n def _get_value(self, key):\n cfg_path = self._get_config_file_path(CONFIG_FILENAME)\n if not cfg_path.exists():\n return None\n with open(str(cfg_path)) as fp:\n body = fp.read()\n json_body = json.loads(body)\n return json_body.get(key)\n\n def _set_value(self, key, value):\n cfg_path = self._get_config_file_path(CONFIG_FILENAME)\n if not cfg_path.exists():\n return self._set_json_cfg(cfg_path, key, value)\n with open(str(cfg_path)) as fp:\n body = fp.read()\n try:\n json_body = json.loads(body)\n except ValueError as ex:\n LOG.debug(\"Failed to decode JSON in {cfg_path}\", exc_info=ex)\n raise ex\n return self._set_json_cfg(cfg_path, key, value, json_body)\n\n def _create_dir(self):\n self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)\n\n def _get_config_file_path(self, filename):\n self._create_dir()\n filepath = self.config_dir.joinpath(filename)\n return filepath\n\n def _get_or_set_uuid(self, key):\n \"\"\"\n Special logic method for when we want a UUID to always be present, this\n method behaves as a getter with side effects. Essentially, if the value\n is not present, we will set it with a generated UUID.\n\n If we have multiple such values in the future, a possible refactor is\n to just be _get_or_set_value, where we also take a default value as a\n parameter.\n \"\"\"\n cfg_value = self._get_value(key)\n if cfg_value is not None:\n return cfg_value\n return self._set_value(key, str(uuid.uuid4()))\n\n def _set_json_cfg(self, filepath, key, value, json_body=None):\n \"\"\"\n Special logic method to add a value to a JSON configuration file. 
This\n method will write a new version of the file in question, so it will\n either write a new file with only the first config value, or if a JSON\n body is provided, it will upsert starting from that JSON body.\n \"\"\"\n json_body = json_body or {}\n json_body[key] = value\n file_body = json.dumps(json_body, indent=4) + \"\\n\"\n try:\n with open(str(filepath), 'w') as f:\n f.write(file_body)\n except IOError as ex:\n LOG.debug(\"Error writing to {filepath}\", exc_info=ex)\n raise ex\n return value\n", "path": "samcli/cli/global_config.py"}], "after_files": [{"content": "\"\"\"\nProvides global configuration helpers.\n\"\"\"\n\nimport json\nimport logging\nimport uuid\nimport os\n\nimport click\n\ntry:\n from pathlib import Path\nexcept ImportError: # pragma: no cover\n from pathlib2 import Path # pragma: no cover\n\nLOG = logging.getLogger(__name__)\n\nCONFIG_FILENAME = \"metadata.json\"\nINSTALLATION_ID_KEY = \"installationId\"\nTELEMETRY_ENABLED_KEY = \"telemetryEnabled\"\n\n\nclass GlobalConfig(object):\n \"\"\"\n Contains helper methods for global configuration files and values. Handles\n configuration file creation, updates, and fetching in a platform-neutral way.\n\n Generally uses '~/.aws-sam/' or 'C:\\\\Users\\\\<user>\\\\AppData\\\\Roaming\\\\AWS SAM' as\n the base directory, depending on platform.\n \"\"\"\n\n def __init__(self, config_dir=None, installation_id=None, telemetry_enabled=None):\n \"\"\"\n Initializes the class, with options provided to assist with testing.\n\n :param config_dir: Optional, overrides the default config directory path.\n :param installation_id: Optional, will use this installation id rather than checking config values.\n \"\"\"\n self._config_dir = config_dir\n self._installation_id = installation_id\n self._telemetry_enabled = telemetry_enabled\n\n @property\n def config_dir(self):\n if not self._config_dir:\n # Internal Environment variable to customize SAM CLI App Dir. Currently used only by integ tests.\n app_dir = os.getenv(\"__SAM_CLI_APP_DIR\")\n self._config_dir = Path(app_dir) if app_dir else Path(click.get_app_dir('AWS SAM', force_posix=True))\n return Path(self._config_dir)\n\n @property\n def installation_id(self):\n \"\"\"\n Returns the installation UUID for this AWS SAM CLI installation. If the\n installation id has not yet been set, it will be set before returning.\n\n Examples\n --------\n\n >>> gc = GlobalConfig()\n >>> gc.installation_id\n \"7b7d4db7-2f54-45ba-bf2f-a2cbc9e74a34\"\n\n >>> gc = GlobalConfig()\n >>> gc.installation_id\n None\n\n Returns\n -------\n A string containing the installation UUID, or None in case of an error.\n \"\"\"\n if self._installation_id:\n return self._installation_id\n try:\n self._installation_id = self._get_or_set_uuid(INSTALLATION_ID_KEY)\n return self._installation_id\n except (ValueError, IOError, OSError):\n return None\n\n @property\n def telemetry_enabled(self):\n \"\"\"\n Check if telemetry is enabled for this installation. Default value of\n False. It first tries to get value from SAM_CLI_TELEMETRY environment variable. If its not set,\n then it fetches the value from config file.\n\n To enable telemetry, set SAM_CLI_TELEMETRY environment variable equal to integer 1 or string '1'.\n All other values including words like 'True', 'true', 'false', 'False', 'abcd' etc will disable Telemetry\n\n Examples\n --------\n\n >>> gc = GlobalConfig()\n >>> gc.telemetry_enabled\n True\n\n Returns\n -------\n Boolean flag value. 
True if telemetry is enabled for this installation,\n False otherwise.\n \"\"\"\n if self._telemetry_enabled is not None:\n return self._telemetry_enabled\n\n # If environment variable is set, its value takes precedence over the value from config file.\n env_name = \"SAM_CLI_TELEMETRY\"\n if env_name in os.environ:\n return os.getenv(env_name) in ('1', 1)\n\n try:\n self._telemetry_enabled = self._get_value(TELEMETRY_ENABLED_KEY)\n return self._telemetry_enabled\n except (ValueError, IOError, OSError) as ex:\n LOG.debug(\"Error when retrieving telemetry_enabled flag\", exc_info=ex)\n return False\n\n @telemetry_enabled.setter\n def telemetry_enabled(self, value):\n \"\"\"\n Sets the telemetry_enabled flag to the provided boolean value.\n\n Examples\n --------\n >>> gc = GlobalConfig()\n >>> gc.telemetry_enabled\n False\n >>> gc.telemetry_enabled = True\n >>> gc.telemetry_enabled\n True\n\n Raises\n ------\n IOError\n If there are errors opening or writing to the global config file.\n\n JSONDecodeError\n If the config file exists, and is not valid JSON.\n \"\"\"\n self._set_value(\"telemetryEnabled\", value)\n self._telemetry_enabled = value\n\n def _get_value(self, key):\n cfg_path = self._get_config_file_path(CONFIG_FILENAME)\n if not cfg_path.exists():\n return None\n with open(str(cfg_path)) as fp:\n body = fp.read()\n json_body = json.loads(body)\n return json_body.get(key)\n\n def _set_value(self, key, value):\n cfg_path = self._get_config_file_path(CONFIG_FILENAME)\n if not cfg_path.exists():\n return self._set_json_cfg(cfg_path, key, value)\n with open(str(cfg_path)) as fp:\n body = fp.read()\n try:\n json_body = json.loads(body)\n except ValueError as ex:\n LOG.debug(\"Failed to decode JSON in {cfg_path}\", exc_info=ex)\n raise ex\n return self._set_json_cfg(cfg_path, key, value, json_body)\n\n def _create_dir(self):\n \"\"\"\n Creates configuration directory if it does not already exist, otherwise does nothing.\n May raise an OSError if we do not have permissions to create the directory.\n \"\"\"\n self.config_dir.mkdir(mode=0o700, parents=True, exist_ok=True)\n\n def _get_config_file_path(self, filename):\n self._create_dir()\n filepath = self.config_dir.joinpath(filename)\n return filepath\n\n def _get_or_set_uuid(self, key):\n \"\"\"\n Special logic method for when we want a UUID to always be present, this\n method behaves as a getter with side effects. Essentially, if the value\n is not present, we will set it with a generated UUID.\n\n If we have multiple such values in the future, a possible refactor is\n to just be _get_or_set_value, where we also take a default value as a\n parameter.\n \"\"\"\n cfg_value = self._get_value(key)\n if cfg_value is not None:\n return cfg_value\n return self._set_value(key, str(uuid.uuid4()))\n\n def _set_json_cfg(self, filepath, key, value, json_body=None):\n \"\"\"\n Special logic method to add a value to a JSON configuration file. This\n method will write a new version of the file in question, so it will\n either write a new file with only the first config value, or if a JSON\n body is provided, it will upsert starting from that JSON body.\n \"\"\"\n json_body = json_body or {}\n json_body[key] = value\n file_body = json.dumps(json_body, indent=4) + \"\\n\"\n try:\n with open(str(filepath), 'w') as f:\n f.write(file_body)\n except IOError as ex:\n LOG.debug(\"Error writing to {filepath}\", exc_info=ex)\n raise ex\n return value\n", "path": "samcli/cli/global_config.py"}]} | 3,765 | 394 |
gh_patches_debug_14057 | rasdani/github-patches | git_diff | quantumlib__Cirq-2682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Need a better compilation for swap_to_sqrt_iswap
Right now it uses 4 sqrtiswaps to support arbitrary angle. For a plain ol' `SWAP` (common for routing qubits around) you should be able to get away with 3
```python
yield cirq.Y(a) ** 0.5
yield cirq.Y(b) ** 0.5
yield SQRT_ISWAP(a, b)
yield cirq.Y(a) ** -0.5
yield cirq.Y(b) ** -0.5
yield SQRT_ISWAP(a, b)
yield cirq.X(a) ** -0.5
yield cirq.X(b) ** -0.5
yield SQRT_ISWAP(a, b)
yield cirq.X(a) ** 0.5
yield cirq.X(b) ** 0.5
```
should do it.
cc @dstrain115 @john6060 @Strilanc
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq/google/optimizers/convert_to_sqrt_iswap.py`
Content:
```
1 # Copyright 2019 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import List, Optional, TYPE_CHECKING
15
16 import numpy as np
17 import sympy
18
19 from cirq import ops, circuits, protocols
20
21 if TYPE_CHECKING:
22 import cirq
23
24 SQRT_ISWAP = ops.ISWAP**0.5
25 SQRT_ISWAP_INV = ops.ISWAP**-0.5
26
27
28 # TODO: Combine this with the equivalent functions in google/gate_set.py
29 # Or better yet, write a proper gate set so we don't need this in two places
30 def _near_mod_n(e, t, n, atol=1e-8):
31 return abs((e - t + 1) % n - 1) <= atol
32
33
34 def _near_mod_2pi(e, t, atol=1e-8):
35 return _near_mod_n(e, t, 2 * np.pi, atol=atol)
36
37
38 class ConvertToSqrtIswapGates(circuits.PointOptimizer):
39 """Attempts to convert gates into ISWAP**-0.5 gates.
40
41 Since we have Z rotations and arbitrary XY rotations, we
42 can rely on cirq decomposition for one qubit gates and
43 need to only specify special decompositions for two qubit gates.
44
45 Currently natively specified gates are CZPowGate, ISwapPowGate,
46 and FSimGate. This will also support gates that decompose into
47 the above gates.
48 """
49
50 def __init__(self, ignore_failures=False) -> None:
51 """
52 Args:
53 ignore_failures: If set, gates that fail to convert are forwarded
54 unchanged. If not set, conversion failures raise a TypeError.
55 """
56 super().__init__()
57 self.ignore_failures = ignore_failures
58
59 def _convert_one(self, op: 'cirq.Operation') -> 'cirq.OP_TREE':
60 """
61 Decomposer intercept: Let cirq decompose one-qubit gates,
62 intercept on 2-qubit gates if they are known gates.
63 """
64 if isinstance(op, ops.GlobalPhaseOperation):
65 return []
66
67 gate = op.gate
68
69 if len(op.qubits) != 2:
70 return NotImplemented
71
72 q0, q1 = op.qubits
73
74 if isinstance(gate, ops.CZPowGate):
75 if isinstance(gate.exponent, sympy.Basic):
76 return cphase_symbols_to_sqrt_iswap(q0, q1, gate.exponent)
77 else:
78 return cphase_to_sqrt_iswap(q0, q1, gate.exponent)
79 if isinstance(gate, ops.SwapPowGate):
80 return swap_to_sqrt_iswap(q0, q1, gate.exponent)
81 if isinstance(gate, ops.ISwapPowGate):
82 return iswap_to_sqrt_iswap(q0, q1, gate.exponent)
83 if isinstance(gate, ops.FSimGate):
84 return fsim_gate(q0, q1, gate.theta, gate.phi)
85
86 return NotImplemented
87
88 def _on_stuck_raise(self, bad):
89 return TypeError(f"Don't know how to work with {bad}. "
90 "It isn't a native sqrt ISWAP operation, "
91 "a 1 or 2 qubit gate with a known unitary, "
92 "or composite.")
93
94 def convert(self, op: 'cirq.Operation') -> List['cirq.Operation']:
95
96 a = protocols.decompose(op,
97 keep=is_sqrt_iswap_compatible,
98 intercepting_decomposer=self._convert_one,
99 on_stuck_raise=(None if self.ignore_failures
100 else self._on_stuck_raise))
101 return a
102
103 def optimization_at(self, circuit, index, op):
104 converted = self.convert(op)
105 if len(converted) == 1 and converted[0] is op:
106 return None
107
108 return circuits.PointOptimizationSummary(clear_span=1,
109 new_operations=converted,
110 clear_qubits=op.qubits)
111
112
113 def is_sqrt_iswap_compatible(op: 'cirq.Operation') -> bool:
114 """Check if the given operation is compatible with the sqrt_iswap gateset
115 gate set.
116
117 Args:
118 op: Input operation.
119
120 Returns:
121 True if the operation is native to the gate set, false otherwise.
122 """
123 return is_basic_gate(op.gate) or is_sqrt_iswap(op.gate)
124
125
126 def is_sqrt_iswap(gate: Optional['cirq.Gate']) -> bool:
127 """Checks if this is a ± sqrt(iSWAP) gate specified using either
128 ISwapPowGate or with the equivalent FSimGate.
129 """
130 if (isinstance(gate, ops.FSimGate) and
131 not isinstance(gate.theta, sympy.Basic) and
132 _near_mod_2pi(abs(gate.theta), np.pi / 4) and
133 _near_mod_2pi(gate.phi, 0)):
134 return True
135 return (isinstance(gate, ops.ISwapPowGate) and
136 not isinstance(gate.exponent, sympy.Basic) and
137 _near_mod_n(abs(gate.exponent), 0.5, 4))
138
139
140 def is_basic_gate(gate: Optional['cirq.Gate']) -> bool:
141 """Check if a gate is a basic supported one-qubit gate.
142
143 Args:
144 gate: Input gate.
145
146 Returns:
147 True if the gate is native to the gate set, false otherwise.
148 """
149 return isinstance(gate, (ops.MeasurementGate, ops.PhasedXPowGate,
150 ops.XPowGate, ops.YPowGate, ops.ZPowGate))
151
152
153 def cphase_to_sqrt_iswap(a, b, turns):
154 """Implement a C-Phase gate using two sqrt ISWAP gates and single-qubit
155 operations. The circuit is equivalent to cirq.CZPowGate(exponent=turns).
156
157 Output unitary:
158 [1 0 0 0],
159 [0 1 0 0],
160 [0 0 1 0],
161 [0 0 0 e^{i turns pi}].
162
163 Args:
164 a: the first qubit
165 b: the second qubit
166 turns: Exponent specifying the evolution time in number of rotations.
167 """
168 theta = (turns % 2) * np.pi
169 if 0 <= theta <= np.pi:
170 sign = 1.
171 theta_prime = theta
172 elif np.pi < theta < 2 * np.pi:
173 sign = -1.
174 theta_prime = 2 * np.pi - theta
175
176 if np.isclose(theta, np.pi):
177 # If we are close to pi, just set values manually to avoid possible
178 # numerical errors with arcsin of greater than 1.0 (Ahem, Windows).
179 phi = np.pi / 2
180 xi = np.pi / 2
181 else:
182 phi = np.arcsin(np.sqrt(2) * np.sin(theta_prime / 4))
183 xi = np.arctan(np.tan(phi) / np.sqrt(2))
184
185 yield ops.rz(sign * 0.5 * theta_prime).on(a)
186 yield ops.rz(sign * 0.5 * theta_prime).on(b)
187 yield ops.rx(xi).on(a)
188 yield ops.X(b)**(-sign * 0.5)
189 yield SQRT_ISWAP_INV(a, b)
190 yield ops.rx(-2 * phi).on(a)
191 yield SQRT_ISWAP(a, b)
192
193 yield ops.rx(xi).on(a)
194 yield ops.X(b)**(sign * 0.5)
195 # Corrects global phase
196 yield ops.GlobalPhaseOperation(np.exp(sign * theta_prime * 0.25j))
197
198
199 def cphase_symbols_to_sqrt_iswap(a, b, turns):
200 """Version of cphase_to_sqrt_iswap that works with symbols.
201
202 Note that the formulae contained below will need to be flattened
203 into a sweep before serializing.
204 """
205 theta = sympy.Mod(turns, 2.0) * sympy.pi
206
207 # -1 if theta > pi. Adds a hacky fudge factor so theta=pi is not 0
208 sign = sympy.sign(sympy.pi - theta + 1e-9)
209
210 # For sign = 1: theta. For sign = -1, 2pi-theta
211 theta_prime = (sympy.pi - sign * sympy.pi) + sign * theta
212
213 phi = sympy.asin(np.sqrt(2) * sympy.sin(theta_prime / 4))
214 xi = sympy.atan(sympy.tan(phi) / np.sqrt(2))
215
216 yield ops.rz(sign * 0.5 * theta_prime).on(a)
217 yield ops.rz(sign * 0.5 * theta_prime).on(b)
218 yield ops.rx(xi).on(a)
219 yield ops.X(b)**(-sign * 0.5)
220 yield SQRT_ISWAP_INV(a, b)
221 yield ops.rx(-2 * phi).on(a)
222 yield SQRT_ISWAP(a, b)
223 yield ops.rx(xi).on(a)
224 yield ops.X(b)**(sign * 0.5)
225
226
227 def iswap_to_sqrt_iswap(a, b, turns):
228 """Implement the evolution of the hopping term using two sqrt_iswap gates
229 and single-qubit operations. Output unitary:
230 [1 0 0 0],
231 [0 c is 0],
232 [0 is c 0],
233 [0 0 0 1],
234 where c = cos(t * np.pi / 2) and s = sin(t * np.pi / 2).
235
236 Args:
237 a: the first qubit
238 b: the second qubit
239 t: Exponent that specifies the evolution time in number of rotations.
240 """
241 yield ops.Z(a)**0.75
242 yield ops.Z(b)**0.25
243 yield SQRT_ISWAP_INV(a, b)
244 yield ops.Z(a)**(-turns / 2 + 1)
245 yield ops.Z(b)**(turns / 2)
246 yield SQRT_ISWAP_INV(a, b)
247 yield ops.Z(a)**0.25
248 yield ops.Z(b)**-0.25
249
250
251 def swap_to_sqrt_iswap(a, b, turns):
252 """Implement the evolution of the hopping term using two sqrt_iswap gates
253 and single-qubit operations. Output unitary:
254 [[1, 0, 0, 0],
255 [0, g·c, -i·g·s, 0],
256 [0, -i·g·s, g·c, 0],
257 [0, 0, 0, 1]]
258 where c = cos(theta) and s = sin(theta).
259 Args:
260 a: the first qubit
261 b: the second qubit
262 theta: The rotational angle that specifies the gate, where
263 c = cos(π·t/2), s = sin(π·t/2), g = exp(i·π·t/2).
264 """
265 yield ops.Z(a)**1.25
266 yield ops.Z(b)**-0.25
267 yield ops.ISWAP(a, b)**-0.5
268 yield ops.Z(a)**(-turns / 2 + 1)
269 yield ops.Z(b)**(turns / 2)
270 yield ops.ISWAP(a, b)**-0.5
271 yield ops.Z(a)**(turns / 2 - 0.25)
272 yield ops.Z(b)**(turns / 2 + 0.25)
273 yield ops.CZ.on(a, b)**(-turns)
274
275
276 def fsim_gate(a, b, theta, phi):
277 """FSimGate has a default decomposition in cirq to XXPowGate and YYPowGate,
278 which is an awkward decomposition for this gate set.
279 Decompose into ISWAP and CZ instead."""
280 if theta != 0.0:
281 yield ops.ISWAP(a, b)**(-2 * theta / np.pi)
282 if phi != 0.0:
283 yield ops.CZPowGate(exponent=-phi / np.pi)(a, b)
284
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq/google/optimizers/convert_to_sqrt_iswap.py b/cirq/google/optimizers/convert_to_sqrt_iswap.py
--- a/cirq/google/optimizers/convert_to_sqrt_iswap.py
+++ b/cirq/google/optimizers/convert_to_sqrt_iswap.py
@@ -262,6 +262,21 @@
theta: The rotational angle that specifies the gate, where
c = cos(π·t/2), s = sin(π·t/2), g = exp(i·π·t/2).
"""
+ if not isinstance(turns, sympy.Basic) and _near_mod_n(turns, 1.0, 2):
+ # Decomposition for cirq.SWAP
+ yield ops.Y(a)**0.5
+ yield ops.Y(b)**0.5
+ yield SQRT_ISWAP(a, b)
+ yield ops.Y(a)**-0.5
+ yield ops.Y(b)**-0.5
+ yield SQRT_ISWAP(a, b)
+ yield ops.X(a)**-0.5
+ yield ops.X(b)**-0.5
+ yield SQRT_ISWAP(a, b)
+ yield ops.X(a)**0.5
+ yield ops.X(b)**0.5
+ return
+
yield ops.Z(a)**1.25
yield ops.Z(b)**-0.25
yield ops.ISWAP(a, b)**-0.5
| {"golden_diff": "diff --git a/cirq/google/optimizers/convert_to_sqrt_iswap.py b/cirq/google/optimizers/convert_to_sqrt_iswap.py\n--- a/cirq/google/optimizers/convert_to_sqrt_iswap.py\n+++ b/cirq/google/optimizers/convert_to_sqrt_iswap.py\n@@ -262,6 +262,21 @@\n theta: The rotational angle that specifies the gate, where\n c = cos(\u03c0\u00b7t/2), s = sin(\u03c0\u00b7t/2), g = exp(i\u00b7\u03c0\u00b7t/2).\n \"\"\"\n+ if not isinstance(turns, sympy.Basic) and _near_mod_n(turns, 1.0, 2):\n+ # Decomposition for cirq.SWAP\n+ yield ops.Y(a)**0.5\n+ yield ops.Y(b)**0.5\n+ yield SQRT_ISWAP(a, b)\n+ yield ops.Y(a)**-0.5\n+ yield ops.Y(b)**-0.5\n+ yield SQRT_ISWAP(a, b)\n+ yield ops.X(a)**-0.5\n+ yield ops.X(b)**-0.5\n+ yield SQRT_ISWAP(a, b)\n+ yield ops.X(a)**0.5\n+ yield ops.X(b)**0.5\n+ return\n+\n yield ops.Z(a)**1.25\n yield ops.Z(b)**-0.25\n yield ops.ISWAP(a, b)**-0.5\n", "issue": "Need a better compilation for swap_to_sqrt_iswap\nRight now it uses 4 sqrtiswaps to support arbitrary angle. For a plain ol' `SWAP` (common for routing qubits around) you should be able to get away with 3\r\n\r\n```python\r\n yield cirq.Y(a) ** 0.5\r\n yield cirq.Y(b) ** 0.5\r\n yield SQRT_ISWAP(a, b)\r\n yield cirq.Y(a) ** -0.5\r\n yield cirq.Y(b) ** -0.5\r\n yield SQRT_ISWAP(a, b)\r\n yield cirq.X(a) ** -0.5\r\n yield cirq.X(b) ** -0.5\r\n yield SQRT_ISWAP(a, b)\r\n yield cirq.X(a) ** 0.5\r\n yield cirq.X(b) ** 0.5\r\n```\r\n\r\nshould do it.\r\n\r\ncc @dstrain115 @john6060 @Strilanc \n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import List, Optional, TYPE_CHECKING\n\nimport numpy as np\nimport sympy\n\nfrom cirq import ops, circuits, protocols\n\nif TYPE_CHECKING:\n import cirq\n\nSQRT_ISWAP = ops.ISWAP**0.5\nSQRT_ISWAP_INV = ops.ISWAP**-0.5\n\n\n# TODO: Combine this with the equivalent functions in google/gate_set.py\n# Or better yet, write a proper gate set so we don't need this in two places\ndef _near_mod_n(e, t, n, atol=1e-8):\n return abs((e - t + 1) % n - 1) <= atol\n\n\ndef _near_mod_2pi(e, t, atol=1e-8):\n return _near_mod_n(e, t, 2 * np.pi, atol=atol)\n\n\nclass ConvertToSqrtIswapGates(circuits.PointOptimizer):\n \"\"\"Attempts to convert gates into ISWAP**-0.5 gates.\n\n Since we have Z rotations and arbitrary XY rotations, we\n can rely on cirq decomposition for one qubit gates and\n need to only specify special decompositions for two qubit gates.\n\n Currently natively specified gates are CZPowGate, ISwapPowGate,\n and FSimGate. This will also support gates that decompose into\n the above gates.\n \"\"\"\n\n def __init__(self, ignore_failures=False) -> None:\n \"\"\"\n Args:\n ignore_failures: If set, gates that fail to convert are forwarded\n unchanged. 
If not set, conversion failures raise a TypeError.\n \"\"\"\n super().__init__()\n self.ignore_failures = ignore_failures\n\n def _convert_one(self, op: 'cirq.Operation') -> 'cirq.OP_TREE':\n \"\"\"\n Decomposer intercept: Let cirq decompose one-qubit gates,\n intercept on 2-qubit gates if they are known gates.\n \"\"\"\n if isinstance(op, ops.GlobalPhaseOperation):\n return []\n\n gate = op.gate\n\n if len(op.qubits) != 2:\n return NotImplemented\n\n q0, q1 = op.qubits\n\n if isinstance(gate, ops.CZPowGate):\n if isinstance(gate.exponent, sympy.Basic):\n return cphase_symbols_to_sqrt_iswap(q0, q1, gate.exponent)\n else:\n return cphase_to_sqrt_iswap(q0, q1, gate.exponent)\n if isinstance(gate, ops.SwapPowGate):\n return swap_to_sqrt_iswap(q0, q1, gate.exponent)\n if isinstance(gate, ops.ISwapPowGate):\n return iswap_to_sqrt_iswap(q0, q1, gate.exponent)\n if isinstance(gate, ops.FSimGate):\n return fsim_gate(q0, q1, gate.theta, gate.phi)\n\n return NotImplemented\n\n def _on_stuck_raise(self, bad):\n return TypeError(f\"Don't know how to work with {bad}. \"\n \"It isn't a native sqrt ISWAP operation, \"\n \"a 1 or 2 qubit gate with a known unitary, \"\n \"or composite.\")\n\n def convert(self, op: 'cirq.Operation') -> List['cirq.Operation']:\n\n a = protocols.decompose(op,\n keep=is_sqrt_iswap_compatible,\n intercepting_decomposer=self._convert_one,\n on_stuck_raise=(None if self.ignore_failures\n else self._on_stuck_raise))\n return a\n\n def optimization_at(self, circuit, index, op):\n converted = self.convert(op)\n if len(converted) == 1 and converted[0] is op:\n return None\n\n return circuits.PointOptimizationSummary(clear_span=1,\n new_operations=converted,\n clear_qubits=op.qubits)\n\n\ndef is_sqrt_iswap_compatible(op: 'cirq.Operation') -> bool:\n \"\"\"Check if the given operation is compatible with the sqrt_iswap gateset\n gate set.\n\n Args:\n op: Input operation.\n\n Returns:\n True if the operation is native to the gate set, false otherwise.\n \"\"\"\n return is_basic_gate(op.gate) or is_sqrt_iswap(op.gate)\n\n\ndef is_sqrt_iswap(gate: Optional['cirq.Gate']) -> bool:\n \"\"\"Checks if this is a \u00b1 sqrt(iSWAP) gate specified using either\n ISwapPowGate or with the equivalent FSimGate.\n \"\"\"\n if (isinstance(gate, ops.FSimGate) and\n not isinstance(gate.theta, sympy.Basic) and\n _near_mod_2pi(abs(gate.theta), np.pi / 4) and\n _near_mod_2pi(gate.phi, 0)):\n return True\n return (isinstance(gate, ops.ISwapPowGate) and\n not isinstance(gate.exponent, sympy.Basic) and\n _near_mod_n(abs(gate.exponent), 0.5, 4))\n\n\ndef is_basic_gate(gate: Optional['cirq.Gate']) -> bool:\n \"\"\"Check if a gate is a basic supported one-qubit gate.\n\n Args:\n gate: Input gate.\n\n Returns:\n True if the gate is native to the gate set, false otherwise.\n \"\"\"\n return isinstance(gate, (ops.MeasurementGate, ops.PhasedXPowGate,\n ops.XPowGate, ops.YPowGate, ops.ZPowGate))\n\n\ndef cphase_to_sqrt_iswap(a, b, turns):\n \"\"\"Implement a C-Phase gate using two sqrt ISWAP gates and single-qubit\n operations. 
The circuit is equivalent to cirq.CZPowGate(exponent=turns).\n\n Output unitary:\n [1 0 0 0],\n [0 1 0 0],\n [0 0 1 0],\n [0 0 0 e^{i turns pi}].\n\n Args:\n a: the first qubit\n b: the second qubit\n turns: Exponent specifying the evolution time in number of rotations.\n \"\"\"\n theta = (turns % 2) * np.pi\n if 0 <= theta <= np.pi:\n sign = 1.\n theta_prime = theta\n elif np.pi < theta < 2 * np.pi:\n sign = -1.\n theta_prime = 2 * np.pi - theta\n\n if np.isclose(theta, np.pi):\n # If we are close to pi, just set values manually to avoid possible\n # numerical errors with arcsin of greater than 1.0 (Ahem, Windows).\n phi = np.pi / 2\n xi = np.pi / 2\n else:\n phi = np.arcsin(np.sqrt(2) * np.sin(theta_prime / 4))\n xi = np.arctan(np.tan(phi) / np.sqrt(2))\n\n yield ops.rz(sign * 0.5 * theta_prime).on(a)\n yield ops.rz(sign * 0.5 * theta_prime).on(b)\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(-sign * 0.5)\n yield SQRT_ISWAP_INV(a, b)\n yield ops.rx(-2 * phi).on(a)\n yield SQRT_ISWAP(a, b)\n\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(sign * 0.5)\n # Corrects global phase\n yield ops.GlobalPhaseOperation(np.exp(sign * theta_prime * 0.25j))\n\n\ndef cphase_symbols_to_sqrt_iswap(a, b, turns):\n \"\"\"Version of cphase_to_sqrt_iswap that works with symbols.\n\n Note that the formulae contained below will need to be flattened\n into a sweep before serializing.\n \"\"\"\n theta = sympy.Mod(turns, 2.0) * sympy.pi\n\n # -1 if theta > pi. Adds a hacky fudge factor so theta=pi is not 0\n sign = sympy.sign(sympy.pi - theta + 1e-9)\n\n # For sign = 1: theta. For sign = -1, 2pi-theta\n theta_prime = (sympy.pi - sign * sympy.pi) + sign * theta\n\n phi = sympy.asin(np.sqrt(2) * sympy.sin(theta_prime / 4))\n xi = sympy.atan(sympy.tan(phi) / np.sqrt(2))\n\n yield ops.rz(sign * 0.5 * theta_prime).on(a)\n yield ops.rz(sign * 0.5 * theta_prime).on(b)\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(-sign * 0.5)\n yield SQRT_ISWAP_INV(a, b)\n yield ops.rx(-2 * phi).on(a)\n yield SQRT_ISWAP(a, b)\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(sign * 0.5)\n\n\ndef iswap_to_sqrt_iswap(a, b, turns):\n \"\"\"Implement the evolution of the hopping term using two sqrt_iswap gates\n and single-qubit operations. Output unitary:\n [1 0 0 0],\n [0 c is 0],\n [0 is c 0],\n [0 0 0 1],\n where c = cos(t * np.pi / 2) and s = sin(t * np.pi / 2).\n\n Args:\n a: the first qubit\n b: the second qubit\n t: Exponent that specifies the evolution time in number of rotations.\n \"\"\"\n yield ops.Z(a)**0.75\n yield ops.Z(b)**0.25\n yield SQRT_ISWAP_INV(a, b)\n yield ops.Z(a)**(-turns / 2 + 1)\n yield ops.Z(b)**(turns / 2)\n yield SQRT_ISWAP_INV(a, b)\n yield ops.Z(a)**0.25\n yield ops.Z(b)**-0.25\n\n\ndef swap_to_sqrt_iswap(a, b, turns):\n \"\"\"Implement the evolution of the hopping term using two sqrt_iswap gates\n and single-qubit operations. 
Output unitary:\n [[1, 0, 0, 0],\n [0, g\u00b7c, -i\u00b7g\u00b7s, 0],\n [0, -i\u00b7g\u00b7s, g\u00b7c, 0],\n [0, 0, 0, 1]]\n where c = cos(theta) and s = sin(theta).\n Args:\n a: the first qubit\n b: the second qubit\n theta: The rotational angle that specifies the gate, where\n c = cos(\u03c0\u00b7t/2), s = sin(\u03c0\u00b7t/2), g = exp(i\u00b7\u03c0\u00b7t/2).\n \"\"\"\n yield ops.Z(a)**1.25\n yield ops.Z(b)**-0.25\n yield ops.ISWAP(a, b)**-0.5\n yield ops.Z(a)**(-turns / 2 + 1)\n yield ops.Z(b)**(turns / 2)\n yield ops.ISWAP(a, b)**-0.5\n yield ops.Z(a)**(turns / 2 - 0.25)\n yield ops.Z(b)**(turns / 2 + 0.25)\n yield ops.CZ.on(a, b)**(-turns)\n\n\ndef fsim_gate(a, b, theta, phi):\n \"\"\"FSimGate has a default decomposition in cirq to XXPowGate and YYPowGate,\n which is an awkward decomposition for this gate set.\n Decompose into ISWAP and CZ instead.\"\"\"\n if theta != 0.0:\n yield ops.ISWAP(a, b)**(-2 * theta / np.pi)\n if phi != 0.0:\n yield ops.CZPowGate(exponent=-phi / np.pi)(a, b)\n", "path": "cirq/google/optimizers/convert_to_sqrt_iswap.py"}], "after_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import List, Optional, TYPE_CHECKING\n\nimport numpy as np\nimport sympy\n\nfrom cirq import ops, circuits, protocols\n\nif TYPE_CHECKING:\n import cirq\n\nSQRT_ISWAP = ops.ISWAP**0.5\nSQRT_ISWAP_INV = ops.ISWAP**-0.5\n\n\n# TODO: Combine this with the equivalent functions in google/gate_set.py\n# Or better yet, write a proper gate set so we don't need this in two places\ndef _near_mod_n(e, t, n, atol=1e-8):\n return abs((e - t + 1) % n - 1) <= atol\n\n\ndef _near_mod_2pi(e, t, atol=1e-8):\n return _near_mod_n(e, t, 2 * np.pi, atol=atol)\n\n\nclass ConvertToSqrtIswapGates(circuits.PointOptimizer):\n \"\"\"Attempts to convert gates into ISWAP**-0.5 gates.\n\n Since we have Z rotations and arbitrary XY rotations, we\n can rely on cirq decomposition for one qubit gates and\n need to only specify special decompositions for two qubit gates.\n\n Currently natively specified gates are CZPowGate, ISwapPowGate,\n and FSimGate. This will also support gates that decompose into\n the above gates.\n \"\"\"\n\n def __init__(self, ignore_failures=False) -> None:\n \"\"\"\n Args:\n ignore_failures: If set, gates that fail to convert are forwarded\n unchanged. 
If not set, conversion failures raise a TypeError.\n \"\"\"\n super().__init__()\n self.ignore_failures = ignore_failures\n\n def _convert_one(self, op: 'cirq.Operation') -> 'cirq.OP_TREE':\n \"\"\"\n Decomposer intercept: Let cirq decompose one-qubit gates,\n intercept on 2-qubit gates if they are known gates.\n \"\"\"\n if isinstance(op, ops.GlobalPhaseOperation):\n return []\n\n gate = op.gate\n\n if len(op.qubits) != 2:\n return NotImplemented\n\n q0, q1 = op.qubits\n\n if isinstance(gate, ops.CZPowGate):\n if isinstance(gate.exponent, sympy.Basic):\n return cphase_symbols_to_sqrt_iswap(q0, q1, gate.exponent)\n else:\n return cphase_to_sqrt_iswap(q0, q1, gate.exponent)\n if isinstance(gate, ops.SwapPowGate):\n return swap_to_sqrt_iswap(q0, q1, gate.exponent)\n if isinstance(gate, ops.ISwapPowGate):\n return iswap_to_sqrt_iswap(q0, q1, gate.exponent)\n if isinstance(gate, ops.FSimGate):\n return fsim_gate(q0, q1, gate.theta, gate.phi)\n\n return NotImplemented\n\n def _on_stuck_raise(self, bad):\n return TypeError(f\"Don't know how to work with {bad}. \"\n \"It isn't a native sqrt ISWAP operation, \"\n \"a 1 or 2 qubit gate with a known unitary, \"\n \"or composite.\")\n\n def convert(self, op: 'cirq.Operation') -> List['cirq.Operation']:\n\n a = protocols.decompose(op,\n keep=is_sqrt_iswap_compatible,\n intercepting_decomposer=self._convert_one,\n on_stuck_raise=(None if self.ignore_failures\n else self._on_stuck_raise))\n return a\n\n def optimization_at(self, circuit, index, op):\n converted = self.convert(op)\n if len(converted) == 1 and converted[0] is op:\n return None\n\n return circuits.PointOptimizationSummary(clear_span=1,\n new_operations=converted,\n clear_qubits=op.qubits)\n\n\ndef is_sqrt_iswap_compatible(op: 'cirq.Operation') -> bool:\n \"\"\"Check if the given operation is compatible with the sqrt_iswap gateset\n gate set.\n\n Args:\n op: Input operation.\n\n Returns:\n True if the operation is native to the gate set, false otherwise.\n \"\"\"\n return is_basic_gate(op.gate) or is_sqrt_iswap(op.gate)\n\n\ndef is_sqrt_iswap(gate: Optional['cirq.Gate']) -> bool:\n \"\"\"Checks if this is a \u00b1 sqrt(iSWAP) gate specified using either\n ISwapPowGate or with the equivalent FSimGate.\n \"\"\"\n if (isinstance(gate, ops.FSimGate) and\n not isinstance(gate.theta, sympy.Basic) and\n _near_mod_2pi(abs(gate.theta), np.pi / 4) and\n _near_mod_2pi(gate.phi, 0)):\n return True\n return (isinstance(gate, ops.ISwapPowGate) and\n not isinstance(gate.exponent, sympy.Basic) and\n _near_mod_n(abs(gate.exponent), 0.5, 4))\n\n\ndef is_basic_gate(gate: Optional['cirq.Gate']) -> bool:\n \"\"\"Check if a gate is a basic supported one-qubit gate.\n\n Args:\n gate: Input gate.\n\n Returns:\n True if the gate is native to the gate set, false otherwise.\n \"\"\"\n return isinstance(gate, (ops.MeasurementGate, ops.PhasedXPowGate,\n ops.XPowGate, ops.YPowGate, ops.ZPowGate))\n\n\ndef cphase_to_sqrt_iswap(a, b, turns):\n \"\"\"Implement a C-Phase gate using two sqrt ISWAP gates and single-qubit\n operations. 
The circuit is equivalent to cirq.CZPowGate(exponent=turns).\n\n Output unitary:\n [1 0 0 0],\n [0 1 0 0],\n [0 0 1 0],\n [0 0 0 e^{i turns pi}].\n\n Args:\n a: the first qubit\n b: the second qubit\n turns: Exponent specifying the evolution time in number of rotations.\n \"\"\"\n theta = (turns % 2) * np.pi\n if 0 <= theta <= np.pi:\n sign = 1.\n theta_prime = theta\n elif np.pi < theta < 2 * np.pi:\n sign = -1.\n theta_prime = 2 * np.pi - theta\n\n if np.isclose(theta, np.pi):\n # If we are close to pi, just set values manually to avoid possible\n # numerical errors with arcsin of greater than 1.0 (Ahem, Windows).\n phi = np.pi / 2\n xi = np.pi / 2\n else:\n phi = np.arcsin(np.sqrt(2) * np.sin(theta_prime / 4))\n xi = np.arctan(np.tan(phi) / np.sqrt(2))\n\n yield ops.rz(sign * 0.5 * theta_prime).on(a)\n yield ops.rz(sign * 0.5 * theta_prime).on(b)\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(-sign * 0.5)\n yield SQRT_ISWAP_INV(a, b)\n yield ops.rx(-2 * phi).on(a)\n yield SQRT_ISWAP(a, b)\n\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(sign * 0.5)\n # Corrects global phase\n yield ops.GlobalPhaseOperation(np.exp(sign * theta_prime * 0.25j))\n\n\ndef cphase_symbols_to_sqrt_iswap(a, b, turns):\n \"\"\"Version of cphase_to_sqrt_iswap that works with symbols.\n\n Note that the formulae contained below will need to be flattened\n into a sweep before serializing.\n \"\"\"\n theta = sympy.Mod(turns, 2.0) * sympy.pi\n\n # -1 if theta > pi. Adds a hacky fudge factor so theta=pi is not 0\n sign = sympy.sign(sympy.pi - theta + 1e-9)\n\n # For sign = 1: theta. For sign = -1, 2pi-theta\n theta_prime = (sympy.pi - sign * sympy.pi) + sign * theta\n\n phi = sympy.asin(np.sqrt(2) * sympy.sin(theta_prime / 4))\n xi = sympy.atan(sympy.tan(phi) / np.sqrt(2))\n\n yield ops.rz(sign * 0.5 * theta_prime).on(a)\n yield ops.rz(sign * 0.5 * theta_prime).on(b)\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(-sign * 0.5)\n yield SQRT_ISWAP_INV(a, b)\n yield ops.rx(-2 * phi).on(a)\n yield SQRT_ISWAP(a, b)\n yield ops.rx(xi).on(a)\n yield ops.X(b)**(sign * 0.5)\n\n\ndef iswap_to_sqrt_iswap(a, b, turns):\n \"\"\"Implement the evolution of the hopping term using two sqrt_iswap gates\n and single-qubit operations. Output unitary:\n [1 0 0 0],\n [0 c is 0],\n [0 is c 0],\n [0 0 0 1],\n where c = cos(t * np.pi / 2) and s = sin(t * np.pi / 2).\n\n Args:\n a: the first qubit\n b: the second qubit\n t: Exponent that specifies the evolution time in number of rotations.\n \"\"\"\n yield ops.Z(a)**0.75\n yield ops.Z(b)**0.25\n yield SQRT_ISWAP_INV(a, b)\n yield ops.Z(a)**(-turns / 2 + 1)\n yield ops.Z(b)**(turns / 2)\n yield SQRT_ISWAP_INV(a, b)\n yield ops.Z(a)**0.25\n yield ops.Z(b)**-0.25\n\n\ndef swap_to_sqrt_iswap(a, b, turns):\n \"\"\"Implement the evolution of the hopping term using two sqrt_iswap gates\n and single-qubit operations. 
Output unitary:\n [[1, 0, 0, 0],\n [0, g\u00b7c, -i\u00b7g\u00b7s, 0],\n [0, -i\u00b7g\u00b7s, g\u00b7c, 0],\n [0, 0, 0, 1]]\n where c = cos(theta) and s = sin(theta).\n Args:\n a: the first qubit\n b: the second qubit\n theta: The rotational angle that specifies the gate, where\n c = cos(\u03c0\u00b7t/2), s = sin(\u03c0\u00b7t/2), g = exp(i\u00b7\u03c0\u00b7t/2).\n \"\"\"\n if not isinstance(turns, sympy.Basic) and _near_mod_n(turns, 1.0, 2):\n # Decomposition for cirq.SWAP\n yield ops.Y(a)**0.5\n yield ops.Y(b)**0.5\n yield SQRT_ISWAP(a, b)\n yield ops.Y(a)**-0.5\n yield ops.Y(b)**-0.5\n yield SQRT_ISWAP(a, b)\n yield ops.X(a)**-0.5\n yield ops.X(b)**-0.5\n yield SQRT_ISWAP(a, b)\n yield ops.X(a)**0.5\n yield ops.X(b)**0.5\n return\n\n yield ops.Z(a)**1.25\n yield ops.Z(b)**-0.25\n yield ops.ISWAP(a, b)**-0.5\n yield ops.Z(a)**(-turns / 2 + 1)\n yield ops.Z(b)**(turns / 2)\n yield ops.ISWAP(a, b)**-0.5\n yield ops.Z(a)**(turns / 2 - 0.25)\n yield ops.Z(b)**(turns / 2 + 0.25)\n yield ops.CZ.on(a, b)**(-turns)\n\n\ndef fsim_gate(a, b, theta, phi):\n \"\"\"FSimGate has a default decomposition in cirq to XXPowGate and YYPowGate,\n which is an awkward decomposition for this gate set.\n Decompose into ISWAP and CZ instead.\"\"\"\n if theta != 0.0:\n yield ops.ISWAP(a, b)**(-2 * theta / np.pi)\n if phi != 0.0:\n yield ops.CZPowGate(exponent=-phi / np.pi)(a, b)\n", "path": "cirq/google/optimizers/convert_to_sqrt_iswap.py"}]} | 4,055 | 337 |
gh_patches_debug_22001 | rasdani/github-patches | git_diff | nautobot__nautobot-5264 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Job Buttons do not commit changes in their job.
<!--
NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.
This form is only for reporting reproducible bugs. If you need assistance
with Nautobot installation, or if you have a general question, please start a
discussion instead: https://github.com/nautobot/nautobot/discussions
Please describe the environment in which you are running Nautobot. Be sure
that you are running an unmodified instance of the latest stable release
before submitting a bug report, and that any plugins have been disabled.
-->
### Environment
* Nautobot version (Docker tag too if applicable): 1.6.11
* Python version: 3.11
* Database platform, version: MySQL
* Middleware(s): NA
<!--
Describe in detail the exact steps that someone else can take to reproduce
this bug using the current stable release of Nautobot. Begin with the
creation of any necessary database objects and call out every operation
being performed explicitly. If reporting a bug in the REST API, be sure to
reconstruct the raw HTTP request(s) being made: Don't rely on a client
library such as pynautobot.
-->
### Steps to Reproduce
1. Create JobButtonReciever, set it to commit by default.
2. Create a Job Button and assign to a ContentType.
3. Click the Job Button on the assigned Content Type.
<!-- What did you expect to happen? -->
### Expected Behavior
The job runs and commits the changes.
<!-- What happened instead? -->
### Observed Behavior
The Job runs and the database changes are reverted.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/extras/templatetags/job_buttons.py`
Content:
```
1 from collections import OrderedDict
2
3 from django import template
4 from django.contrib.contenttypes.models import ContentType
5 from django.urls import reverse
6 from django.utils.html import format_html
7 from django.utils.safestring import mark_safe
8
9 from nautobot.extras.models import Job, JobButton
10 from nautobot.utilities.utils import render_jinja2
11
12
13 register = template.Library()
14
15 GROUP_DROPDOWN = """
16 <div class="btn-group">
17 <button type="button" class="btn btn-sm btn-{group_button_class} dropdown-toggle" data-toggle="dropdown">
18 {group_name} <span class="caret"></span>
19 </button>
20 <ul class="dropdown-menu pull-right">
21 {grouped_buttons}
22 </ul>
23 </div>
24 """
25
26 HIDDEN_INPUTS = """
27 <input type="hidden" name="csrfmiddlewaretoken" value="{csrf_token}">
28 <input type="hidden" name="object_pk" value="{object_pk}">
29 <input type="hidden" name="object_model_name" value="{object_model_name}">
30 <input type="hidden" name="_schedule_type" value="immediately">
31 <input type="hidden" name="_return_url" value="{redirect_path}">
32 """
33
34 NO_CONFIRM_BUTTON = """
35 <button type="submit" form="form_id_{button_id}" class="btn btn-sm btn-{button_class}" {disabled}>{button_text}</button>
36 """
37
38 NO_CONFIRM_FORM = """
39 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
40 {hidden_inputs}
41 </form>
42 """
43
44 CONFIRM_BUTTON = """
45 <button type="button" class="btn btn-sm btn-{button_class}" data-toggle="modal" data-target="#confirm_modal_id_{button_id}" {disabled}>
46 {button_text}
47 </button>
48 """
49
50 CONFIRM_MODAL = """
51 <div class="modal fade" id="confirm_modal_id_{button_id}" tabindex="-1" role="dialog" aria-labelledby="confirm_modal_label_{button_id}">
52 <div class="modal-dialog" role="document">
53 <div class="modal-content">
54 <div class="modal-header">
55             <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button>
56 <h4 class="modal-title" id="confirm_modal_label_{button_id}">Confirmation</h4>
57 </div>
58 <form id="form_id_{button_id}" action="{button_url}" method="post" class="form">
59 <div class="modal-body">
60 {hidden_inputs}
61 Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?
62 </div>
63 <div class="modal-footer">
64 <button type="button" class="btn btn-default" data-dismiss="modal">Cancel</button>
65 <button type="submit" class="btn btn-primary">Confirm</button>
66 </div>
67 </form>
68 </div>
69 </div>
70 </div>
71 """
72
73 SAFE_EMPTY_STR = mark_safe("") # noqa: S308
74
75
76 def _render_job_button_for_obj(job_button, obj, context, content_type):
77 """
78 Helper method for job_buttons templatetag to reduce repetition of code.
79
80 Returns:
81 (str, str): (button_html, form_html)
82 """
83 # Pass select context data when rendering the JobButton text as Jinja2
84 button_context = {
85 "obj": obj,
86 "debug": context.get("debug", False), # django.template.context_processors.debug
87 "request": context["request"], # django.template.context_processors.request
88 "user": context["user"], # django.contrib.auth.context_processors.auth
89 "perms": context["perms"], # django.contrib.auth.context_processors.auth
90 }
91 try:
92 text_rendered = render_jinja2(job_button.text, button_context)
93 except Exception as exc:
94 return (
95 format_html(
96 '<a class="btn btn-sm btn-{}" disabled="disabled" title="{}"><i class="mdi mdi-alert"></i> {}</a>\n',
97 "default" if not job_button.group_name else "link",
98 exc,
99 job_button.name,
100 ),
101 SAFE_EMPTY_STR,
102 )
103
104 if not text_rendered:
105 return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)
106
107 # Disable buttons if the user doesn't have permission to run the underlying Job.
108 has_run_perm = Job.objects.check_perms(context["user"], instance=job_button.job, action="run")
109 hidden_inputs = format_html(
110 HIDDEN_INPUTS,
111 csrf_token=context["csrf_token"],
112 object_pk=obj.pk,
113 object_model_name=f"{content_type.app_label}.{content_type.model}",
114 redirect_path=context["request"].path,
115 )
116 template_args = {
117 "button_id": job_button.pk,
118 "button_text": text_rendered,
119 "button_class": job_button.button_class if not job_button.group_name else "link",
120 "button_url": reverse("extras:job_run", kwargs={"slug": job_button.job.slug}),
121 "object": obj,
122 "job": job_button.job,
123 "hidden_inputs": hidden_inputs,
124 "disabled": "" if has_run_perm else "disabled",
125 }
126
127 if job_button.confirmation:
128 return (
129 format_html(CONFIRM_BUTTON, **template_args),
130 format_html(CONFIRM_MODAL, **template_args),
131 )
132 else:
133 return (
134 format_html(NO_CONFIRM_BUTTON, **template_args),
135 format_html(NO_CONFIRM_FORM, **template_args),
136 )
137
138
139 @register.simple_tag(takes_context=True)
140 def job_buttons(context, obj):
141 """
142 Render all applicable job buttons for the given object.
143 """
144 content_type = ContentType.objects.get_for_model(obj)
145 # We will enforce "run" permission later in deciding which buttons to show as disabled.
146 buttons = JobButton.objects.filter(content_types=content_type)
147 if not buttons:
148 return SAFE_EMPTY_STR
149
150 buttons_html = forms_html = SAFE_EMPTY_STR
151 group_names = OrderedDict()
152
153 for jb in buttons:
154 # Organize job buttons by group for later processing
155 if jb.group_name:
156 group_names.setdefault(jb.group_name, []).append(jb)
157
158 # Render and add non-grouped buttons
159 else:
160 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
161 buttons_html += button_html
162 forms_html += form_html
163
164 # Add grouped buttons to template
165 for group_name, buttons in group_names.items():
166 group_button_class = buttons[0].button_class
167
168 buttons_rendered = SAFE_EMPTY_STR
169
170 for jb in buttons:
171 # Render grouped buttons as list items
172 button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
173 buttons_rendered += format_html("<li>{}</li>", button_html)
174 forms_html += form_html
175
176 if buttons_rendered:
177 buttons_html += format_html(
178 GROUP_DROPDOWN,
179 group_button_class=group_button_class,
180 group_name=group_name,
181 grouped_buttons=buttons_rendered,
182 )
183
184 # We want all of the buttons first and then any modals and forms so the buttons render properly
185 return buttons_html + forms_html
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py
--- a/nautobot/extras/templatetags/job_buttons.py
+++ b/nautobot/extras/templatetags/job_buttons.py
@@ -29,6 +29,7 @@
<input type="hidden" name="object_model_name" value="{object_model_name}">
<input type="hidden" name="_schedule_type" value="immediately">
<input type="hidden" name="_return_url" value="{redirect_path}">
+<input type="hidden" name="_commit" value="on">
"""
NO_CONFIRM_BUTTON = """
@@ -170,8 +171,10 @@
for jb in buttons:
# Render grouped buttons as list items
button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)
- buttons_rendered += format_html("<li>{}</li>", button_html)
- forms_html += form_html
+ if button_html:
+ buttons_rendered += format_html("<li>{}</li>", button_html)
+ if form_html:
+ forms_html += form_html
if buttons_rendered:
buttons_html += format_html(
| {"golden_diff": "diff --git a/nautobot/extras/templatetags/job_buttons.py b/nautobot/extras/templatetags/job_buttons.py\n--- a/nautobot/extras/templatetags/job_buttons.py\n+++ b/nautobot/extras/templatetags/job_buttons.py\n@@ -29,6 +29,7 @@\n <input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n <input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n <input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n+<input type=\"hidden\" name=\"_commit\" value=\"on\">\n \"\"\"\n \n NO_CONFIRM_BUTTON = \"\"\"\n@@ -170,8 +171,10 @@\n for jb in buttons:\n # Render grouped buttons as list items\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n- buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n- forms_html += form_html\n+ if button_html:\n+ buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n+ if form_html:\n+ forms_html += form_html\n \n if buttons_rendered:\n buttons_html += format_html(\n", "issue": "Job Buttons do not commit changes in their job.\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Nautobot version (Docker tag too if applicable): 1.6.11\r\n* Python version: 3.11\r\n* Database platform, version: MySQL\r\n* Middleware(s): NA\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Create JobButtonReciever, set it to commit by default.\r\n2. Create a Job Button and assign to a ContentType.\r\n3. Click the Job Button on the assigned Content Type.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nThe job runs and commits the changes.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\n\r\nThe Job runs and the database changes are reverted.\r\n\r\n\n", "before_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.extras.models import Job, JobButton\nfrom nautobot.utilities.utils import render_jinja2\n\n\nregister = template.Library()\n\nGROUP_DROPDOWN = \"\"\"\n<div class=\"btn-group\">\n <button type=\"button\" class=\"btn btn-sm btn-{group_button_class} dropdown-toggle\" data-toggle=\"dropdown\">\n {group_name} <span class=\"caret\"></span>\n </button>\n <ul class=\"dropdown-menu pull-right\">\n {grouped_buttons}\n </ul>\n</div>\n\"\"\"\n\nHIDDEN_INPUTS = \"\"\"\n<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\"{csrf_token}\">\n<input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n<input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n<input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n<input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n\"\"\"\n\nNO_CONFIRM_BUTTON = \"\"\"\n<button type=\"submit\" form=\"form_id_{button_id}\" class=\"btn btn-sm btn-{button_class}\" {disabled}>{button_text}</button>\n\"\"\"\n\nNO_CONFIRM_FORM = \"\"\"\n<form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n {hidden_inputs}\n</form>\n\"\"\"\n\nCONFIRM_BUTTON = \"\"\"\n<button type=\"button\" class=\"btn btn-sm btn-{button_class}\" data-toggle=\"modal\" data-target=\"#confirm_modal_id_{button_id}\" {disabled}>\n {button_text}\n</button>\n\"\"\"\n\nCONFIRM_MODAL = \"\"\"\n<div class=\"modal fade\" id=\"confirm_modal_id_{button_id}\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"confirm_modal_label_{button_id}\">\n <div class=\"modal-dialog\" role=\"document\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>\n <h4 class=\"modal-title\" id=\"confirm_modal_label_{button_id}\">Confirmation</h4>\n </div>\n <form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n <div class=\"modal-body\">\n {hidden_inputs}\n Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?\n </div>\n <div class=\"modal-footer\">\n <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Cancel</button>\n <button type=\"submit\" class=\"btn btn-primary\">Confirm</button>\n </div>\n </form>\n </div>\n </div>\n</div>\n\"\"\"\n\nSAFE_EMPTY_STR = mark_safe(\"\") # noqa: S308\n\n\ndef _render_job_button_for_obj(job_button, obj, context, content_type):\n \"\"\"\n Helper method for job_buttons templatetag to reduce repetition of code.\n\n Returns:\n (str, str): (button_html, form_html)\n \"\"\"\n # Pass select context data when rendering the JobButton text as Jinja2\n button_context = {\n \"obj\": obj,\n \"debug\": context.get(\"debug\", False), # django.template.context_processors.debug\n \"request\": context[\"request\"], # django.template.context_processors.request\n \"user\": context[\"user\"], # django.contrib.auth.context_processors.auth\n \"perms\": context[\"perms\"], # django.contrib.auth.context_processors.auth\n }\n try:\n text_rendered = render_jinja2(job_button.text, button_context)\n except Exception as exc:\n return 
(\n format_html(\n '<a class=\"btn btn-sm btn-{}\" disabled=\"disabled\" title=\"{}\"><i class=\"mdi mdi-alert\"></i> {}</a>\\n',\n \"default\" if not job_button.group_name else \"link\",\n exc,\n job_button.name,\n ),\n SAFE_EMPTY_STR,\n )\n\n if not text_rendered:\n return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)\n\n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n )\n template_args = {\n \"button_id\": job_button.pk,\n \"button_text\": text_rendered,\n \"button_class\": job_button.button_class if not job_button.group_name else \"link\",\n \"button_url\": reverse(\"extras:job_run\", kwargs={\"slug\": job_button.job.slug}),\n \"object\": obj,\n \"job\": job_button.job,\n \"hidden_inputs\": hidden_inputs,\n \"disabled\": \"\" if has_run_perm else \"disabled\",\n }\n\n if job_button.confirmation:\n return (\n format_html(CONFIRM_BUTTON, **template_args),\n format_html(CONFIRM_MODAL, **template_args),\n )\n else:\n return (\n format_html(NO_CONFIRM_BUTTON, **template_args),\n format_html(NO_CONFIRM_FORM, **template_args),\n )\n\n\[email protected]_tag(takes_context=True)\ndef job_buttons(context, obj):\n \"\"\"\n Render all applicable job buttons for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n # We will enforce \"run\" permission later in deciding which buttons to show as disabled.\n buttons = JobButton.objects.filter(content_types=content_type)\n if not buttons:\n return SAFE_EMPTY_STR\n\n buttons_html = forms_html = SAFE_EMPTY_STR\n group_names = OrderedDict()\n\n for jb in buttons:\n # Organize job buttons by group for later processing\n if jb.group_name:\n group_names.setdefault(jb.group_name, []).append(jb)\n\n # Render and add non-grouped buttons\n else:\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_html += button_html\n forms_html += form_html\n\n # Add grouped buttons to template\n for group_name, buttons in group_names.items():\n group_button_class = buttons[0].button_class\n\n buttons_rendered = SAFE_EMPTY_STR\n\n for jb in buttons:\n # Render grouped buttons as list items\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n forms_html += form_html\n\n if buttons_rendered:\n buttons_html += format_html(\n GROUP_DROPDOWN,\n group_button_class=group_button_class,\n group_name=group_name,\n grouped_buttons=buttons_rendered,\n )\n\n # We want all of the buttons first and then any modals and forms so the buttons render properly\n return buttons_html + forms_html\n", "path": "nautobot/extras/templatetags/job_buttons.py"}], "after_files": [{"content": "from collections import OrderedDict\n\nfrom django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom nautobot.extras.models import Job, JobButton\nfrom nautobot.utilities.utils import render_jinja2\n\n\nregister = template.Library()\n\nGROUP_DROPDOWN = \"\"\"\n<div class=\"btn-group\">\n <button type=\"button\" class=\"btn btn-sm btn-{group_button_class} 
dropdown-toggle\" data-toggle=\"dropdown\">\n {group_name} <span class=\"caret\"></span>\n </button>\n <ul class=\"dropdown-menu pull-right\">\n {grouped_buttons}\n </ul>\n</div>\n\"\"\"\n\nHIDDEN_INPUTS = \"\"\"\n<input type=\"hidden\" name=\"csrfmiddlewaretoken\" value=\"{csrf_token}\">\n<input type=\"hidden\" name=\"object_pk\" value=\"{object_pk}\">\n<input type=\"hidden\" name=\"object_model_name\" value=\"{object_model_name}\">\n<input type=\"hidden\" name=\"_schedule_type\" value=\"immediately\">\n<input type=\"hidden\" name=\"_return_url\" value=\"{redirect_path}\">\n<input type=\"hidden\" name=\"_commit\" value=\"on\">\n\"\"\"\n\nNO_CONFIRM_BUTTON = \"\"\"\n<button type=\"submit\" form=\"form_id_{button_id}\" class=\"btn btn-sm btn-{button_class}\" {disabled}>{button_text}</button>\n\"\"\"\n\nNO_CONFIRM_FORM = \"\"\"\n<form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n {hidden_inputs}\n</form>\n\"\"\"\n\nCONFIRM_BUTTON = \"\"\"\n<button type=\"button\" class=\"btn btn-sm btn-{button_class}\" data-toggle=\"modal\" data-target=\"#confirm_modal_id_{button_id}\" {disabled}>\n {button_text}\n</button>\n\"\"\"\n\nCONFIRM_MODAL = \"\"\"\n<div class=\"modal fade\" id=\"confirm_modal_id_{button_id}\" tabindex=\"-1\" role=\"dialog\" aria-labelledby=\"confirm_modal_label_{button_id}\">\n <div class=\"modal-dialog\" role=\"document\">\n <div class=\"modal-content\">\n <div class=\"modal-header\">\n <button type=\"button\" class=\"close\" data-dismiss=\"modal\" aria-label=\"Close\"><span aria-hidden=\"true\">×</span></button>\n <h4 class=\"modal-title\" id=\"confirm_modal_label_{button_id}\">Confirmation</h4>\n </div>\n <form id=\"form_id_{button_id}\" action=\"{button_url}\" method=\"post\" class=\"form\">\n <div class=\"modal-body\">\n {hidden_inputs}\n Run Job <strong>'{job}'</strong> with object <strong>'{object}'</strong>?\n </div>\n <div class=\"modal-footer\">\n <button type=\"button\" class=\"btn btn-default\" data-dismiss=\"modal\">Cancel</button>\n <button type=\"submit\" class=\"btn btn-primary\">Confirm</button>\n </div>\n </form>\n </div>\n </div>\n</div>\n\"\"\"\n\nSAFE_EMPTY_STR = mark_safe(\"\") # noqa: S308\n\n\ndef _render_job_button_for_obj(job_button, obj, context, content_type):\n \"\"\"\n Helper method for job_buttons templatetag to reduce repetition of code.\n\n Returns:\n (str, str): (button_html, form_html)\n \"\"\"\n # Pass select context data when rendering the JobButton text as Jinja2\n button_context = {\n \"obj\": obj,\n \"debug\": context.get(\"debug\", False), # django.template.context_processors.debug\n \"request\": context[\"request\"], # django.template.context_processors.request\n \"user\": context[\"user\"], # django.contrib.auth.context_processors.auth\n \"perms\": context[\"perms\"], # django.contrib.auth.context_processors.auth\n }\n try:\n text_rendered = render_jinja2(job_button.text, button_context)\n except Exception as exc:\n return (\n format_html(\n '<a class=\"btn btn-sm btn-{}\" disabled=\"disabled\" title=\"{}\"><i class=\"mdi mdi-alert\"></i> {}</a>\\n',\n \"default\" if not job_button.group_name else \"link\",\n exc,\n job_button.name,\n ),\n SAFE_EMPTY_STR,\n )\n\n if not text_rendered:\n return (SAFE_EMPTY_STR, SAFE_EMPTY_STR)\n\n # Disable buttons if the user doesn't have permission to run the underlying Job.\n has_run_perm = Job.objects.check_perms(context[\"user\"], instance=job_button.job, action=\"run\")\n hidden_inputs = format_html(\n HIDDEN_INPUTS,\n csrf_token=context[\"csrf_token\"],\n 
object_pk=obj.pk,\n object_model_name=f\"{content_type.app_label}.{content_type.model}\",\n redirect_path=context[\"request\"].path,\n )\n template_args = {\n \"button_id\": job_button.pk,\n \"button_text\": text_rendered,\n \"button_class\": job_button.button_class if not job_button.group_name else \"link\",\n \"button_url\": reverse(\"extras:job_run\", kwargs={\"slug\": job_button.job.slug}),\n \"object\": obj,\n \"job\": job_button.job,\n \"hidden_inputs\": hidden_inputs,\n \"disabled\": \"\" if has_run_perm else \"disabled\",\n }\n\n if job_button.confirmation:\n return (\n format_html(CONFIRM_BUTTON, **template_args),\n format_html(CONFIRM_MODAL, **template_args),\n )\n else:\n return (\n format_html(NO_CONFIRM_BUTTON, **template_args),\n format_html(NO_CONFIRM_FORM, **template_args),\n )\n\n\[email protected]_tag(takes_context=True)\ndef job_buttons(context, obj):\n \"\"\"\n Render all applicable job buttons for the given object.\n \"\"\"\n content_type = ContentType.objects.get_for_model(obj)\n # We will enforce \"run\" permission later in deciding which buttons to show as disabled.\n buttons = JobButton.objects.filter(content_types=content_type)\n if not buttons:\n return SAFE_EMPTY_STR\n\n buttons_html = forms_html = SAFE_EMPTY_STR\n group_names = OrderedDict()\n\n for jb in buttons:\n # Organize job buttons by group for later processing\n if jb.group_name:\n group_names.setdefault(jb.group_name, []).append(jb)\n\n # Render and add non-grouped buttons\n else:\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n buttons_html += button_html\n forms_html += form_html\n\n # Add grouped buttons to template\n for group_name, buttons in group_names.items():\n group_button_class = buttons[0].button_class\n\n buttons_rendered = SAFE_EMPTY_STR\n\n for jb in buttons:\n # Render grouped buttons as list items\n button_html, form_html = _render_job_button_for_obj(jb, obj, context, content_type)\n if button_html:\n buttons_rendered += format_html(\"<li>{}</li>\", button_html)\n if form_html:\n forms_html += form_html\n\n if buttons_rendered:\n buttons_html += format_html(\n GROUP_DROPDOWN,\n group_button_class=group_button_class,\n group_name=group_name,\n grouped_buttons=buttons_rendered,\n )\n\n # We want all of the buttons first and then any modals and forms so the buttons render properly\n return buttons_html + forms_html\n", "path": "nautobot/extras/templatetags/job_buttons.py"}]} | 2,691 | 276 |