Dataset schema:

| Column | Type | Range / values |
| --- | --- | --- |
| problem_id | string | lengths 18-22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13-58 |
| prompt | string | lengths 1.1k-25.4k |
| golden_diff | string | lengths 145-5.13k |
| verification_info | string | lengths 582-39.1k |
| num_tokens | int64 | 271-4.1k |
| num_tokens_diff | int64 | 47-1.02k |
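Each row below follows this schema. As a quick way to inspect rows programmatically, here is a minimal sketch using the `datasets` library; the repository ID is taken from the `source` column, and the `train` split name is an assumption, so adjust both to wherever this dump is actually hosted.

```python
# Minimal sketch for loading and inspecting rows with this schema.
# Assumptions: the data is hosted on the Hugging Face Hub under the ID from the
# `source` column ("rasdani/github-patches") and exposes a "train" split.
from datasets import load_dataset

ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"], row["num_tokens"])
print(row["prompt"][:300])       # issue text plus the relevant file contents
print(row["golden_diff"][:300])  # reference patch in git diff format
```

The `verification_info` field carries the same diff together with the before/after file contents as a JSON string, so `json.loads(row["verification_info"])` recovers it as a dict.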
problem_id: gh_patches_debug_11324
source: rasdani/github-patches
task_type: git_diff
in_source_id: spack__spack-5006
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Issue while building mpiP Trying to build this on ubuntu desktop gives (config.log): ``` configure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5 conftest.c:11:17: fatal error: mpi.h: No such file or directory compilation terminated. configure:4289: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "mpiP" | #define PACKAGE_TARNAME "mpip" | #define PACKAGE_VERSION "3.3" | #define PACKAGE_STRING "mpiP 3.3" | #define PACKAGE_BUGREPORT "[email protected]" | #define PACKAGE_URL "" | #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose | #define HAVE_LIBM 1 | /* end confdefs.h. */ | #include "mpi.h" | | int | main () | { | #ifndef MPI_Init | #ifdef __cplusplus | (void) MPI_Init; | #else | (void) MPI_Init; | #endif | #endif | | ; | return 0; | } configure:4289: result: no configure:4294: error: "Failed to find declaration for MPI_Init!" ``` I am creating PR with additional `--with-cc` option to configure but then seeing next error: ``` File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader olist = StandardFileHeader(sname) File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n") File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__ olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n") raise KeyError(key) File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__ KeyError: 'LOGNAME' raise KeyError(key) ``` Issue while building mpiP Trying to build this on ubuntu desktop gives (config.log): ``` configure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5 conftest.c:11:17: fatal error: mpi.h: No such file or directory compilation terminated. configure:4289: $? = 1 configure: failed program was: | /* confdefs.h */ | #define PACKAGE_NAME "mpiP" | #define PACKAGE_TARNAME "mpip" | #define PACKAGE_VERSION "3.3" | #define PACKAGE_STRING "mpiP 3.3" | #define PACKAGE_BUGREPORT "[email protected]" | #define PACKAGE_URL "" | #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose | #define HAVE_LIBM 1 | /* end confdefs.h. */ | #include "mpi.h" | | int | main () | { | #ifndef MPI_Init | #ifdef __cplusplus | (void) MPI_Init; | #else | (void) MPI_Init; | #endif | #endif | | ; | return 0; | } configure:4289: result: no configure:4294: error: "Failed to find declaration for MPI_Init!" 
``` I am creating PR with additional `--with-cc` option to configure but then seeing next error: ``` File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader olist = StandardFileHeader(sname) File "/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py", line 712, in StandardFileHeader olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n") File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__ olist.append("/* Creator: " + os.environ["LOGNAME"] + " */\n") raise KeyError(key) File "/usr/lib/python2.7/UserDict.py", line 40, in __getitem__ KeyError: 'LOGNAME' raise KeyError(key) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/mpip/package.py` Content: ``` 1 ############################################################################## 2 # Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC. 3 # Produced at the Lawrence Livermore National Laboratory. 4 # 5 # This file is part of Spack. 6 # Created by Todd Gamblin, [email protected], All rights reserved. 7 # LLNL-CODE-647188 8 # 9 # For details, see https://github.com/llnl/spack 10 # Please also see the NOTICE and LICENSE files for our notice and the LGPL. 11 # 12 # This program is free software; you can redistribute it and/or modify 13 # it under the terms of the GNU Lesser General Public License (as 14 # published by the Free Software Foundation) version 2.1, February 1999. 15 # 16 # This program is distributed in the hope that it will be useful, but 17 # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF 18 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and 19 # conditions of the GNU Lesser General Public License for more details. 20 # 21 # You should have received a copy of the GNU Lesser General Public 22 # License along with this program; if not, write to the Free Software 23 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 ############################################################################## 25 from spack import * 26 import os 27 28 29 class Mpip(AutotoolsPackage): 30 """mpiP: Lightweight, Scalable MPI Profiling""" 31 homepage = "http://mpip.sourceforge.net/" 32 url = "http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz" 33 34 version("3.4.1", "1168adc83777ac31d6ebd385823aabbd") 35 36 depends_on("libelf", type="build") 37 depends_on("libdwarf", type="build") 38 depends_on('libunwind', when=os.uname()[4] == "x86_64", type="build") 39 depends_on("mpi", type="build") 40 41 def configure_args(self): 42 return ['--without-f77'] 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/mpip/package.py b/var/spack/repos/builtin/packages/mpip/package.py --- a/var/spack/repos/builtin/packages/mpip/package.py +++ b/var/spack/repos/builtin/packages/mpip/package.py @@ -33,10 +33,14 @@ version("3.4.1", "1168adc83777ac31d6ebd385823aabbd") - depends_on("libelf", type="build") - depends_on("libdwarf", type="build") - depends_on('libunwind', when=os.uname()[4] == "x86_64", type="build") - depends_on("mpi", type="build") + depends_on("libelf") + depends_on("libdwarf") + depends_on('libunwind', when=os.uname()[4] == "x86_64") + depends_on("mpi") def configure_args(self): - return ['--without-f77'] + config_args = ['--without-f77'] + config_args.append("--with-cc=%s" % self.spec['mpi'].mpicc) + config_args.append("--with-cxx=%s" % self.spec['mpi'].mpicxx) + + return config_args
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/mpip/package.py b/var/spack/repos/builtin/packages/mpip/package.py\n--- a/var/spack/repos/builtin/packages/mpip/package.py\n+++ b/var/spack/repos/builtin/packages/mpip/package.py\n@@ -33,10 +33,14 @@\n \n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n \n- depends_on(\"libelf\", type=\"build\")\n- depends_on(\"libdwarf\", type=\"build\")\n- depends_on('libunwind', when=os.uname()[4] == \"x86_64\", type=\"build\")\n- depends_on(\"mpi\", type=\"build\")\n+ depends_on(\"libelf\")\n+ depends_on(\"libdwarf\")\n+ depends_on('libunwind', when=os.uname()[4] == \"x86_64\")\n+ depends_on(\"mpi\")\n \n def configure_args(self):\n- return ['--without-f77']\n+ config_args = ['--without-f77']\n+ config_args.append(\"--with-cc=%s\" % self.spec['mpi'].mpicc)\n+ config_args.append(\"--with-cxx=%s\" % self.spec['mpi'].mpicxx)\n+\n+ return config_args\n", "issue": "Issue while building mpiP\nTrying to build this on ubuntu desktop gives (config.log):\r\n\r\n```\r\nconfigure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5\r\nconftest.c:11:17: fatal error: mpi.h: No such file or directory\r\ncompilation terminated.\r\nconfigure:4289: $? = 1\r\nconfigure: failed program was:\r\n| /* confdefs.h */\r\n| #define PACKAGE_NAME \"mpiP\"\r\n| #define PACKAGE_TARNAME \"mpip\"\r\n| #define PACKAGE_VERSION \"3.3\"\r\n| #define PACKAGE_STRING \"mpiP 3.3\"\r\n| #define PACKAGE_BUGREPORT \"[email protected]\"\r\n| #define PACKAGE_URL \"\"\r\n| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose\r\n| #define HAVE_LIBM 1\r\n| /* end confdefs.h. */\r\n| #include \"mpi.h\"\r\n|\r\n| int\r\n| main ()\r\n| {\r\n| #ifndef MPI_Init\r\n| #ifdef __cplusplus\r\n| (void) MPI_Init;\r\n| #else\r\n| (void) MPI_Init;\r\n| #endif\r\n| #endif\r\n|\r\n| ;\r\n| return 0;\r\n| }\r\nconfigure:4289: result: no\r\nconfigure:4294: error: \"Failed to find declaration for MPI_Init!\"\r\n```\r\n\r\nI am creating PR with additional `--with-cc` option to configure but then seeing next error:\r\n\r\n```\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist = StandardFileHeader(sname)\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n raise KeyError(key)\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\nKeyError: 'LOGNAME' raise KeyError(key)\r\n```\r\n\nIssue while building mpiP\nTrying to build this on ubuntu desktop gives (config.log):\r\n\r\n```\r\nconfigure:4289: /home/guest/workarena/softwares/sources/spack/lib/spack/env/gcc/gcc -c conftest.c >&5\r\nconftest.c:11:17: fatal error: mpi.h: No such file or directory\r\ncompilation terminated.\r\nconfigure:4289: $? 
= 1\r\nconfigure: failed program was:\r\n| /* confdefs.h */\r\n| #define PACKAGE_NAME \"mpiP\"\r\n| #define PACKAGE_TARNAME \"mpip\"\r\n| #define PACKAGE_VERSION \"3.3\"\r\n| #define PACKAGE_STRING \"mpiP 3.3\"\r\n| #define PACKAGE_BUGREPORT \"[email protected]\"\r\n| #define PACKAGE_URL \"\"\r\n| #define DEFAULT_REPORT_FORMAT mpiPi_style_verbose\r\n| #define HAVE_LIBM 1\r\n| /* end confdefs.h. */\r\n| #include \"mpi.h\"\r\n|\r\n| int\r\n| main ()\r\n| {\r\n| #ifndef MPI_Init\r\n| #ifdef __cplusplus\r\n| (void) MPI_Init;\r\n| #else\r\n| (void) MPI_Init;\r\n| #endif\r\n| #endif\r\n|\r\n| ;\r\n| return 0;\r\n| }\r\nconfigure:4289: result: no\r\nconfigure:4294: error: \"Failed to find declaration for MPI_Init!\"\r\n```\r\n\r\nI am creating PR with additional `--with-cc` option to configure but then seeing next error:\r\n\r\n```\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist = StandardFileHeader(sname)\r\n File \"/home/guest/workarena/softwares/sources/spack/var/spack/stage/mpip-3.4.1-x7l5jk256ayuuirddcxdpbpytlnis3hq/mpiP-3.4.1/make-wrappers.py\", line 712, in StandardFileHeader\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\n olist.append(\"/* Creator: \" + os.environ[\"LOGNAME\"] + \" */\\n\")\r\n raise KeyError(key)\r\n File \"/usr/lib/python2.7/UserDict.py\", line 40, in __getitem__\r\nKeyError: 'LOGNAME' raise KeyError(key)\r\n```\r\n\n", "before_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Mpip(AutotoolsPackage):\n \"\"\"mpiP: Lightweight, Scalable MPI Profiling\"\"\"\n homepage = \"http://mpip.sourceforge.net/\"\n url = \"http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz\"\n\n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n\n depends_on(\"libelf\", type=\"build\")\n depends_on(\"libdwarf\", type=\"build\")\n depends_on('libunwind', when=os.uname()[4] == \"x86_64\", type=\"build\")\n depends_on(\"mpi\", type=\"build\")\n\n def configure_args(self):\n return ['--without-f77']\n", "path": "var/spack/repos/builtin/packages/mpip/package.py"}], "after_files": [{"content": "##############################################################################\n# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n#\n# This file is part of Spack.\n# Created by Todd Gamblin, [email protected], All rights reserved.\n# LLNL-CODE-647188\n#\n# For details, see https://github.com/llnl/spack\n# Please also see the NOTICE and LICENSE files for our notice and the LGPL.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License (as\n# published by the Free Software Foundation) version 2.1, February 1999.\n#\n# This program is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and\n# conditions of the GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n##############################################################################\nfrom spack import *\nimport os\n\n\nclass Mpip(AutotoolsPackage):\n \"\"\"mpiP: Lightweight, Scalable MPI Profiling\"\"\"\n homepage = \"http://mpip.sourceforge.net/\"\n url = \"http://downloads.sourceforge.net/project/mpip/mpiP/mpiP-3.4.1/mpiP-3.4.1.tar.gz\"\n\n version(\"3.4.1\", \"1168adc83777ac31d6ebd385823aabbd\")\n\n depends_on(\"libelf\")\n depends_on(\"libdwarf\")\n depends_on('libunwind', when=os.uname()[4] == \"x86_64\")\n depends_on(\"mpi\")\n\n def configure_args(self):\n config_args = ['--without-f77']\n config_args.append(\"--with-cc=%s\" % self.spec['mpi'].mpicc)\n config_args.append(\"--with-cxx=%s\" % self.spec['mpi'].mpicxx)\n\n return config_args\n", "path": "var/spack/repos/builtin/packages/mpip/package.py"}]}
num_tokens: 2,008
num_tokens_diff: 301
problem_id: gh_patches_debug_31035
source: rasdani/github-patches
task_type: git_diff
in_source_id: apache__airflow-37512
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Filter datasets by dag_id in rest API ### Description We have a list datasets endpoint in the API. Right now we can only filter by searching uri string. It would be useful to be able to pass one or multiple `dag_id`s and only return datasets that include that `dag_id` in `consuming_dags` or `producing_tasks` ### Use case/motivation Make it easy to see what datasets are connected to a dag. Right now our webserver endpoint to get dataset dependencies returns all datasets+dags and then the UI needs to filter it down. ### Related issues _No response_ ### Are you willing to submit a PR? - [ ] Yes I am willing to submit a PR! ### Code of Conduct - [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/api_connexion/endpoints/dataset_endpoint.py` Content: ``` 1 # Licensed to the Apache Software Foundation (ASF) under one 2 # or more contributor license agreements. See the NOTICE file 3 # distributed with this work for additional information 4 # regarding copyright ownership. The ASF licenses this file 5 # to you under the Apache License, Version 2.0 (the 6 # "License"); you may not use this file except in compliance 7 # with the License. You may obtain a copy of the License at 8 # 9 # http://www.apache.org/licenses/LICENSE-2.0 10 # 11 # Unless required by applicable law or agreed to in writing, 12 # software distributed under the License is distributed on an 13 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 14 # KIND, either express or implied. See the License for the 15 # specific language governing permissions and limitations 16 # under the License. 
17 from __future__ import annotations 18 19 from http import HTTPStatus 20 from typing import TYPE_CHECKING 21 22 from connexion import NoContent 23 from sqlalchemy import delete, func, select 24 from sqlalchemy.orm import joinedload, subqueryload 25 26 from airflow.api_connexion import security 27 from airflow.api_connexion.exceptions import NotFound 28 from airflow.api_connexion.parameters import apply_sorting, check_limit, format_datetime, format_parameters 29 from airflow.api_connexion.schemas.dataset_schema import ( 30 DatasetCollection, 31 DatasetEventCollection, 32 QueuedEvent, 33 QueuedEventCollection, 34 dataset_collection_schema, 35 dataset_event_collection_schema, 36 dataset_schema, 37 queued_event_collection_schema, 38 queued_event_schema, 39 ) 40 from airflow.models.dataset import DatasetDagRunQueue, DatasetEvent, DatasetModel 41 from airflow.utils.db import get_query_count 42 from airflow.utils.session import NEW_SESSION, provide_session 43 from airflow.www.extensions.init_auth_manager import get_auth_manager 44 45 if TYPE_CHECKING: 46 from sqlalchemy.orm import Session 47 48 from airflow.api_connexion.types import APIResponse 49 50 51 @security.requires_access_dataset("GET") 52 @provide_session 53 def get_dataset(*, uri: str, session: Session = NEW_SESSION) -> APIResponse: 54 """Get a Dataset.""" 55 dataset = session.scalar( 56 select(DatasetModel) 57 .where(DatasetModel.uri == uri) 58 .options(joinedload(DatasetModel.consuming_dags), joinedload(DatasetModel.producing_tasks)) 59 ) 60 if not dataset: 61 raise NotFound( 62 "Dataset not found", 63 detail=f"The Dataset with uri: `{uri}` was not found", 64 ) 65 return dataset_schema.dump(dataset) 66 67 68 @security.requires_access_dataset("GET") 69 @format_parameters({"limit": check_limit}) 70 @provide_session 71 def get_datasets( 72 *, 73 limit: int, 74 offset: int = 0, 75 uri_pattern: str | None = None, 76 order_by: str = "id", 77 session: Session = NEW_SESSION, 78 ) -> APIResponse: 79 """Get datasets.""" 80 allowed_attrs = ["id", "uri", "created_at", "updated_at"] 81 82 total_entries = session.scalars(select(func.count(DatasetModel.id))).one() 83 query = select(DatasetModel) 84 if uri_pattern: 85 query = query.where(DatasetModel.uri.ilike(f"%{uri_pattern}%")) 86 query = apply_sorting(query, order_by, {}, allowed_attrs) 87 datasets = session.scalars( 88 query.options(subqueryload(DatasetModel.consuming_dags), subqueryload(DatasetModel.producing_tasks)) 89 .offset(offset) 90 .limit(limit) 91 ).all() 92 return dataset_collection_schema.dump(DatasetCollection(datasets=datasets, total_entries=total_entries)) 93 94 95 @security.requires_access_dataset("GET") 96 @provide_session 97 @format_parameters({"limit": check_limit}) 98 def get_dataset_events( 99 *, 100 limit: int, 101 offset: int = 0, 102 order_by: str = "timestamp", 103 dataset_id: int | None = None, 104 source_dag_id: str | None = None, 105 source_task_id: str | None = None, 106 source_run_id: str | None = None, 107 source_map_index: int | None = None, 108 session: Session = NEW_SESSION, 109 ) -> APIResponse: 110 """Get dataset events.""" 111 allowed_attrs = ["source_dag_id", "source_task_id", "source_run_id", "source_map_index", "timestamp"] 112 113 query = select(DatasetEvent) 114 115 if dataset_id: 116 query = query.where(DatasetEvent.dataset_id == dataset_id) 117 if source_dag_id: 118 query = query.where(DatasetEvent.source_dag_id == source_dag_id) 119 if source_task_id: 120 query = query.where(DatasetEvent.source_task_id == source_task_id) 121 if source_run_id: 122 
query = query.where(DatasetEvent.source_run_id == source_run_id) 123 if source_map_index: 124 query = query.where(DatasetEvent.source_map_index == source_map_index) 125 126 query = query.options(subqueryload(DatasetEvent.created_dagruns)) 127 128 total_entries = get_query_count(query, session=session) 129 query = apply_sorting(query, order_by, {}, allowed_attrs) 130 events = session.scalars(query.offset(offset).limit(limit)).all() 131 return dataset_event_collection_schema.dump( 132 DatasetEventCollection(dataset_events=events, total_entries=total_entries) 133 ) 134 135 136 def _generate_queued_event_where_clause( 137 *, 138 dag_id: str | None = None, 139 dataset_id: int | None = None, 140 uri: str | None = None, 141 before: str | None = None, 142 permitted_dag_ids: set[str] | None = None, 143 ) -> list: 144 """Get DatasetDagRunQueue where clause.""" 145 where_clause = [] 146 if dag_id is not None: 147 where_clause.append(DatasetDagRunQueue.target_dag_id == dag_id) 148 if dataset_id is not None: 149 where_clause.append(DatasetDagRunQueue.dataset_id == dataset_id) 150 if uri is not None: 151 where_clause.append( 152 DatasetDagRunQueue.dataset_id.in_( 153 select(DatasetModel.id).where(DatasetModel.uri == uri), 154 ), 155 ) 156 if before is not None: 157 where_clause.append(DatasetDagRunQueue.created_at < format_datetime(before)) 158 if permitted_dag_ids is not None: 159 where_clause.append(DatasetDagRunQueue.target_dag_id.in_(permitted_dag_ids)) 160 return where_clause 161 162 163 @security.requires_access_dataset("GET") 164 @security.requires_access_dag("GET") 165 @provide_session 166 def get_dag_dataset_queued_event( 167 *, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION 168 ) -> APIResponse: 169 """Get a queued Dataset event for a DAG.""" 170 where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before) 171 ddrq = session.scalar( 172 select(DatasetDagRunQueue) 173 .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id) 174 .where(*where_clause) 175 ) 176 if ddrq is None: 177 raise NotFound( 178 "Queue event not found", 179 detail=f"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found", 180 ) 181 queued_event = {"created_at": ddrq.created_at, "dag_id": dag_id, "uri": uri} 182 return queued_event_schema.dump(queued_event) 183 184 185 @security.requires_access_dataset("DELETE") 186 @security.requires_access_dag("GET") 187 @provide_session 188 def delete_dag_dataset_queued_event( 189 *, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION 190 ) -> APIResponse: 191 """Delete a queued Dataset event for a DAG.""" 192 where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before) 193 delete_stmt = ( 194 delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session="fetch") 195 ) 196 result = session.execute(delete_stmt) 197 if result.rowcount > 0: 198 return NoContent, HTTPStatus.NO_CONTENT 199 raise NotFound( 200 "Queue event not found", 201 detail=f"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found", 202 ) 203 204 205 @security.requires_access_dataset("GET") 206 @security.requires_access_dag("GET") 207 @provide_session 208 def get_dag_dataset_queued_events( 209 *, dag_id: str, before: str | None = None, session: Session = NEW_SESSION 210 ) -> APIResponse: 211 """Get queued Dataset events for a DAG.""" 212 where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before) 213 query = 
( 214 select(DatasetDagRunQueue, DatasetModel.uri) 215 .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id) 216 .where(*where_clause) 217 ) 218 result = session.execute(query).all() 219 total_entries = get_query_count(query, session=session) 220 if not result: 221 raise NotFound( 222 "Queue event not found", 223 detail=f"Queue event with dag_id: `{dag_id}` was not found", 224 ) 225 queued_events = [ 226 QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri) for ddrq, uri in result 227 ] 228 return queued_event_collection_schema.dump( 229 QueuedEventCollection(queued_events=queued_events, total_entries=total_entries) 230 ) 231 232 233 @security.requires_access_dataset("DELETE") 234 @security.requires_access_dag("GET") 235 @provide_session 236 def delete_dag_dataset_queued_events( 237 *, dag_id: str, before: str | None = None, session: Session = NEW_SESSION 238 ) -> APIResponse: 239 """Delete queued Dataset events for a DAG.""" 240 where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before) 241 delete_stmt = delete(DatasetDagRunQueue).where(*where_clause) 242 result = session.execute(delete_stmt) 243 if result.rowcount > 0: 244 return NoContent, HTTPStatus.NO_CONTENT 245 246 raise NotFound( 247 "Queue event not found", 248 detail=f"Queue event with dag_id: `{dag_id}` was not found", 249 ) 250 251 252 @security.requires_access_dataset("GET") 253 @provide_session 254 def get_dataset_queued_events( 255 *, uri: str, before: str | None = None, session: Session = NEW_SESSION 256 ) -> APIResponse: 257 """Get queued Dataset events for a Dataset.""" 258 permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=["GET"]) 259 where_clause = _generate_queued_event_where_clause( 260 uri=uri, before=before, permitted_dag_ids=permitted_dag_ids 261 ) 262 query = ( 263 select(DatasetDagRunQueue, DatasetModel.uri) 264 .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id) 265 .where(*where_clause) 266 ) 267 total_entries = get_query_count(query, session=session) 268 result = session.execute(query).all() 269 if total_entries > 0: 270 queued_events = [ 271 QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri) 272 for ddrq, uri in result 273 ] 274 return queued_event_collection_schema.dump( 275 QueuedEventCollection(queued_events=queued_events, total_entries=total_entries) 276 ) 277 raise NotFound( 278 "Queue event not found", 279 detail=f"Queue event with dataset uri: `{uri}` was not found", 280 ) 281 282 283 @security.requires_access_dataset("DELETE") 284 @provide_session 285 def delete_dataset_queued_events( 286 *, uri: str, before: str | None = None, session: Session = NEW_SESSION 287 ) -> APIResponse: 288 """Delete queued Dataset events for a Dataset.""" 289 permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=["GET"]) 290 where_clause = _generate_queued_event_where_clause( 291 uri=uri, before=before, permitted_dag_ids=permitted_dag_ids 292 ) 293 delete_stmt = ( 294 delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session="fetch") 295 ) 296 297 result = session.execute(delete_stmt) 298 if result.rowcount > 0: 299 return NoContent, HTTPStatus.NO_CONTENT 300 raise NotFound( 301 "Queue event not found", 302 detail=f"Queue event with dataset uri: `{uri}` was not found", 303 ) 304 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/api_connexion/endpoints/dataset_endpoint.py b/airflow/api_connexion/endpoints/dataset_endpoint.py --- a/airflow/api_connexion/endpoints/dataset_endpoint.py +++ b/airflow/api_connexion/endpoints/dataset_endpoint.py @@ -27,10 +27,12 @@ from airflow.api_connexion.exceptions import NotFound from airflow.api_connexion.parameters import apply_sorting, check_limit, format_datetime, format_parameters from airflow.api_connexion.schemas.dataset_schema import ( + DagScheduleDatasetReference, DatasetCollection, DatasetEventCollection, QueuedEvent, QueuedEventCollection, + TaskOutletDatasetReference, dataset_collection_schema, dataset_event_collection_schema, dataset_schema, @@ -73,6 +75,7 @@ limit: int, offset: int = 0, uri_pattern: str | None = None, + dag_ids: str | None = None, order_by: str = "id", session: Session = NEW_SESSION, ) -> APIResponse: @@ -81,6 +84,13 @@ total_entries = session.scalars(select(func.count(DatasetModel.id))).one() query = select(DatasetModel) + + if dag_ids: + dags_list = dag_ids.split(",") + query = query.filter( + (DatasetModel.consuming_dags.any(DagScheduleDatasetReference.dag_id.in_(dags_list))) + | (DatasetModel.producing_tasks.any(TaskOutletDatasetReference.dag_id.in_(dags_list))) + ) if uri_pattern: query = query.where(DatasetModel.uri.ilike(f"%{uri_pattern}%")) query = apply_sorting(query, order_by, {}, allowed_attrs)
{"golden_diff": "diff --git a/airflow/api_connexion/endpoints/dataset_endpoint.py b/airflow/api_connexion/endpoints/dataset_endpoint.py\n--- a/airflow/api_connexion/endpoints/dataset_endpoint.py\n+++ b/airflow/api_connexion/endpoints/dataset_endpoint.py\n@@ -27,10 +27,12 @@\n from airflow.api_connexion.exceptions import NotFound\n from airflow.api_connexion.parameters import apply_sorting, check_limit, format_datetime, format_parameters\n from airflow.api_connexion.schemas.dataset_schema import (\n+ DagScheduleDatasetReference,\n DatasetCollection,\n DatasetEventCollection,\n QueuedEvent,\n QueuedEventCollection,\n+ TaskOutletDatasetReference,\n dataset_collection_schema,\n dataset_event_collection_schema,\n dataset_schema,\n@@ -73,6 +75,7 @@\n limit: int,\n offset: int = 0,\n uri_pattern: str | None = None,\n+ dag_ids: str | None = None,\n order_by: str = \"id\",\n session: Session = NEW_SESSION,\n ) -> APIResponse:\n@@ -81,6 +84,13 @@\n \n total_entries = session.scalars(select(func.count(DatasetModel.id))).one()\n query = select(DatasetModel)\n+\n+ if dag_ids:\n+ dags_list = dag_ids.split(\",\")\n+ query = query.filter(\n+ (DatasetModel.consuming_dags.any(DagScheduleDatasetReference.dag_id.in_(dags_list)))\n+ | (DatasetModel.producing_tasks.any(TaskOutletDatasetReference.dag_id.in_(dags_list)))\n+ )\n if uri_pattern:\n query = query.where(DatasetModel.uri.ilike(f\"%{uri_pattern}%\"))\n query = apply_sorting(query, order_by, {}, allowed_attrs)\n", "issue": "Filter datasets by dag_id in rest API\n### Description\r\n\r\nWe have a list datasets endpoint in the API. Right now we can only filter by searching uri string. It would be useful to be able to pass one or multiple `dag_id`s and only return datasets that include that `dag_id` in `consuming_dags` or `producing_tasks`\r\n\r\n### Use case/motivation\r\n\r\nMake it easy to see what datasets are connected to a dag.\r\n\r\nRight now our webserver endpoint to get dataset dependencies returns all datasets+dags and then the UI needs to filter it down.\r\n\r\n\r\n### Related issues\r\n\r\n_No response_\r\n\r\n### Are you willing to submit a PR?\r\n\r\n- [ ] Yes I am willing to submit a PR!\r\n\r\n### Code of Conduct\r\n\r\n- [X] I agree to follow this project's [Code of Conduct](https://github.com/apache/airflow/blob/main/CODE_OF_CONDUCT.md)\r\n\n", "before_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. 
See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom http import HTTPStatus\nfrom typing import TYPE_CHECKING\n\nfrom connexion import NoContent\nfrom sqlalchemy import delete, func, select\nfrom sqlalchemy.orm import joinedload, subqueryload\n\nfrom airflow.api_connexion import security\nfrom airflow.api_connexion.exceptions import NotFound\nfrom airflow.api_connexion.parameters import apply_sorting, check_limit, format_datetime, format_parameters\nfrom airflow.api_connexion.schemas.dataset_schema import (\n DatasetCollection,\n DatasetEventCollection,\n QueuedEvent,\n QueuedEventCollection,\n dataset_collection_schema,\n dataset_event_collection_schema,\n dataset_schema,\n queued_event_collection_schema,\n queued_event_schema,\n)\nfrom airflow.models.dataset import DatasetDagRunQueue, DatasetEvent, DatasetModel\nfrom airflow.utils.db import get_query_count\nfrom airflow.utils.session import NEW_SESSION, provide_session\nfrom airflow.www.extensions.init_auth_manager import get_auth_manager\n\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n\n from airflow.api_connexion.types import APIResponse\n\n\[email protected]_access_dataset(\"GET\")\n@provide_session\ndef get_dataset(*, uri: str, session: Session = NEW_SESSION) -> APIResponse:\n \"\"\"Get a Dataset.\"\"\"\n dataset = session.scalar(\n select(DatasetModel)\n .where(DatasetModel.uri == uri)\n .options(joinedload(DatasetModel.consuming_dags), joinedload(DatasetModel.producing_tasks))\n )\n if not dataset:\n raise NotFound(\n \"Dataset not found\",\n detail=f\"The Dataset with uri: `{uri}` was not found\",\n )\n return dataset_schema.dump(dataset)\n\n\[email protected]_access_dataset(\"GET\")\n@format_parameters({\"limit\": check_limit})\n@provide_session\ndef get_datasets(\n *,\n limit: int,\n offset: int = 0,\n uri_pattern: str | None = None,\n order_by: str = \"id\",\n session: Session = NEW_SESSION,\n) -> APIResponse:\n \"\"\"Get datasets.\"\"\"\n allowed_attrs = [\"id\", \"uri\", \"created_at\", \"updated_at\"]\n\n total_entries = session.scalars(select(func.count(DatasetModel.id))).one()\n query = select(DatasetModel)\n if uri_pattern:\n query = query.where(DatasetModel.uri.ilike(f\"%{uri_pattern}%\"))\n query = apply_sorting(query, order_by, {}, allowed_attrs)\n datasets = session.scalars(\n query.options(subqueryload(DatasetModel.consuming_dags), subqueryload(DatasetModel.producing_tasks))\n .offset(offset)\n .limit(limit)\n ).all()\n return dataset_collection_schema.dump(DatasetCollection(datasets=datasets, total_entries=total_entries))\n\n\[email protected]_access_dataset(\"GET\")\n@provide_session\n@format_parameters({\"limit\": check_limit})\ndef get_dataset_events(\n *,\n limit: int,\n offset: int = 0,\n order_by: str = \"timestamp\",\n dataset_id: int | None = None,\n source_dag_id: str | None = None,\n source_task_id: str | None = None,\n source_run_id: str | None = None,\n source_map_index: int | None = None,\n session: Session = NEW_SESSION,\n) -> APIResponse:\n \"\"\"Get dataset events.\"\"\"\n allowed_attrs = [\"source_dag_id\", \"source_task_id\", \"source_run_id\", \"source_map_index\", \"timestamp\"]\n\n query = select(DatasetEvent)\n\n if dataset_id:\n query = query.where(DatasetEvent.dataset_id == dataset_id)\n if source_dag_id:\n query = query.where(DatasetEvent.source_dag_id == source_dag_id)\n if source_task_id:\n query = query.where(DatasetEvent.source_task_id == source_task_id)\n if source_run_id:\n query 
= query.where(DatasetEvent.source_run_id == source_run_id)\n if source_map_index:\n query = query.where(DatasetEvent.source_map_index == source_map_index)\n\n query = query.options(subqueryload(DatasetEvent.created_dagruns))\n\n total_entries = get_query_count(query, session=session)\n query = apply_sorting(query, order_by, {}, allowed_attrs)\n events = session.scalars(query.offset(offset).limit(limit)).all()\n return dataset_event_collection_schema.dump(\n DatasetEventCollection(dataset_events=events, total_entries=total_entries)\n )\n\n\ndef _generate_queued_event_where_clause(\n *,\n dag_id: str | None = None,\n dataset_id: int | None = None,\n uri: str | None = None,\n before: str | None = None,\n permitted_dag_ids: set[str] | None = None,\n) -> list:\n \"\"\"Get DatasetDagRunQueue where clause.\"\"\"\n where_clause = []\n if dag_id is not None:\n where_clause.append(DatasetDagRunQueue.target_dag_id == dag_id)\n if dataset_id is not None:\n where_clause.append(DatasetDagRunQueue.dataset_id == dataset_id)\n if uri is not None:\n where_clause.append(\n DatasetDagRunQueue.dataset_id.in_(\n select(DatasetModel.id).where(DatasetModel.uri == uri),\n ),\n )\n if before is not None:\n where_clause.append(DatasetDagRunQueue.created_at < format_datetime(before))\n if permitted_dag_ids is not None:\n where_clause.append(DatasetDagRunQueue.target_dag_id.in_(permitted_dag_ids))\n return where_clause\n\n\[email protected]_access_dataset(\"GET\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef get_dag_dataset_queued_event(\n *, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Get a queued Dataset event for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before)\n ddrq = session.scalar(\n select(DatasetDagRunQueue)\n .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)\n .where(*where_clause)\n )\n if ddrq is None:\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found\",\n )\n queued_event = {\"created_at\": ddrq.created_at, \"dag_id\": dag_id, \"uri\": uri}\n return queued_event_schema.dump(queued_event)\n\n\[email protected]_access_dataset(\"DELETE\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef delete_dag_dataset_queued_event(\n *, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Delete a queued Dataset event for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before)\n delete_stmt = (\n delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session=\"fetch\")\n )\n result = session.execute(delete_stmt)\n if result.rowcount > 0:\n return NoContent, HTTPStatus.NO_CONTENT\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found\",\n )\n\n\[email protected]_access_dataset(\"GET\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef get_dag_dataset_queued_events(\n *, dag_id: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Get queued Dataset events for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before)\n query = (\n select(DatasetDagRunQueue, DatasetModel.uri)\n .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)\n .where(*where_clause)\n )\n result = 
session.execute(query).all()\n total_entries = get_query_count(query, session=session)\n if not result:\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` was not found\",\n )\n queued_events = [\n QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri) for ddrq, uri in result\n ]\n return queued_event_collection_schema.dump(\n QueuedEventCollection(queued_events=queued_events, total_entries=total_entries)\n )\n\n\[email protected]_access_dataset(\"DELETE\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef delete_dag_dataset_queued_events(\n *, dag_id: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Delete queued Dataset events for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before)\n delete_stmt = delete(DatasetDagRunQueue).where(*where_clause)\n result = session.execute(delete_stmt)\n if result.rowcount > 0:\n return NoContent, HTTPStatus.NO_CONTENT\n\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` was not found\",\n )\n\n\[email protected]_access_dataset(\"GET\")\n@provide_session\ndef get_dataset_queued_events(\n *, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Get queued Dataset events for a Dataset.\"\"\"\n permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=[\"GET\"])\n where_clause = _generate_queued_event_where_clause(\n uri=uri, before=before, permitted_dag_ids=permitted_dag_ids\n )\n query = (\n select(DatasetDagRunQueue, DatasetModel.uri)\n .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)\n .where(*where_clause)\n )\n total_entries = get_query_count(query, session=session)\n result = session.execute(query).all()\n if total_entries > 0:\n queued_events = [\n QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri)\n for ddrq, uri in result\n ]\n return queued_event_collection_schema.dump(\n QueuedEventCollection(queued_events=queued_events, total_entries=total_entries)\n )\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dataset uri: `{uri}` was not found\",\n )\n\n\[email protected]_access_dataset(\"DELETE\")\n@provide_session\ndef delete_dataset_queued_events(\n *, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Delete queued Dataset events for a Dataset.\"\"\"\n permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=[\"GET\"])\n where_clause = _generate_queued_event_where_clause(\n uri=uri, before=before, permitted_dag_ids=permitted_dag_ids\n )\n delete_stmt = (\n delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session=\"fetch\")\n )\n\n result = session.execute(delete_stmt)\n if result.rowcount > 0:\n return NoContent, HTTPStatus.NO_CONTENT\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dataset uri: `{uri}` was not found\",\n )\n", "path": "airflow/api_connexion/endpoints/dataset_endpoint.py"}], "after_files": [{"content": "# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\nfrom __future__ import annotations\n\nfrom http import HTTPStatus\nfrom typing import TYPE_CHECKING\n\nfrom connexion import NoContent\nfrom sqlalchemy import delete, func, select\nfrom sqlalchemy.orm import joinedload, subqueryload\n\nfrom airflow.api_connexion import security\nfrom airflow.api_connexion.exceptions import NotFound\nfrom airflow.api_connexion.parameters import apply_sorting, check_limit, format_datetime, format_parameters\nfrom airflow.api_connexion.schemas.dataset_schema import (\n DagScheduleDatasetReference,\n DatasetCollection,\n DatasetEventCollection,\n QueuedEvent,\n QueuedEventCollection,\n TaskOutletDatasetReference,\n dataset_collection_schema,\n dataset_event_collection_schema,\n dataset_schema,\n queued_event_collection_schema,\n queued_event_schema,\n)\nfrom airflow.models.dataset import DatasetDagRunQueue, DatasetEvent, DatasetModel\nfrom airflow.utils.db import get_query_count\nfrom airflow.utils.session import NEW_SESSION, provide_session\nfrom airflow.www.extensions.init_auth_manager import get_auth_manager\n\nif TYPE_CHECKING:\n from sqlalchemy.orm import Session\n\n from airflow.api_connexion.types import APIResponse\n\n\[email protected]_access_dataset(\"GET\")\n@provide_session\ndef get_dataset(*, uri: str, session: Session = NEW_SESSION) -> APIResponse:\n \"\"\"Get a Dataset.\"\"\"\n dataset = session.scalar(\n select(DatasetModel)\n .where(DatasetModel.uri == uri)\n .options(joinedload(DatasetModel.consuming_dags), joinedload(DatasetModel.producing_tasks))\n )\n if not dataset:\n raise NotFound(\n \"Dataset not found\",\n detail=f\"The Dataset with uri: `{uri}` was not found\",\n )\n return dataset_schema.dump(dataset)\n\n\[email protected]_access_dataset(\"GET\")\n@format_parameters({\"limit\": check_limit})\n@provide_session\ndef get_datasets(\n *,\n limit: int,\n offset: int = 0,\n uri_pattern: str | None = None,\n dag_ids: str | None = None,\n order_by: str = \"id\",\n session: Session = NEW_SESSION,\n) -> APIResponse:\n \"\"\"Get datasets.\"\"\"\n allowed_attrs = [\"id\", \"uri\", \"created_at\", \"updated_at\"]\n\n total_entries = session.scalars(select(func.count(DatasetModel.id))).one()\n query = select(DatasetModel)\n\n if dag_ids:\n dags_list = dag_ids.split(\",\")\n query = query.filter(\n (DatasetModel.consuming_dags.any(DagScheduleDatasetReference.dag_id.in_(dags_list)))\n | (DatasetModel.producing_tasks.any(TaskOutletDatasetReference.dag_id.in_(dags_list)))\n )\n if uri_pattern:\n query = query.where(DatasetModel.uri.ilike(f\"%{uri_pattern}%\"))\n query = apply_sorting(query, order_by, {}, allowed_attrs)\n datasets = session.scalars(\n query.options(subqueryload(DatasetModel.consuming_dags), subqueryload(DatasetModel.producing_tasks))\n .offset(offset)\n .limit(limit)\n ).all()\n return dataset_collection_schema.dump(DatasetCollection(datasets=datasets, total_entries=total_entries))\n\n\[email protected]_access_dataset(\"GET\")\n@provide_session\n@format_parameters({\"limit\": check_limit})\ndef get_dataset_events(\n *,\n limit: int,\n offset: int = 0,\n order_by: str = \"timestamp\",\n dataset_id: int | None = None,\n 
source_dag_id: str | None = None,\n source_task_id: str | None = None,\n source_run_id: str | None = None,\n source_map_index: int | None = None,\n session: Session = NEW_SESSION,\n) -> APIResponse:\n \"\"\"Get dataset events.\"\"\"\n allowed_attrs = [\"source_dag_id\", \"source_task_id\", \"source_run_id\", \"source_map_index\", \"timestamp\"]\n\n query = select(DatasetEvent)\n\n if dataset_id:\n query = query.where(DatasetEvent.dataset_id == dataset_id)\n if source_dag_id:\n query = query.where(DatasetEvent.source_dag_id == source_dag_id)\n if source_task_id:\n query = query.where(DatasetEvent.source_task_id == source_task_id)\n if source_run_id:\n query = query.where(DatasetEvent.source_run_id == source_run_id)\n if source_map_index:\n query = query.where(DatasetEvent.source_map_index == source_map_index)\n\n query = query.options(subqueryload(DatasetEvent.created_dagruns))\n\n total_entries = get_query_count(query, session=session)\n query = apply_sorting(query, order_by, {}, allowed_attrs)\n events = session.scalars(query.offset(offset).limit(limit)).all()\n return dataset_event_collection_schema.dump(\n DatasetEventCollection(dataset_events=events, total_entries=total_entries)\n )\n\n\ndef _generate_queued_event_where_clause(\n *,\n dag_id: str | None = None,\n dataset_id: int | None = None,\n uri: str | None = None,\n before: str | None = None,\n permitted_dag_ids: set[str] | None = None,\n) -> list:\n \"\"\"Get DatasetDagRunQueue where clause.\"\"\"\n where_clause = []\n if dag_id is not None:\n where_clause.append(DatasetDagRunQueue.target_dag_id == dag_id)\n if dataset_id is not None:\n where_clause.append(DatasetDagRunQueue.dataset_id == dataset_id)\n if uri is not None:\n where_clause.append(\n DatasetDagRunQueue.dataset_id.in_(\n select(DatasetModel.id).where(DatasetModel.uri == uri),\n ),\n )\n if before is not None:\n where_clause.append(DatasetDagRunQueue.created_at < format_datetime(before))\n if permitted_dag_ids is not None:\n where_clause.append(DatasetDagRunQueue.target_dag_id.in_(permitted_dag_ids))\n return where_clause\n\n\[email protected]_access_dataset(\"GET\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef get_dag_dataset_queued_event(\n *, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Get a queued Dataset event for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before)\n ddrq = session.scalar(\n select(DatasetDagRunQueue)\n .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)\n .where(*where_clause)\n )\n if ddrq is None:\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found\",\n )\n queued_event = {\"created_at\": ddrq.created_at, \"dag_id\": dag_id, \"uri\": uri}\n return queued_event_schema.dump(queued_event)\n\n\[email protected]_access_dataset(\"DELETE\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef delete_dag_dataset_queued_event(\n *, dag_id: str, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Delete a queued Dataset event for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, uri=uri, before=before)\n delete_stmt = (\n delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session=\"fetch\")\n )\n result = session.execute(delete_stmt)\n if result.rowcount > 0:\n return NoContent, HTTPStatus.NO_CONTENT\n raise 
NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` and dataset uri: `{uri}` was not found\",\n )\n\n\[email protected]_access_dataset(\"GET\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef get_dag_dataset_queued_events(\n *, dag_id: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Get queued Dataset events for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before)\n query = (\n select(DatasetDagRunQueue, DatasetModel.uri)\n .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)\n .where(*where_clause)\n )\n result = session.execute(query).all()\n total_entries = get_query_count(query, session=session)\n if not result:\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` was not found\",\n )\n queued_events = [\n QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri) for ddrq, uri in result\n ]\n return queued_event_collection_schema.dump(\n QueuedEventCollection(queued_events=queued_events, total_entries=total_entries)\n )\n\n\[email protected]_access_dataset(\"DELETE\")\[email protected]_access_dag(\"GET\")\n@provide_session\ndef delete_dag_dataset_queued_events(\n *, dag_id: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Delete queued Dataset events for a DAG.\"\"\"\n where_clause = _generate_queued_event_where_clause(dag_id=dag_id, before=before)\n delete_stmt = delete(DatasetDagRunQueue).where(*where_clause)\n result = session.execute(delete_stmt)\n if result.rowcount > 0:\n return NoContent, HTTPStatus.NO_CONTENT\n\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dag_id: `{dag_id}` was not found\",\n )\n\n\[email protected]_access_dataset(\"GET\")\n@provide_session\ndef get_dataset_queued_events(\n *, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Get queued Dataset events for a Dataset.\"\"\"\n permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=[\"GET\"])\n where_clause = _generate_queued_event_where_clause(\n uri=uri, before=before, permitted_dag_ids=permitted_dag_ids\n )\n query = (\n select(DatasetDagRunQueue, DatasetModel.uri)\n .join(DatasetModel, DatasetDagRunQueue.dataset_id == DatasetModel.id)\n .where(*where_clause)\n )\n total_entries = get_query_count(query, session=session)\n result = session.execute(query).all()\n if total_entries > 0:\n queued_events = [\n QueuedEvent(created_at=ddrq.created_at, dag_id=ddrq.target_dag_id, uri=uri)\n for ddrq, uri in result\n ]\n return queued_event_collection_schema.dump(\n QueuedEventCollection(queued_events=queued_events, total_entries=total_entries)\n )\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dataset uri: `{uri}` was not found\",\n )\n\n\[email protected]_access_dataset(\"DELETE\")\n@provide_session\ndef delete_dataset_queued_events(\n *, uri: str, before: str | None = None, session: Session = NEW_SESSION\n) -> APIResponse:\n \"\"\"Delete queued Dataset events for a Dataset.\"\"\"\n permitted_dag_ids = get_auth_manager().get_permitted_dag_ids(methods=[\"GET\"])\n where_clause = _generate_queued_event_where_clause(\n uri=uri, before=before, permitted_dag_ids=permitted_dag_ids\n )\n delete_stmt = (\n delete(DatasetDagRunQueue).where(*where_clause).execution_options(synchronize_session=\"fetch\")\n )\n\n result = session.execute(delete_stmt)\n if 
result.rowcount > 0:\n return NoContent, HTTPStatus.NO_CONTENT\n raise NotFound(\n \"Queue event not found\",\n detail=f\"Queue event with dataset uri: `{uri}` was not found\",\n )\n", "path": "airflow/api_connexion/endpoints/dataset_endpoint.py"}]}
num_tokens: 3,939
num_tokens_diff: 383
problem_id: gh_patches_debug_33523
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws__aws-cli-1945
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Not properly outputting in MINGW System: Windows 7 64bit steps to reproduce: 1. Open mingw64 (git bash) 2. Run `aws configure` 3. Observe no output 4. Hit enter 4 times 5. Observe output It looks as if AWS CLI is thinking it is outputting text however it actually isn't. ![image](https://cloud.githubusercontent.com/assets/855184/14644114/86f4eb30-0616-11e6-835c-21cb94db9eb4.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awscli/compat.py` Content: ``` 1 # Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 7 # http://aws.amazon.com/apache2.0/ 8 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 import sys 14 import os 15 import zipfile 16 17 from botocore.compat import six 18 #import botocore.compat 19 20 # If you ever want to import from the vendored six. Add it here and then 21 # import from awscli.compat. Also try to keep it in alphabetical order. 22 # This may get large. 23 advance_iterator = six.advance_iterator 24 PY3 = six.PY3 25 queue = six.moves.queue 26 shlex_quote = six.moves.shlex_quote 27 StringIO = six.StringIO 28 urlopen = six.moves.urllib.request.urlopen 29 30 # Most, but not all, python installations will have zlib. This is required to 31 # compress any files we send via a push. If we can't compress, we can still 32 # package the files in a zip container. 33 try: 34 import zlib 35 ZIP_COMPRESSION_MODE = zipfile.ZIP_DEFLATED 36 except ImportError: 37 ZIP_COMPRESSION_MODE = zipfile.ZIP_STORED 38 39 40 class BinaryStdout(object): 41 def __enter__(self): 42 if sys.platform == "win32": 43 import msvcrt 44 self.previous_mode = msvcrt.setmode(sys.stdout.fileno(), 45 os.O_BINARY) 46 return sys.stdout 47 48 def __exit__(self, type, value, traceback): 49 if sys.platform == "win32": 50 import msvcrt 51 msvcrt.setmode(sys.stdout.fileno(), self.previous_mode) 52 53 54 if six.PY3: 55 import locale 56 import urllib.parse as urlparse 57 58 from urllib.error import URLError 59 60 raw_input = input 61 62 def get_stdout_text_writer(): 63 return sys.stdout 64 65 def compat_open(filename, mode='r', encoding=None): 66 """Back-port open() that accepts an encoding argument. 67 68 In python3 this uses the built in open() and in python2 this 69 uses the io.open() function. 70 71 If the file is not being opened in binary mode, then we'll 72 use locale.getpreferredencoding() to find the preferred 73 encoding. 74 75 """ 76 if 'b' not in mode: 77 encoding = locale.getpreferredencoding() 78 return open(filename, mode, encoding=encoding) 79 80 else: 81 import codecs 82 import locale 83 import io 84 import urlparse 85 86 from urllib2 import URLError 87 88 raw_input = raw_input 89 90 def get_stdout_text_writer(): 91 # In python3, all the sys.stdout/sys.stderr streams are in text 92 # mode. This means they expect unicode, and will encode the 93 # unicode automatically before actually writing to stdout/stderr. 94 # In python2, that's not the case. 
In order to provide a consistent 95 # interface, we can create a wrapper around sys.stdout that will take 96 # unicode, and automatically encode it to the preferred encoding. 97 # That way consumers can just call get_stdout_text_writer() and write 98 # unicode to the returned stream. Note that get_stdout_text_writer 99 # just returns sys.stdout in the PY3 section above because python3 100 # handles this. 101 return codecs.getwriter(locale.getpreferredencoding())(sys.stdout) 102 103 def compat_open(filename, mode='r', encoding=None): 104 # See docstring for compat_open in the PY3 section above. 105 if 'b' not in mode: 106 encoding = locale.getpreferredencoding() 107 return io.open(filename, mode, encoding=encoding) 108 ``` Path: `awscli/customizations/configure/configure.py` Content: ``` 1 # Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"). You 4 # may not use this file except in compliance with the License. A copy of 5 # the License is located at 6 # 7 # http://aws.amazon.com/apache2.0/ 8 # 9 # or in the "license" file accompanying this file. This file is 10 # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 # ANY KIND, either express or implied. See the License for the specific 12 # language governing permissions and limitations under the License. 13 import os 14 import logging 15 16 from botocore.exceptions import ProfileNotFound 17 18 from awscli.compat import raw_input 19 from awscli.customizations.commands import BasicCommand 20 from awscli.customizations.configure.addmodel import AddModelCommand 21 from awscli.customizations.configure.set import ConfigureSetCommand 22 from awscli.customizations.configure.get import ConfigureGetCommand 23 from awscli.customizations.configure.list import ConfigureListCommand 24 from awscli.customizations.configure.writer import ConfigFileWriter 25 26 from . import mask_value 27 28 29 logger = logging.getLogger(__name__) 30 31 32 def register_configure_cmd(cli): 33 cli.register('building-command-table.main', 34 ConfigureCommand.add_command) 35 36 37 class InteractivePrompter(object): 38 39 def get_value(self, current_value, config_name, prompt_text=''): 40 if config_name in ('aws_access_key_id', 'aws_secret_access_key'): 41 current_value = mask_value(current_value) 42 response = raw_input("%s [%s]: " % (prompt_text, current_value)) 43 if not response: 44 # If the user hits enter, we return a value of None 45 # instead of an empty string. That way we can determine 46 # whether or not a value has changed. 
47 response = None 48 return response 49 50 51 class ConfigureCommand(BasicCommand): 52 NAME = 'configure' 53 DESCRIPTION = BasicCommand.FROM_FILE() 54 SYNOPSIS = ('aws configure [--profile profile-name]') 55 EXAMPLES = ( 56 'To create a new configuration::\n' 57 '\n' 58 ' $ aws configure\n' 59 ' AWS Access Key ID [None]: accesskey\n' 60 ' AWS Secret Access Key [None]: secretkey\n' 61 ' Default region name [None]: us-west-2\n' 62 ' Default output format [None]:\n' 63 '\n' 64 'To update just the region name::\n' 65 '\n' 66 ' $ aws configure\n' 67 ' AWS Access Key ID [****]:\n' 68 ' AWS Secret Access Key [****]:\n' 69 ' Default region name [us-west-1]: us-west-2\n' 70 ' Default output format [None]:\n' 71 ) 72 SUBCOMMANDS = [ 73 {'name': 'list', 'command_class': ConfigureListCommand}, 74 {'name': 'get', 'command_class': ConfigureGetCommand}, 75 {'name': 'set', 'command_class': ConfigureSetCommand}, 76 {'name': 'add-model', 'command_class': AddModelCommand} 77 ] 78 79 # If you want to add new values to prompt, update this list here. 80 VALUES_TO_PROMPT = [ 81 # (logical_name, config_name, prompt_text) 82 ('aws_access_key_id', "AWS Access Key ID"), 83 ('aws_secret_access_key', "AWS Secret Access Key"), 84 ('region', "Default region name"), 85 ('output', "Default output format"), 86 ] 87 88 def __init__(self, session, prompter=None, config_writer=None): 89 super(ConfigureCommand, self).__init__(session) 90 if prompter is None: 91 prompter = InteractivePrompter() 92 self._prompter = prompter 93 if config_writer is None: 94 config_writer = ConfigFileWriter() 95 self._config_writer = config_writer 96 97 def _run_main(self, parsed_args, parsed_globals): 98 # Called when invoked with no args "aws configure" 99 new_values = {} 100 # This is the config from the config file scoped to a specific 101 # profile. 102 try: 103 config = self._session.get_scoped_config() 104 except ProfileNotFound: 105 config = {} 106 for config_name, prompt_text in self.VALUES_TO_PROMPT: 107 current_value = config.get(config_name) 108 new_value = self._prompter.get_value(current_value, config_name, 109 prompt_text) 110 if new_value is not None and new_value != current_value: 111 new_values[config_name] = new_value 112 config_filename = os.path.expanduser( 113 self._session.get_config_variable('config_file')) 114 if new_values: 115 self._write_out_creds_file_values(new_values, 116 parsed_globals.profile) 117 if parsed_globals.profile is not None: 118 new_values['__section__'] = ( 119 'profile %s' % parsed_globals.profile) 120 self._config_writer.update_config(new_values, config_filename) 121 122 def _write_out_creds_file_values(self, new_values, profile_name): 123 # The access_key/secret_key are now *always* written to the shared 124 # credentials file (~/.aws/credentials), see aws/aws-cli#847. 125 # post-conditions: ~/.aws/credentials will have the updated credential 126 # file values and new_values will have the cred vars removed. 
127 credential_file_values = {} 128 if 'aws_access_key_id' in new_values: 129 credential_file_values['aws_access_key_id'] = new_values.pop( 130 'aws_access_key_id') 131 if 'aws_secret_access_key' in new_values: 132 credential_file_values['aws_secret_access_key'] = new_values.pop( 133 'aws_secret_access_key') 134 if credential_file_values: 135 if profile_name is not None: 136 credential_file_values['__section__'] = profile_name 137 shared_credentials_filename = os.path.expanduser( 138 self._session.get_config_variable('credentials_file')) 139 self._config_writer.update_config( 140 credential_file_values, 141 shared_credentials_filename) 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awscli/compat.py b/awscli/compat.py --- a/awscli/compat.py +++ b/awscli/compat.py @@ -105,3 +105,20 @@ if 'b' not in mode: encoding = locale.getpreferredencoding() return io.open(filename, mode, encoding=encoding) + + +def compat_input(prompt): + """ + Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32 + program (such as Win32 python), what that program sees is a pipe instead of + a console. This is important because python buffers pipes, and so on a + pty-based terminal, text will not necessarily appear immediately. In most + cases, this isn't a big deal. But when we're doing an interactive prompt, + the result is that the prompts won't display until we fill the buffer. Since + raw_input does not flush the prompt, we need to manually write and flush it. + + See https://github.com/mintty/mintty/issues/56 for more details. + """ + sys.stdout.write(prompt) + sys.stdout.flush() + return raw_input() diff --git a/awscli/customizations/configure/configure.py b/awscli/customizations/configure/configure.py --- a/awscli/customizations/configure/configure.py +++ b/awscli/customizations/configure/configure.py @@ -15,7 +15,7 @@ from botocore.exceptions import ProfileNotFound -from awscli.compat import raw_input +from awscli.compat import compat_input from awscli.customizations.commands import BasicCommand from awscli.customizations.configure.addmodel import AddModelCommand from awscli.customizations.configure.set import ConfigureSetCommand @@ -39,7 +39,7 @@ def get_value(self, current_value, config_name, prompt_text=''): if config_name in ('aws_access_key_id', 'aws_secret_access_key'): current_value = mask_value(current_value) - response = raw_input("%s [%s]: " % (prompt_text, current_value)) + response = compat_input("%s [%s]: " % (prompt_text, current_value)) if not response: # If the user hits enter, we return a value of None # instead of an empty string. That way we can determine
{"golden_diff": "diff --git a/awscli/compat.py b/awscli/compat.py\n--- a/awscli/compat.py\n+++ b/awscli/compat.py\n@@ -105,3 +105,20 @@\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return io.open(filename, mode, encoding=encoding)\n+\n+\n+def compat_input(prompt):\n+ \"\"\"\n+ Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32\n+ program (such as Win32 python), what that program sees is a pipe instead of\n+ a console. This is important because python buffers pipes, and so on a\n+ pty-based terminal, text will not necessarily appear immediately. In most\n+ cases, this isn't a big deal. But when we're doing an interactive prompt,\n+ the result is that the prompts won't display until we fill the buffer. Since\n+ raw_input does not flush the prompt, we need to manually write and flush it.\n+\n+ See https://github.com/mintty/mintty/issues/56 for more details.\n+ \"\"\"\n+ sys.stdout.write(prompt)\n+ sys.stdout.flush()\n+ return raw_input()\ndiff --git a/awscli/customizations/configure/configure.py b/awscli/customizations/configure/configure.py\n--- a/awscli/customizations/configure/configure.py\n+++ b/awscli/customizations/configure/configure.py\n@@ -15,7 +15,7 @@\n \n from botocore.exceptions import ProfileNotFound\n \n-from awscli.compat import raw_input\n+from awscli.compat import compat_input\n from awscli.customizations.commands import BasicCommand\n from awscli.customizations.configure.addmodel import AddModelCommand\n from awscli.customizations.configure.set import ConfigureSetCommand\n@@ -39,7 +39,7 @@\n def get_value(self, current_value, config_name, prompt_text=''):\n if config_name in ('aws_access_key_id', 'aws_secret_access_key'):\n current_value = mask_value(current_value)\n- response = raw_input(\"%s [%s]: \" % (prompt_text, current_value))\n+ response = compat_input(\"%s [%s]: \" % (prompt_text, current_value))\n if not response:\n # If the user hits enter, we return a value of None\n # instead of an empty string. That way we can determine\n", "issue": "Not properly outputting in MINGW\nSystem: Windows 7 64bit\n\nsteps to reproduce:\n1. Open mingw64 (git bash) \n2. Run `aws configure`\n3. Observe no output\n4. Hit enter 4 times\n5. Observe output\n\nIt looks as if AWS CLI is thinking it is outputting text however it actually isn't.\n\n![image](https://cloud.githubusercontent.com/assets/855184/14644114/86f4eb30-0616-11e6-835c-21cb94db9eb4.png)\n\n", "before_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n\n# http://aws.amazon.com/apache2.0/\n\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport os\nimport zipfile\n\nfrom botocore.compat import six\n#import botocore.compat\n\n# If you ever want to import from the vendored six. Add it here and then\n# import from awscli.compat. 
Also try to keep it in alphabetical order.\n# This may get large.\nadvance_iterator = six.advance_iterator\nPY3 = six.PY3\nqueue = six.moves.queue\nshlex_quote = six.moves.shlex_quote\nStringIO = six.StringIO\nurlopen = six.moves.urllib.request.urlopen\n\n# Most, but not all, python installations will have zlib. This is required to\n# compress any files we send via a push. If we can't compress, we can still\n# package the files in a zip container.\ntry:\n import zlib\n ZIP_COMPRESSION_MODE = zipfile.ZIP_DEFLATED\nexcept ImportError:\n ZIP_COMPRESSION_MODE = zipfile.ZIP_STORED\n\n\nclass BinaryStdout(object):\n def __enter__(self):\n if sys.platform == \"win32\":\n import msvcrt\n self.previous_mode = msvcrt.setmode(sys.stdout.fileno(),\n os.O_BINARY)\n return sys.stdout\n\n def __exit__(self, type, value, traceback):\n if sys.platform == \"win32\":\n import msvcrt\n msvcrt.setmode(sys.stdout.fileno(), self.previous_mode) \n\n\nif six.PY3:\n import locale\n import urllib.parse as urlparse\n\n from urllib.error import URLError\n\n raw_input = input\n\n def get_stdout_text_writer():\n return sys.stdout\n\n def compat_open(filename, mode='r', encoding=None):\n \"\"\"Back-port open() that accepts an encoding argument.\n\n In python3 this uses the built in open() and in python2 this\n uses the io.open() function.\n\n If the file is not being opened in binary mode, then we'll\n use locale.getpreferredencoding() to find the preferred\n encoding.\n\n \"\"\"\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return open(filename, mode, encoding=encoding)\n\nelse:\n import codecs\n import locale\n import io\n import urlparse\n\n from urllib2 import URLError\n\n raw_input = raw_input\n\n def get_stdout_text_writer():\n # In python3, all the sys.stdout/sys.stderr streams are in text\n # mode. This means they expect unicode, and will encode the\n # unicode automatically before actually writing to stdout/stderr.\n # In python2, that's not the case. In order to provide a consistent\n # interface, we can create a wrapper around sys.stdout that will take\n # unicode, and automatically encode it to the preferred encoding.\n # That way consumers can just call get_stdout_text_writer() and write\n # unicode to the returned stream. Note that get_stdout_text_writer\n # just returns sys.stdout in the PY3 section above because python3\n # handles this.\n return codecs.getwriter(locale.getpreferredencoding())(sys.stdout)\n\n def compat_open(filename, mode='r', encoding=None):\n # See docstring for compat_open in the PY3 section above.\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return io.open(filename, mode, encoding=encoding)\n", "path": "awscli/compat.py"}, {"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. 
See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nfrom awscli.compat import raw_input\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.addmodel import AddModelCommand\nfrom awscli.customizations.configure.set import ConfigureSetCommand\nfrom awscli.customizations.configure.get import ConfigureGetCommand\nfrom awscli.customizations.configure.list import ConfigureListCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import mask_value\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_configure_cmd(cli):\n cli.register('building-command-table.main',\n ConfigureCommand.add_command)\n\n\nclass InteractivePrompter(object):\n\n def get_value(self, current_value, config_name, prompt_text=''):\n if config_name in ('aws_access_key_id', 'aws_secret_access_key'):\n current_value = mask_value(current_value)\n response = raw_input(\"%s [%s]: \" % (prompt_text, current_value))\n if not response:\n # If the user hits enter, we return a value of None\n # instead of an empty string. That way we can determine\n # whether or not a value has changed.\n response = None\n return response\n\n\nclass ConfigureCommand(BasicCommand):\n NAME = 'configure'\n DESCRIPTION = BasicCommand.FROM_FILE()\n SYNOPSIS = ('aws configure [--profile profile-name]')\n EXAMPLES = (\n 'To create a new configuration::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [None]: accesskey\\n'\n ' AWS Secret Access Key [None]: secretkey\\n'\n ' Default region name [None]: us-west-2\\n'\n ' Default output format [None]:\\n'\n '\\n'\n 'To update just the region name::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [****]:\\n'\n ' AWS Secret Access Key [****]:\\n'\n ' Default region name [us-west-1]: us-west-2\\n'\n ' Default output format [None]:\\n'\n )\n SUBCOMMANDS = [\n {'name': 'list', 'command_class': ConfigureListCommand},\n {'name': 'get', 'command_class': ConfigureGetCommand},\n {'name': 'set', 'command_class': ConfigureSetCommand},\n {'name': 'add-model', 'command_class': AddModelCommand}\n ]\n\n # If you want to add new values to prompt, update this list here.\n VALUES_TO_PROMPT = [\n # (logical_name, config_name, prompt_text)\n ('aws_access_key_id', \"AWS Access Key ID\"),\n ('aws_secret_access_key', \"AWS Secret Access Key\"),\n ('region', \"Default region name\"),\n ('output', \"Default output format\"),\n ]\n\n def __init__(self, session, prompter=None, config_writer=None):\n super(ConfigureCommand, self).__init__(session)\n if prompter is None:\n prompter = InteractivePrompter()\n self._prompter = prompter\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, parsed_args, parsed_globals):\n # Called when invoked with no args \"aws configure\"\n new_values = {}\n # This is the config from the config file scoped to a specific\n # profile.\n try:\n config = self._session.get_scoped_config()\n except ProfileNotFound:\n config = {}\n for config_name, prompt_text in self.VALUES_TO_PROMPT:\n current_value = config.get(config_name)\n new_value = self._prompter.get_value(current_value, config_name,\n prompt_text)\n if new_value is not None and new_value != current_value:\n new_values[config_name] = new_value\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n if new_values:\n 
self._write_out_creds_file_values(new_values,\n parsed_globals.profile)\n if parsed_globals.profile is not None:\n new_values['__section__'] = (\n 'profile %s' % parsed_globals.profile)\n self._config_writer.update_config(new_values, config_filename)\n\n def _write_out_creds_file_values(self, new_values, profile_name):\n # The access_key/secret_key are now *always* written to the shared\n # credentials file (~/.aws/credentials), see aws/aws-cli#847.\n # post-conditions: ~/.aws/credentials will have the updated credential\n # file values and new_values will have the cred vars removed.\n credential_file_values = {}\n if 'aws_access_key_id' in new_values:\n credential_file_values['aws_access_key_id'] = new_values.pop(\n 'aws_access_key_id')\n if 'aws_secret_access_key' in new_values:\n credential_file_values['aws_secret_access_key'] = new_values.pop(\n 'aws_secret_access_key')\n if credential_file_values:\n if profile_name is not None:\n credential_file_values['__section__'] = profile_name\n shared_credentials_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n self._config_writer.update_config(\n credential_file_values,\n shared_credentials_filename)\n", "path": "awscli/customizations/configure/configure.py"}], "after_files": [{"content": "# Copyright 2012-2013 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n\n# http://aws.amazon.com/apache2.0/\n\n# or in the \"license\" file accompanying this file. This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport sys\nimport os\nimport zipfile\n\nfrom botocore.compat import six\n#import botocore.compat\n\n# If you ever want to import from the vendored six. Add it here and then\n# import from awscli.compat. Also try to keep it in alphabetical order.\n# This may get large.\nadvance_iterator = six.advance_iterator\nPY3 = six.PY3\nqueue = six.moves.queue\nshlex_quote = six.moves.shlex_quote\nStringIO = six.StringIO\nurlopen = six.moves.urllib.request.urlopen\n\n# Most, but not all, python installations will have zlib. This is required to\n# compress any files we send via a push. 
If we can't compress, we can still\n# package the files in a zip container.\ntry:\n import zlib\n ZIP_COMPRESSION_MODE = zipfile.ZIP_DEFLATED\nexcept ImportError:\n ZIP_COMPRESSION_MODE = zipfile.ZIP_STORED\n\n\nclass BinaryStdout(object):\n def __enter__(self):\n if sys.platform == \"win32\":\n import msvcrt\n self.previous_mode = msvcrt.setmode(sys.stdout.fileno(),\n os.O_BINARY)\n return sys.stdout\n\n def __exit__(self, type, value, traceback):\n if sys.platform == \"win32\":\n import msvcrt\n msvcrt.setmode(sys.stdout.fileno(), self.previous_mode) \n\n\nif six.PY3:\n import locale\n import urllib.parse as urlparse\n\n from urllib.error import URLError\n\n raw_input = input\n\n def get_stdout_text_writer():\n return sys.stdout\n\n def compat_open(filename, mode='r', encoding=None):\n \"\"\"Back-port open() that accepts an encoding argument.\n\n In python3 this uses the built in open() and in python2 this\n uses the io.open() function.\n\n If the file is not being opened in binary mode, then we'll\n use locale.getpreferredencoding() to find the preferred\n encoding.\n\n \"\"\"\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return open(filename, mode, encoding=encoding)\n\nelse:\n import codecs\n import locale\n import io\n import urlparse\n\n from urllib2 import URLError\n\n raw_input = raw_input\n\n def get_stdout_text_writer():\n # In python3, all the sys.stdout/sys.stderr streams are in text\n # mode. This means they expect unicode, and will encode the\n # unicode automatically before actually writing to stdout/stderr.\n # In python2, that's not the case. In order to provide a consistent\n # interface, we can create a wrapper around sys.stdout that will take\n # unicode, and automatically encode it to the preferred encoding.\n # That way consumers can just call get_stdout_text_writer() and write\n # unicode to the returned stream. Note that get_stdout_text_writer\n # just returns sys.stdout in the PY3 section above because python3\n # handles this.\n return codecs.getwriter(locale.getpreferredencoding())(sys.stdout)\n\n def compat_open(filename, mode='r', encoding=None):\n # See docstring for compat_open in the PY3 section above.\n if 'b' not in mode:\n encoding = locale.getpreferredencoding()\n return io.open(filename, mode, encoding=encoding)\n\n\ndef compat_input(prompt):\n \"\"\"\n Cygwin's pty's are based on pipes. Therefore, when it interacts with a Win32\n program (such as Win32 python), what that program sees is a pipe instead of\n a console. This is important because python buffers pipes, and so on a\n pty-based terminal, text will not necessarily appear immediately. In most\n cases, this isn't a big deal. But when we're doing an interactive prompt,\n the result is that the prompts won't display until we fill the buffer. Since\n raw_input does not flush the prompt, we need to manually write and flush it.\n\n See https://github.com/mintty/mintty/issues/56 for more details.\n \"\"\"\n sys.stdout.write(prompt)\n sys.stdout.flush()\n return raw_input()\n", "path": "awscli/compat.py"}, {"content": "# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You\n# may not use this file except in compliance with the License. A copy of\n# the License is located at\n#\n# http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. 
This file is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF\n# ANY KIND, either express or implied. See the License for the specific\n# language governing permissions and limitations under the License.\nimport os\nimport logging\n\nfrom botocore.exceptions import ProfileNotFound\n\nfrom awscli.compat import compat_input\nfrom awscli.customizations.commands import BasicCommand\nfrom awscli.customizations.configure.addmodel import AddModelCommand\nfrom awscli.customizations.configure.set import ConfigureSetCommand\nfrom awscli.customizations.configure.get import ConfigureGetCommand\nfrom awscli.customizations.configure.list import ConfigureListCommand\nfrom awscli.customizations.configure.writer import ConfigFileWriter\n\nfrom . import mask_value\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_configure_cmd(cli):\n cli.register('building-command-table.main',\n ConfigureCommand.add_command)\n\n\nclass InteractivePrompter(object):\n\n def get_value(self, current_value, config_name, prompt_text=''):\n if config_name in ('aws_access_key_id', 'aws_secret_access_key'):\n current_value = mask_value(current_value)\n response = compat_input(\"%s [%s]: \" % (prompt_text, current_value))\n if not response:\n # If the user hits enter, we return a value of None\n # instead of an empty string. That way we can determine\n # whether or not a value has changed.\n response = None\n return response\n\n\nclass ConfigureCommand(BasicCommand):\n NAME = 'configure'\n DESCRIPTION = BasicCommand.FROM_FILE()\n SYNOPSIS = ('aws configure [--profile profile-name]')\n EXAMPLES = (\n 'To create a new configuration::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [None]: accesskey\\n'\n ' AWS Secret Access Key [None]: secretkey\\n'\n ' Default region name [None]: us-west-2\\n'\n ' Default output format [None]:\\n'\n '\\n'\n 'To update just the region name::\\n'\n '\\n'\n ' $ aws configure\\n'\n ' AWS Access Key ID [****]:\\n'\n ' AWS Secret Access Key [****]:\\n'\n ' Default region name [us-west-1]: us-west-2\\n'\n ' Default output format [None]:\\n'\n )\n SUBCOMMANDS = [\n {'name': 'list', 'command_class': ConfigureListCommand},\n {'name': 'get', 'command_class': ConfigureGetCommand},\n {'name': 'set', 'command_class': ConfigureSetCommand},\n {'name': 'add-model', 'command_class': AddModelCommand}\n ]\n\n # If you want to add new values to prompt, update this list here.\n VALUES_TO_PROMPT = [\n # (logical_name, config_name, prompt_text)\n ('aws_access_key_id', \"AWS Access Key ID\"),\n ('aws_secret_access_key', \"AWS Secret Access Key\"),\n ('region', \"Default region name\"),\n ('output', \"Default output format\"),\n ]\n\n def __init__(self, session, prompter=None, config_writer=None):\n super(ConfigureCommand, self).__init__(session)\n if prompter is None:\n prompter = InteractivePrompter()\n self._prompter = prompter\n if config_writer is None:\n config_writer = ConfigFileWriter()\n self._config_writer = config_writer\n\n def _run_main(self, parsed_args, parsed_globals):\n # Called when invoked with no args \"aws configure\"\n new_values = {}\n # This is the config from the config file scoped to a specific\n # profile.\n try:\n config = self._session.get_scoped_config()\n except ProfileNotFound:\n config = {}\n for config_name, prompt_text in self.VALUES_TO_PROMPT:\n current_value = config.get(config_name)\n new_value = self._prompter.get_value(current_value, config_name,\n prompt_text)\n if new_value is not None and new_value != current_value:\n 
new_values[config_name] = new_value\n config_filename = os.path.expanduser(\n self._session.get_config_variable('config_file'))\n if new_values:\n self._write_out_creds_file_values(new_values,\n parsed_globals.profile)\n if parsed_globals.profile is not None:\n new_values['__section__'] = (\n 'profile %s' % parsed_globals.profile)\n self._config_writer.update_config(new_values, config_filename)\n\n def _write_out_creds_file_values(self, new_values, profile_name):\n # The access_key/secret_key are now *always* written to the shared\n # credentials file (~/.aws/credentials), see aws/aws-cli#847.\n # post-conditions: ~/.aws/credentials will have the updated credential\n # file values and new_values will have the cred vars removed.\n credential_file_values = {}\n if 'aws_access_key_id' in new_values:\n credential_file_values['aws_access_key_id'] = new_values.pop(\n 'aws_access_key_id')\n if 'aws_secret_access_key' in new_values:\n credential_file_values['aws_secret_access_key'] = new_values.pop(\n 'aws_secret_access_key')\n if credential_file_values:\n if profile_name is not None:\n credential_file_values['__section__'] = profile_name\n shared_credentials_filename = os.path.expanduser(\n self._session.get_config_variable('credentials_file'))\n self._config_writer.update_config(\n credential_file_values,\n shared_credentials_filename)\n", "path": "awscli/customizations/configure/configure.py"}]}
3,099
520
gh_patches_debug_8530
rasdani/github-patches
git_diff
dbt-labs__dbt-core-2509
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DBT 0.17.0rc3 - Compilation failure regression ### Describe the bug When trying to compile a project having some schema tests using `dbt_utils.equality` the compilation fails because the model does not exists. However calling DBT run works. The compilation failure happens when using DBT 0.17.0rc3, however compiling the same project using DBT 0.16.1 succeeds. ### Steps To Reproduce The following ZIP file includes a dummy project to reproduce the issue. [dummy.zip](https://github.com/fishtown-analytics/dbt/files/4705434/dummy.zip) You will need to configure a profile for it though. Extract the project and run: ``` dbt deps dbt compile ``` ### Expected behavior The compilation succeeds on both DBT 0.16.1 and 0.17.0rc3 ### Actual behavior The compilation succeeds for DBT 0.16.1 and fails for 0.17.0rc3 with the following error: ``` Encountered an error: Runtime Error Database Error in test dbt_utils_equality_my_second_dbt_model_id__ref_my_first_dbt_model_ (models/example/schema.yml) 002003 (42S02): SQL compilation error: Table 'DB.SCHEMA.MY_SECOND_DBT_MODEL' does not exist or not authorized. ``` ### System information **Which database are you using dbt with?** - [ ] postgres - [ ] redshift - [ ] bigquery - [x] snowflake - [ ] other (specify: ____________) **The output of `dbt --version`:** ``` dbt --version installed version: 0.17.0-rc3 latest version: 0.16.1 Your version of dbt is ahead of the latest release! Plugins: - snowflake: 0.17.0rc3 - postgres: 0.17.0rc3 - redshift: 0.17.0rc3 - bigquery: 0.17.0rc3 ``` **The operating system you're using:** Linux Mint 19.3 **The output of `python --version`:** Python 3.6.9 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/snowflake/dbt/adapters/snowflake/impl.py` Content: ``` 1 from dataclasses import dataclass 2 from typing import Mapping, Any, Optional, List, Union 3 4 import agate 5 6 from dbt.adapters.base.impl import AdapterConfig 7 from dbt.adapters.sql import SQLAdapter 8 from dbt.adapters.sql.impl import ( 9 LIST_SCHEMAS_MACRO_NAME, 10 LIST_RELATIONS_MACRO_NAME, 11 ) 12 from dbt.adapters.snowflake import SnowflakeConnectionManager 13 from dbt.adapters.snowflake import SnowflakeRelation 14 from dbt.adapters.snowflake import SnowflakeColumn 15 from dbt.contracts.graph.manifest import Manifest 16 from dbt.exceptions import RuntimeException, DatabaseException 17 from dbt.utils import filter_null_values 18 19 20 @dataclass 21 class SnowflakeConfig(AdapterConfig): 22 transient: Optional[bool] = None 23 cluster_by: Optional[Union[str, List[str]]] = None 24 automatic_clustering: Optional[bool] = None 25 secure: Optional[bool] = None 26 copy_grants: Optional[bool] = None 27 snowflake_warehouse: Optional[str] = None 28 29 30 class SnowflakeAdapter(SQLAdapter): 31 Relation = SnowflakeRelation 32 Column = SnowflakeColumn 33 ConnectionManager = SnowflakeConnectionManager 34 35 AdapterSpecificConfigs = SnowflakeConfig 36 37 @classmethod 38 def date_function(cls): 39 return "CURRENT_TIMESTAMP()" 40 41 @classmethod 42 def _catalog_filter_table( 43 cls, table: agate.Table, manifest: Manifest 44 ) -> agate.Table: 45 # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force 46 # the column names to their lowercased forms. 
47 lowered = table.rename( 48 column_names=[c.lower() for c in table.column_names] 49 ) 50 return super()._catalog_filter_table(lowered, manifest) 51 52 def _make_match_kwargs(self, database, schema, identifier): 53 quoting = self.config.quoting 54 if identifier is not None and quoting["identifier"] is False: 55 identifier = identifier.upper() 56 57 if schema is not None and quoting["schema"] is False: 58 schema = schema.upper() 59 60 if database is not None and quoting["database"] is False: 61 database = database.upper() 62 63 return filter_null_values( 64 {"identifier": identifier, "schema": schema, "database": database} 65 ) 66 67 def _get_warehouse(self) -> str: 68 _, table = self.execute( 69 'select current_warehouse() as warehouse', 70 fetch=True 71 ) 72 if len(table) == 0 or len(table[0]) == 0: 73 # can this happen? 74 raise RuntimeException( 75 'Could not get current warehouse: no results' 76 ) 77 return str(table[0][0]) 78 79 def _use_warehouse(self, warehouse: str): 80 """Use the given warehouse. Quotes are never applied.""" 81 self.execute('use warehouse {}'.format(warehouse)) 82 83 def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]: 84 default_warehouse = self.config.credentials.warehouse 85 warehouse = config.get('snowflake_warehouse', default_warehouse) 86 if warehouse == default_warehouse or warehouse is None: 87 return None 88 previous = self._get_warehouse() 89 self._use_warehouse(warehouse) 90 return previous 91 92 def post_model_hook( 93 self, config: Mapping[str, Any], context: Optional[str] 94 ) -> None: 95 if context is not None: 96 self._use_warehouse(context) 97 98 def list_schemas(self, database: str) -> List[str]: 99 try: 100 results = self.execute_macro( 101 LIST_SCHEMAS_MACRO_NAME, 102 kwargs={'database': database} 103 ) 104 except DatabaseException as exc: 105 msg = ( 106 f'Database error while listing schemas in database ' 107 f'"{database}"\n{exc}' 108 ) 109 raise RuntimeException(msg) 110 # this uses 'show terse schemas in database', and the column name we 111 # want is 'name' 112 113 return [row['name'] for row in results] 114 115 def list_relations_without_caching( 116 self, schema_relation: SnowflakeRelation 117 ) -> List[SnowflakeRelation]: 118 kwargs = {'schema_relation': schema_relation} 119 try: 120 results = self.execute_macro( 121 LIST_RELATIONS_MACRO_NAME, 122 kwargs=kwargs 123 ) 124 except DatabaseException as exc: 125 # if the schema doesn't exist, we just want to return. 126 # Alternatively, we could query the list of schemas before we start 127 # and skip listing the missing ones, which sounds expensive. 128 if 'Object does not exist' in str(exc): 129 return [] 130 raise 131 132 relations = [] 133 quote_policy = { 134 'database': True, 135 'schema': True, 136 'identifier': True 137 } 138 139 columns = ['database_name', 'schema_name', 'name', 'kind'] 140 for _database, _schema, _identifier, _type in results.select(columns): 141 try: 142 _type = self.Relation.get_relation_type(_type.lower()) 143 except ValueError: 144 _type = self.Relation.External 145 relations.append(self.Relation.create( 146 database=_database, 147 schema=_schema, 148 identifier=_identifier, 149 quote_policy=quote_policy, 150 type=_type 151 )) 152 153 return relations 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py --- a/plugins/snowflake/dbt/adapters/snowflake/impl.py +++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py @@ -112,6 +112,15 @@ return [row['name'] for row in results] + def get_columns_in_relation(self, relation): + try: + return super().get_columns_in_relation(relation) + except DatabaseException as exc: + if 'does not exist or not authorized' in str(exc): + return [] + else: + raise + def list_relations_without_caching( self, schema_relation: SnowflakeRelation ) -> List[SnowflakeRelation]:
{"golden_diff": "diff --git a/plugins/snowflake/dbt/adapters/snowflake/impl.py b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n--- a/plugins/snowflake/dbt/adapters/snowflake/impl.py\n+++ b/plugins/snowflake/dbt/adapters/snowflake/impl.py\n@@ -112,6 +112,15 @@\n \n return [row['name'] for row in results]\n \n+ def get_columns_in_relation(self, relation):\n+ try:\n+ return super().get_columns_in_relation(relation)\n+ except DatabaseException as exc:\n+ if 'does not exist or not authorized' in str(exc):\n+ return []\n+ else:\n+ raise\n+\n def list_relations_without_caching(\n self, schema_relation: SnowflakeRelation\n ) -> List[SnowflakeRelation]:\n", "issue": "DBT 0.17.0rc3 - Compilation failure regression\n### Describe the bug\r\n\r\nWhen trying to compile a project having some schema tests using `dbt_utils.equality` the compilation fails because the model does not exists.\r\nHowever calling DBT run works.\r\n\r\nThe compilation failure happens when using DBT 0.17.0rc3, however compiling the same project using DBT 0.16.1 succeeds.\r\n\r\n### Steps To Reproduce\r\n\r\nThe following ZIP file includes a dummy project to reproduce the issue.\r\n[dummy.zip](https://github.com/fishtown-analytics/dbt/files/4705434/dummy.zip)\r\n\r\nYou will need to configure a profile for it though.\r\n\r\nExtract the project and run:\r\n```\r\ndbt deps\r\ndbt compile\r\n```\r\n\r\n\r\n### Expected behavior\r\n\r\nThe compilation succeeds on both DBT 0.16.1 and 0.17.0rc3\r\n\r\n### Actual behavior\r\n\r\nThe compilation succeeds for DBT 0.16.1 and fails for 0.17.0rc3 with the following error:\r\n\r\n```\r\nEncountered an error:\r\nRuntime Error\r\n Database Error in test dbt_utils_equality_my_second_dbt_model_id__ref_my_first_dbt_model_ (models/example/schema.yml)\r\n 002003 (42S02): SQL compilation error:\r\n Table 'DB.SCHEMA.MY_SECOND_DBT_MODEL' does not exist or not authorized.\r\n```\r\n\r\n### System information\r\n**Which database are you using dbt with?**\r\n- [ ] postgres\r\n- [ ] redshift\r\n- [ ] bigquery\r\n- [x] snowflake\r\n- [ ] other (specify: ____________)\r\n\r\n\r\n**The output of `dbt --version`:**\r\n```\r\ndbt --version\r\ninstalled version: 0.17.0-rc3\r\n latest version: 0.16.1\r\n\r\nYour version of dbt is ahead of the latest release!\r\n\r\nPlugins:\r\n - snowflake: 0.17.0rc3\r\n - postgres: 0.17.0rc3\r\n - redshift: 0.17.0rc3\r\n - bigquery: 0.17.0rc3\r\n```\r\n\r\n**The operating system you're using:** Linux Mint 19.3\r\n\r\n\r\n**The output of `python --version`:** Python 3.6.9\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom typing import Mapping, Any, Optional, List, Union\n\nimport agate\n\nfrom dbt.adapters.base.impl import AdapterConfig\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import (\n LIST_SCHEMAS_MACRO_NAME,\n LIST_RELATIONS_MACRO_NAME,\n)\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\n@dataclass\nclass SnowflakeConfig(AdapterConfig):\n transient: Optional[bool] = None\n cluster_by: Optional[Union[str, List[str]]] = None\n automatic_clustering: Optional[bool] = None\n secure: Optional[bool] = None\n copy_grants: Optional[bool] = None\n snowflake_warehouse: Optional[str] = None\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = 
SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = SnowflakeConfig\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n\n def list_relations_without_caching(\n self, schema_relation: SnowflakeRelation\n ) -> List[SnowflakeRelation]:\n kwargs = {'schema_relation': schema_relation}\n try:\n results = self.execute_macro(\n LIST_RELATIONS_MACRO_NAME,\n kwargs=kwargs\n )\n except DatabaseException as exc:\n # if the schema doesn't exist, we just want to return.\n # Alternatively, we could query the list of schemas before we start\n # and skip listing the missing ones, which sounds expensive.\n if 'Object does not exist' in str(exc):\n return []\n raise\n\n relations = []\n quote_policy = {\n 'database': True,\n 'schema': True,\n 'identifier': True\n }\n\n columns = ['database_name', 'schema_name', 'name', 'kind']\n for _database, _schema, _identifier, _type in results.select(columns):\n try:\n _type = self.Relation.get_relation_type(_type.lower())\n except ValueError:\n _type = self.Relation.External\n relations.append(self.Relation.create(\n database=_database,\n schema=_schema,\n identifier=_identifier,\n quote_policy=quote_policy,\n type=_type\n ))\n\n 
return relations\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}], "after_files": [{"content": "from dataclasses import dataclass\nfrom typing import Mapping, Any, Optional, List, Union\n\nimport agate\n\nfrom dbt.adapters.base.impl import AdapterConfig\nfrom dbt.adapters.sql import SQLAdapter\nfrom dbt.adapters.sql.impl import (\n LIST_SCHEMAS_MACRO_NAME,\n LIST_RELATIONS_MACRO_NAME,\n)\nfrom dbt.adapters.snowflake import SnowflakeConnectionManager\nfrom dbt.adapters.snowflake import SnowflakeRelation\nfrom dbt.adapters.snowflake import SnowflakeColumn\nfrom dbt.contracts.graph.manifest import Manifest\nfrom dbt.exceptions import RuntimeException, DatabaseException\nfrom dbt.utils import filter_null_values\n\n\n@dataclass\nclass SnowflakeConfig(AdapterConfig):\n transient: Optional[bool] = None\n cluster_by: Optional[Union[str, List[str]]] = None\n automatic_clustering: Optional[bool] = None\n secure: Optional[bool] = None\n copy_grants: Optional[bool] = None\n snowflake_warehouse: Optional[str] = None\n\n\nclass SnowflakeAdapter(SQLAdapter):\n Relation = SnowflakeRelation\n Column = SnowflakeColumn\n ConnectionManager = SnowflakeConnectionManager\n\n AdapterSpecificConfigs = SnowflakeConfig\n\n @classmethod\n def date_function(cls):\n return \"CURRENT_TIMESTAMP()\"\n\n @classmethod\n def _catalog_filter_table(\n cls, table: agate.Table, manifest: Manifest\n ) -> agate.Table:\n # On snowflake, users can set QUOTED_IDENTIFIERS_IGNORE_CASE, so force\n # the column names to their lowercased forms.\n lowered = table.rename(\n column_names=[c.lower() for c in table.column_names]\n )\n return super()._catalog_filter_table(lowered, manifest)\n\n def _make_match_kwargs(self, database, schema, identifier):\n quoting = self.config.quoting\n if identifier is not None and quoting[\"identifier\"] is False:\n identifier = identifier.upper()\n\n if schema is not None and quoting[\"schema\"] is False:\n schema = schema.upper()\n\n if database is not None and quoting[\"database\"] is False:\n database = database.upper()\n\n return filter_null_values(\n {\"identifier\": identifier, \"schema\": schema, \"database\": database}\n )\n\n def _get_warehouse(self) -> str:\n _, table = self.execute(\n 'select current_warehouse() as warehouse',\n fetch=True\n )\n if len(table) == 0 or len(table[0]) == 0:\n # can this happen?\n raise RuntimeException(\n 'Could not get current warehouse: no results'\n )\n return str(table[0][0])\n\n def _use_warehouse(self, warehouse: str):\n \"\"\"Use the given warehouse. 
Quotes are never applied.\"\"\"\n self.execute('use warehouse {}'.format(warehouse))\n\n def pre_model_hook(self, config: Mapping[str, Any]) -> Optional[str]:\n default_warehouse = self.config.credentials.warehouse\n warehouse = config.get('snowflake_warehouse', default_warehouse)\n if warehouse == default_warehouse or warehouse is None:\n return None\n previous = self._get_warehouse()\n self._use_warehouse(warehouse)\n return previous\n\n def post_model_hook(\n self, config: Mapping[str, Any], context: Optional[str]\n ) -> None:\n if context is not None:\n self._use_warehouse(context)\n\n def list_schemas(self, database: str) -> List[str]:\n try:\n results = self.execute_macro(\n LIST_SCHEMAS_MACRO_NAME,\n kwargs={'database': database}\n )\n except DatabaseException as exc:\n msg = (\n f'Database error while listing schemas in database '\n f'\"{database}\"\\n{exc}'\n )\n raise RuntimeException(msg)\n # this uses 'show terse schemas in database', and the column name we\n # want is 'name'\n\n return [row['name'] for row in results]\n\n def get_columns_in_relation(self, relation):\n try:\n return super().get_columns_in_relation(relation)\n except DatabaseException as exc:\n if 'does not exist or not authorized' in str(exc):\n return []\n else:\n raise\n\n def list_relations_without_caching(\n self, schema_relation: SnowflakeRelation\n ) -> List[SnowflakeRelation]:\n kwargs = {'schema_relation': schema_relation}\n try:\n results = self.execute_macro(\n LIST_RELATIONS_MACRO_NAME,\n kwargs=kwargs\n )\n except DatabaseException as exc:\n # if the schema doesn't exist, we just want to return.\n # Alternatively, we could query the list of schemas before we start\n # and skip listing the missing ones, which sounds expensive.\n if 'Object does not exist' in str(exc):\n return []\n raise\n\n relations = []\n quote_policy = {\n 'database': True,\n 'schema': True,\n 'identifier': True\n }\n\n columns = ['database_name', 'schema_name', 'name', 'kind']\n for _database, _schema, _identifier, _type in results.select(columns):\n try:\n _type = self.Relation.get_relation_type(_type.lower())\n except ValueError:\n _type = self.Relation.External\n relations.append(self.Relation.create(\n database=_database,\n schema=_schema,\n identifier=_identifier,\n quote_policy=quote_policy,\n type=_type\n ))\n\n return relations\n", "path": "plugins/snowflake/dbt/adapters/snowflake/impl.py"}]}
2,263
187
gh_patches_debug_12235
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2303
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- _OTEL_METRICS_EXPORTER env var should be OTEL_METRICS_EXPORTER The environment variable `_OTEL_METRICS_EXPORTER` is prefixed with an underscore, but there's no need for it as that environment variable is marked as stable in the specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-api/src/opentelemetry/environment_variables.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 OTEL_PROPAGATORS = "OTEL_PROPAGATORS" 16 """ 17 .. envvar:: OTEL_PROPAGATORS 18 """ 19 20 OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT" 21 """ 22 .. envvar:: OTEL_PYTHON_CONTEXT 23 """ 24 25 OTEL_PYTHON_ID_GENERATOR = "OTEL_PYTHON_ID_GENERATOR" 26 """ 27 .. envvar:: OTEL_PYTHON_ID_GENERATOR 28 """ 29 30 OTEL_TRACES_EXPORTER = "OTEL_TRACES_EXPORTER" 31 """ 32 .. envvar:: OTEL_TRACES_EXPORTER 33 """ 34 35 OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER" 36 """ 37 .. envvar:: OTEL_PYTHON_TRACER_PROVIDER 38 """ 39 40 _OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER" 41 """ 42 .. envvar:: OTEL_PYTHON_METER_PROVIDER 43 """ 44 45 _OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER" 46 """ 47 .. envvar:: OTEL_METRICS_EXPORTER 48 49 """ 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-api/src/opentelemetry/environment_variables.py b/opentelemetry-api/src/opentelemetry/environment_variables.py --- a/opentelemetry-api/src/opentelemetry/environment_variables.py +++ b/opentelemetry-api/src/opentelemetry/environment_variables.py @@ -12,6 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. +OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER" +""" +.. envvar:: OTEL_METRICS_EXPORTER + +""" + OTEL_PROPAGATORS = "OTEL_PROPAGATORS" """ .. envvar:: OTEL_PROPAGATORS @@ -41,9 +47,3 @@ """ .. envvar:: OTEL_PYTHON_METER_PROVIDER """ - -_OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER" -""" -.. envvar:: OTEL_METRICS_EXPORTER - -"""
{"golden_diff": "diff --git a/opentelemetry-api/src/opentelemetry/environment_variables.py b/opentelemetry-api/src/opentelemetry/environment_variables.py\n--- a/opentelemetry-api/src/opentelemetry/environment_variables.py\n+++ b/opentelemetry-api/src/opentelemetry/environment_variables.py\n@@ -12,6 +12,12 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n+\"\"\"\n+.. envvar:: OTEL_METRICS_EXPORTER\n+\n+\"\"\"\n+\n OTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n \"\"\"\n .. envvar:: OTEL_PROPAGATORS\n@@ -41,9 +47,3 @@\n \"\"\"\n .. envvar:: OTEL_PYTHON_METER_PROVIDER\n \"\"\"\n-\n-_OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n-\"\"\"\n-.. envvar:: OTEL_METRICS_EXPORTER\n-\n-\"\"\"\n", "issue": "_OTEL_METRICS_EXPORTER env var should be OTEL_METRICS_EXPORTER\nThe environment variable `_OTEL_METRICS_EXPORTER` is prefixed with an underscore, but there's no need for it as that environment variable is marked as stable in the specification https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/sdk-environment-variables.md#exporter-selection\r\n\r\n\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nOTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n\"\"\"\n.. envvar:: OTEL_PROPAGATORS\n\"\"\"\n\nOTEL_PYTHON_CONTEXT = \"OTEL_PYTHON_CONTEXT\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_CONTEXT\n\"\"\"\n\nOTEL_PYTHON_ID_GENERATOR = \"OTEL_PYTHON_ID_GENERATOR\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_ID_GENERATOR\n\"\"\"\n\nOTEL_TRACES_EXPORTER = \"OTEL_TRACES_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_TRACES_EXPORTER\n\"\"\"\n\nOTEL_PYTHON_TRACER_PROVIDER = \"OTEL_PYTHON_TRACER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_TRACER_PROVIDER\n\"\"\"\n\n_OTEL_PYTHON_METER_PROVIDER = \"OTEL_PYTHON_METER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_METER_PROVIDER\n\"\"\"\n\n_OTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_METRICS_EXPORTER\n\n\"\"\"\n", "path": "opentelemetry-api/src/opentelemetry/environment_variables.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nOTEL_METRICS_EXPORTER = \"OTEL_METRICS_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_METRICS_EXPORTER\n\n\"\"\"\n\nOTEL_PROPAGATORS = \"OTEL_PROPAGATORS\"\n\"\"\"\n.. envvar:: OTEL_PROPAGATORS\n\"\"\"\n\nOTEL_PYTHON_CONTEXT = \"OTEL_PYTHON_CONTEXT\"\n\"\"\"\n.. 
envvar:: OTEL_PYTHON_CONTEXT\n\"\"\"\n\nOTEL_PYTHON_ID_GENERATOR = \"OTEL_PYTHON_ID_GENERATOR\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_ID_GENERATOR\n\"\"\"\n\nOTEL_TRACES_EXPORTER = \"OTEL_TRACES_EXPORTER\"\n\"\"\"\n.. envvar:: OTEL_TRACES_EXPORTER\n\"\"\"\n\nOTEL_PYTHON_TRACER_PROVIDER = \"OTEL_PYTHON_TRACER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_TRACER_PROVIDER\n\"\"\"\n\n_OTEL_PYTHON_METER_PROVIDER = \"OTEL_PYTHON_METER_PROVIDER\"\n\"\"\"\n.. envvar:: OTEL_PYTHON_METER_PROVIDER\n\"\"\"\n", "path": "opentelemetry-api/src/opentelemetry/environment_variables.py"}]}
777
206
gh_patches_debug_42145
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-1140
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Walmart Spider Error Something with the Walmart spider appears to be failing. When importing the geojson file from alltheplaces.xyz to qgis or geojson.io, there are a large number of locations missing in the western US. ![image](https://user-images.githubusercontent.com/13926434/47406283-1fa81c80-d71b-11e8-8126-5264e71e1525.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/walmart.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import json 4 5 from locations.items import GeojsonPointItem 6 7 8 class WalmartSpider(scrapy.Spider): 9 name = "walmart" 10 allowed_domains = ["walmart.com"] 11 start_urls = ( 12 'https://www.walmart.com/sitemap_store_main.xml', 13 ) 14 15 def store_hours(self, store_hours): 16 if store_hours == 'Mo-Su': 17 return u'24/7' 18 elif store_hours is None: 19 return None 20 else: 21 return store_hours 22 23 def parse(self, response): 24 response.selector.remove_namespaces() 25 for u in response.xpath('//loc/text()').extract(): 26 if u.endswith('/details'): 27 yield scrapy.Request(u.strip(), callback=self.parse_store) 28 29 def parse_store(self, response): 30 addr = response.xpath('//div[@itemprop="address"]')[0] 31 yield GeojsonPointItem( 32 lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(), 33 lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(), 34 ref=response.url.split('/')[4], 35 phone=response.xpath('//meta[@itemprop="telephone"]/@content').extract_first(), 36 name=response.xpath('//meta[@itemprop="name"]/@content').extract_first(), 37 opening_hours=self.store_hours(response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()), 38 addr_full=addr.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(), 39 city=addr.xpath('//span[@itemprop="locality"]/text()').extract_first(), 40 state=addr.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(), 41 postcode=addr.xpath('//span[@itemprop="postalCode"]/text()').extract_first(), 42 ) 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/walmart.py b/locations/spiders/walmart.py --- a/locations/spiders/walmart.py +++ b/locations/spiders/walmart.py @@ -1,7 +1,9 @@ # -*- coding: utf-8 -*- import scrapy import json +import re +from collections import defaultdict from locations.items import GeojsonPointItem @@ -11,14 +13,39 @@ start_urls = ( 'https://www.walmart.com/sitemap_store_main.xml', ) + retries = defaultdict(int) def store_hours(self, store_hours): - if store_hours == 'Mo-Su': + if store_hours.get('operationalHours').get('open24Hours') is True: return u'24/7' - elif store_hours is None: + elif not store_hours.get('operationalHoursCombined'): return None else: - return store_hours + op_hours = store_hours.get('operationalHoursCombined') + open_hours = [] + for op_hour in op_hours: + if op_hour.get('dailyHours').get('closed') is True: + continue + + if op_hour.get('dailyHours').get('openFullDay') is True: + start_hr = '00:00' + end_hr = '24:00' + else: + start_hr = op_hour.get('dailyHours').get('startHr') + end_hr = op_hour.get('dailyHours').get('endHr') + + start_day = op_hour.get('startDayName') + end_day = op_hour.get('endDayName') + + if end_day is None: + end_day = '' + + hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr + open_hours.append(hours) + + hours_combined = '; '.join(open_hours) + + return hours_combined def parse(self, response): response.selector.remove_namespaces() @@ -27,16 +54,30 @@ yield scrapy.Request(u.strip(), callback=self.parse_store) def parse_store(self, response): - addr = response.xpath('//div[@itemprop="address"]')[0] + script = response.xpath("//script[contains(.,'WML_REDUX_INITIAL_STATE')]").extract_first() + # In rare cases will hit page before script tag loads with content + if script is None: + if self.retries.get(response.url, 0) <= 2: + self.retries[response.url] += 1 + yield scrapy.Request(response.url, callback=self.parse_store) # Try again + else: + raise Exception('Retried too many times') + + script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script, + flags=re.IGNORECASE | re.DOTALL).group(1) + + store_data = json.loads(script_content).get('store') + yield GeojsonPointItem( - lat=response.xpath('//meta[@itemprop="latitude"]/@content').extract_first(), - lon=response.xpath('//meta[@itemprop="longitude"]/@content').extract_first(), - ref=response.url.split('/')[4], - phone=response.xpath('//meta[@itemprop="telephone"]/@content').extract_first(), - name=response.xpath('//meta[@itemprop="name"]/@content').extract_first(), - opening_hours=self.store_hours(response.xpath('//meta[@itemprop="openingHours"]/@content').extract_first()), - addr_full=addr.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(), - city=addr.xpath('//span[@itemprop="locality"]/text()').extract_first(), - state=addr.xpath('//span[@itemprop="addressRegion"]/text()').extract_first(), - postcode=addr.xpath('//span[@itemprop="postalCode"]/text()').extract_first(), + lat=store_data.get('geoPoint').get('latitude'), + lon=store_data.get('geoPoint').get('longitude'), + ref=store_data.get('id'), + phone=store_data.get('phone'), + name=store_data.get('displayName'), + opening_hours=self.store_hours(store_data), + addr_full=store_data.get('address').get('streetAddress'), + city=store_data.get('address').get('city'), + state=store_data.get('address').get('state'), + postcode=store_data.get('address').get('postalCode'), + website=store_data.get('detailsPageURL'), )
{"golden_diff": "diff --git a/locations/spiders/walmart.py b/locations/spiders/walmart.py\n--- a/locations/spiders/walmart.py\n+++ b/locations/spiders/walmart.py\n@@ -1,7 +1,9 @@\n # -*- coding: utf-8 -*-\n import scrapy\n import json\n+import re\n \n+from collections import defaultdict\n from locations.items import GeojsonPointItem\n \n \n@@ -11,14 +13,39 @@\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n+ retries = defaultdict(int)\n \n def store_hours(self, store_hours):\n- if store_hours == 'Mo-Su':\n+ if store_hours.get('operationalHours').get('open24Hours') is True:\n return u'24/7'\n- elif store_hours is None:\n+ elif not store_hours.get('operationalHoursCombined'):\n return None\n else:\n- return store_hours\n+ op_hours = store_hours.get('operationalHoursCombined')\n+ open_hours = []\n+ for op_hour in op_hours:\n+ if op_hour.get('dailyHours').get('closed') is True:\n+ continue\n+\n+ if op_hour.get('dailyHours').get('openFullDay') is True:\n+ start_hr = '00:00'\n+ end_hr = '24:00'\n+ else:\n+ start_hr = op_hour.get('dailyHours').get('startHr')\n+ end_hr = op_hour.get('dailyHours').get('endHr')\n+\n+ start_day = op_hour.get('startDayName')\n+ end_day = op_hour.get('endDayName')\n+\n+ if end_day is None:\n+ end_day = ''\n+\n+ hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr\n+ open_hours.append(hours)\n+\n+ hours_combined = '; '.join(open_hours)\n+\n+ return hours_combined\n \n def parse(self, response):\n response.selector.remove_namespaces()\n@@ -27,16 +54,30 @@\n yield scrapy.Request(u.strip(), callback=self.parse_store)\n \n def parse_store(self, response):\n- addr = response.xpath('//div[@itemprop=\"address\"]')[0]\n+ script = response.xpath(\"//script[contains(.,'WML_REDUX_INITIAL_STATE')]\").extract_first()\n+ # In rare cases will hit page before script tag loads with content\n+ if script is None:\n+ if self.retries.get(response.url, 0) <= 2:\n+ self.retries[response.url] += 1\n+ yield scrapy.Request(response.url, callback=self.parse_store) # Try again\n+ else:\n+ raise Exception('Retried too many times')\n+\n+ script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,\n+ flags=re.IGNORECASE | re.DOTALL).group(1)\n+\n+ store_data = json.loads(script_content).get('store')\n+\n yield GeojsonPointItem(\n- lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n- lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n- ref=response.url.split('/')[4],\n- phone=response.xpath('//meta[@itemprop=\"telephone\"]/@content').extract_first(),\n- name=response.xpath('//meta[@itemprop=\"name\"]/@content').extract_first(),\n- opening_hours=self.store_hours(response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()),\n- addr_full=addr.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n- city=addr.xpath('//span[@itemprop=\"locality\"]/text()').extract_first(),\n- state=addr.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n- postcode=addr.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n+ lat=store_data.get('geoPoint').get('latitude'),\n+ lon=store_data.get('geoPoint').get('longitude'),\n+ ref=store_data.get('id'),\n+ phone=store_data.get('phone'),\n+ name=store_data.get('displayName'),\n+ opening_hours=self.store_hours(store_data),\n+ addr_full=store_data.get('address').get('streetAddress'),\n+ city=store_data.get('address').get('city'),\n+ state=store_data.get('address').get('state'),\n+ 
postcode=store_data.get('address').get('postalCode'),\n+ website=store_data.get('detailsPageURL'),\n )\n", "issue": "Walmart Spider Error\nSomething with the Walmart spider appears to be failing. When importing the geojson file from alltheplaces.xyz to qgis or geojson.io, there are a large number of locations missing in the western US.\r\n\r\n![image](https://user-images.githubusercontent.com/13926434/47406283-1fa81c80-d71b-11e8-8126-5264e71e1525.png)\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\n\n\nclass WalmartSpider(scrapy.Spider):\n name = \"walmart\"\n allowed_domains = [\"walmart.com\"]\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n\n def store_hours(self, store_hours):\n if store_hours == 'Mo-Su':\n return u'24/7'\n elif store_hours is None:\n return None\n else:\n return store_hours\n\n def parse(self, response):\n response.selector.remove_namespaces()\n for u in response.xpath('//loc/text()').extract():\n if u.endswith('/details'):\n yield scrapy.Request(u.strip(), callback=self.parse_store)\n\n def parse_store(self, response):\n addr = response.xpath('//div[@itemprop=\"address\"]')[0]\n yield GeojsonPointItem(\n lat=response.xpath('//meta[@itemprop=\"latitude\"]/@content').extract_first(),\n lon=response.xpath('//meta[@itemprop=\"longitude\"]/@content').extract_first(),\n ref=response.url.split('/')[4],\n phone=response.xpath('//meta[@itemprop=\"telephone\"]/@content').extract_first(),\n name=response.xpath('//meta[@itemprop=\"name\"]/@content').extract_first(),\n opening_hours=self.store_hours(response.xpath('//meta[@itemprop=\"openingHours\"]/@content').extract_first()),\n addr_full=addr.xpath('//span[@itemprop=\"streetAddress\"]/text()').extract_first(),\n city=addr.xpath('//span[@itemprop=\"locality\"]/text()').extract_first(),\n state=addr.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract_first(),\n postcode=addr.xpath('//span[@itemprop=\"postalCode\"]/text()').extract_first(),\n )\n", "path": "locations/spiders/walmart.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom collections import defaultdict\nfrom locations.items import GeojsonPointItem\n\n\nclass WalmartSpider(scrapy.Spider):\n name = \"walmart\"\n allowed_domains = [\"walmart.com\"]\n start_urls = (\n 'https://www.walmart.com/sitemap_store_main.xml',\n )\n retries = defaultdict(int)\n\n def store_hours(self, store_hours):\n if store_hours.get('operationalHours').get('open24Hours') is True:\n return u'24/7'\n elif not store_hours.get('operationalHoursCombined'):\n return None\n else:\n op_hours = store_hours.get('operationalHoursCombined')\n open_hours = []\n for op_hour in op_hours:\n if op_hour.get('dailyHours').get('closed') is True:\n continue\n\n if op_hour.get('dailyHours').get('openFullDay') is True:\n start_hr = '00:00'\n end_hr = '24:00'\n else:\n start_hr = op_hour.get('dailyHours').get('startHr')\n end_hr = op_hour.get('dailyHours').get('endHr')\n\n start_day = op_hour.get('startDayName')\n end_day = op_hour.get('endDayName')\n\n if end_day is None:\n end_day = ''\n\n hours = start_day+'-'+end_day+' '+start_hr+'-'+end_hr\n open_hours.append(hours)\n\n hours_combined = '; '.join(open_hours)\n\n return hours_combined\n\n def parse(self, response):\n response.selector.remove_namespaces()\n for u in response.xpath('//loc/text()').extract():\n if u.endswith('/details'):\n yield scrapy.Request(u.strip(), 
callback=self.parse_store)\n\n def parse_store(self, response):\n script = response.xpath(\"//script[contains(.,'WML_REDUX_INITIAL_STATE')]\").extract_first()\n # In rare cases will hit page before script tag loads with content\n if script is None:\n if self.retries.get(response.url, 0) <= 2:\n self.retries[response.url] += 1\n yield scrapy.Request(response.url, callback=self.parse_store) # Try again\n else:\n raise Exception('Retried too many times')\n\n script_content = re.search(r'window.__WML_REDUX_INITIAL_STATE__ = (.*);</script>', script,\n flags=re.IGNORECASE | re.DOTALL).group(1)\n\n store_data = json.loads(script_content).get('store')\n\n yield GeojsonPointItem(\n lat=store_data.get('geoPoint').get('latitude'),\n lon=store_data.get('geoPoint').get('longitude'),\n ref=store_data.get('id'),\n phone=store_data.get('phone'),\n name=store_data.get('displayName'),\n opening_hours=self.store_hours(store_data),\n addr_full=store_data.get('address').get('streetAddress'),\n city=store_data.get('address').get('city'),\n state=store_data.get('address').get('state'),\n postcode=store_data.get('address').get('postalCode'),\n website=store_data.get('detailsPageURL'),\n )\n", "path": "locations/spiders/walmart.py"}]}
834
1012
gh_patches_debug_40967
rasdani/github-patches
git_diff
lk-geimfari__mimesis-873
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove download_image function. # Bug report This is not a kind of problem mimesis solves. So it's pretty annoying when tests fail because of it all the time. It's better just remove it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mimesis/shortcuts.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """This module is provide internal util functions.""" 4 5 import ssl 6 from os import path 7 from typing import Union 8 from urllib import request 9 from uuid import uuid4 10 11 __all__ = ['download_image', 'luhn_checksum'] 12 13 14 def luhn_checksum(num: str) -> str: 15 """Calculate a checksum for num using the Luhn algorithm. 16 17 :param num: The number to calculate a checksum for as a string. 18 :return: Checksum for number. 19 """ 20 check = 0 21 for i, s in enumerate(reversed(num)): 22 sx = int(s) 23 sx = sx * 2 if i % 2 == 0 else sx 24 sx = sx - 9 if sx > 9 else sx 25 check += sx 26 return str(check * 9 % 10) 27 28 29 def download_image(url: str = '', save_path: str = '', 30 unverified_ctx: bool = False) -> Union[None, str]: 31 """Download image and save in current directory on local machine. 32 33 :param url: URL to image. 34 :param save_path: Saving path. 35 :param unverified_ctx: Create unverified context. 36 :return: Path to downloaded image. 37 :rtype: str or None 38 """ 39 if unverified_ctx: 40 ssl._create_default_https_context = ssl._create_unverified_context 41 42 if url: 43 image_name = url.rsplit('/')[-1] 44 45 splitted_name = image_name.rsplit('.') 46 if len(splitted_name) < 2: 47 image_name = '{}.jpg'.format(uuid4()) 48 else: 49 image_name = '{}.{}'.format(uuid4(), splitted_name[-1]) 50 full_image_path = path.join(save_path, image_name) 51 request.urlretrieve(url, full_image_path) 52 return full_image_path 53 return None 54 ``` Path: `mimesis/providers/internet.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Provides data related to internet.""" 4 5 import urllib.error 6 import urllib.request 7 from ipaddress import IPv4Address, IPv6Address 8 from typing import List, Optional, Union 9 10 from mimesis.data import ( 11 EMOJI, 12 HASHTAGS, 13 HTTP_METHODS, 14 HTTP_STATUS_CODES, 15 HTTP_STATUS_MSGS, 16 NETWORK_PROTOCOLS, 17 TLD, 18 USER_AGENTS, 19 USERNAMES, 20 ) 21 from mimesis.enums import Layer, MimeType, PortRange, TLDType 22 from mimesis.exceptions import NonEnumerableError 23 from mimesis.providers.base import BaseProvider 24 from mimesis.providers.file import File 25 26 __all__ = ['Internet'] 27 28 29 class Internet(BaseProvider): 30 """Class for generating data related to the internet.""" 31 32 def __init__(self, *args, **kwargs): 33 """Initialize attributes. 34 35 :param args: Arguments. 36 :param kwargs: Keyword arguments. 37 """ 38 super().__init__(*args, **kwargs) 39 self.__file = File(seed=self.seed) 40 self._MAX_IPV4 = (2 ** 32) - 1 41 self._MAX_IPV6 = (2 ** 128) - 1 42 43 class Meta: 44 """Class for metadata.""" 45 46 name = 'internet' 47 48 def content_type(self, mime_type: Optional[MimeType] = None) -> str: 49 """Get a random HTTP content type. 50 51 :return: Content type. 52 53 :Example: 54 Content-Type: application/json 55 """ 56 fmt = self.__file.mime_type(type_=mime_type) 57 return 'Content-Type: {}'.format(fmt) 58 59 def http_status_message(self) -> str: 60 """Get a random HTTP status message. 61 62 :return: HTTP status message. 
63 64 :Example: 65 200 OK 66 """ 67 return self.random.choice(HTTP_STATUS_MSGS) 68 69 def http_status_code(self) -> int: 70 """Get a random HTTP status code. 71 72 :return: HTTP status. 73 74 :Example: 75 200 76 """ 77 return self.random.choice(HTTP_STATUS_CODES) 78 79 def http_method(self) -> str: 80 """Get a random HTTP method. 81 82 :return: HTTP method. 83 84 :Example: 85 POST 86 """ 87 return self.random.choice(HTTP_METHODS) 88 89 def ip_v4_object(self) -> IPv4Address: 90 """Generate random IPv4Address object. 91 92 See documentation for module ipaddress: 93 https://docs.python.org/3.7/library/ipaddress.html 94 95 :return: IPv4Address object. 96 """ 97 return IPv4Address( 98 self.random.randint(0, self._MAX_IPV4), 99 ) 100 101 def ip_v4(self, with_port: bool = False, 102 port_range: PortRange = PortRange.ALL) -> str: 103 """Generate a random IPv4 address as string. 104 105 :param port_range: PortRange enum object. 106 :param with_port: Add port from PortRange to IP. 107 :return: IPv4 address as string. 108 109 :Example: 110 19.121.223.58 or 19.121.223.58:8000 111 """ 112 ip = str(self.ip_v4_object()) 113 114 if with_port: 115 port = self.port(port_range=port_range) 116 return '{}:{}'.format(ip, port) 117 118 return ip 119 120 def ip_v6_object(self) -> IPv6Address: 121 """Generate random IPv6Address object. 122 123 See documentation for module ipaddress: 124 https://docs.python.org/3.7/library/ipaddress.html 125 126 :return: IPv6Address object. 127 """ 128 return IPv6Address( 129 self.random.randint( 130 0, self._MAX_IPV6, 131 ), 132 ) 133 134 def ip_v6(self) -> str: 135 """Generate a random IPv6 address as string. 136 137 :return: IPv6 address string. 138 139 :Example: 140 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3 141 """ 142 return str(self.ip_v6_object()) 143 144 def mac_address(self) -> str: 145 """Generate a random MAC address. 146 147 :return: Random MAC address. 148 149 :Example: 150 00:16:3e:25:e7:b1 151 """ 152 mac_hex = [ 153 0x00, 0x16, 0x3e, 154 self.random.randint(0x00, 0x7f), 155 self.random.randint(0x00, 0xff), 156 self.random.randint(0x00, 0xff), 157 ] 158 mac = map(lambda x: '{:02x}'.format(x), mac_hex) 159 return ':'.join(mac) 160 161 def emoji(self) -> str: 162 """Get a random emoji shortcut code. 163 164 :return: Emoji code. 165 166 :Example: 167 :kissing: 168 """ 169 return self.random.choice(EMOJI) 170 171 @staticmethod 172 def image_placeholder(width: Union[int, str] = 1920, 173 height: Union[int, str] = 1080) -> str: 174 """Generate a link to the image placeholder. 175 176 :param width: Width of image. 177 :param height: Height of image. 178 :return: URL to image placeholder. 179 """ 180 url = 'http://placehold.it/{width}x{height}' 181 return url.format(width=width, height=height) 182 183 @staticmethod 184 def stock_image(width: Union[int, str] = 1920, 185 height: Union[int, str] = 1080, 186 keywords: Optional[List[str]] = None, 187 writable: bool = False) -> Union[str, bytes]: 188 """Generate random stock image (JPEG) hosted on Unsplash. 189 190 See «Random search term» on https://source.unsplash.com/ 191 for more details. 192 193 .. note:: This method required an active HTTP connection. 194 195 :param width: Width of the image. 196 :param height: Height of the image. 197 :param keywords: List of search keywords. 198 :param writable: Return image as sequence ob bytes. 199 :return: Link to the image. 
200 """ 201 api = 'https://source.unsplash.com/{}x{}?{}' 202 203 if keywords is not None: 204 keywords_str = ','.join(keywords) 205 else: 206 keywords_str = '' 207 208 url = api.format(width, height, keywords_str) 209 210 try: 211 response = urllib.request.urlopen(url) 212 if writable: 213 return response.read() 214 url = response.geturl() 215 return url 216 except urllib.error.URLError: 217 raise urllib.error.URLError( 218 'Required an active HTTP connection') 219 220 def hashtags(self, quantity: int = 4) -> Union[str, list]: 221 """Generate a list of hashtags. 222 223 :param quantity: The quantity of hashtags. 224 :return: The list of hashtags. 225 :raises NonEnumerableError: if category is not in Hashtag. 226 227 :Example: 228 ['#love', '#sky', '#nice'] 229 """ 230 tags = ['#' + self.random.choice(HASHTAGS) 231 for _ in range(quantity)] 232 233 if int(quantity) == 1: 234 return tags[0] 235 236 return tags 237 238 def home_page(self, tld_type: Optional[TLDType] = None) -> str: 239 """Generate a random home page. 240 241 :param tld_type: TLD type. 242 :return: Random home page. 243 244 :Example: 245 https://fontir.info 246 """ 247 resource = self.random.choice(USERNAMES) 248 domain = self.top_level_domain( 249 tld_type=tld_type, 250 ) 251 252 return 'https://{}{}'.format( 253 resource, domain) 254 255 def top_level_domain(self, tld_type: Optional[TLDType] = None) -> str: 256 """Return random top level domain. 257 258 :param tld_type: Enum object DomainType 259 :return: Top level domain. 260 :raises NonEnumerableError: if tld_type not in DomainType. 261 """ 262 key = self._validate_enum(item=tld_type, enum=TLDType) 263 return self.random.choice(TLD[key]) 264 265 def user_agent(self) -> str: 266 """Get a random user agent. 267 268 :return: User agent. 269 270 :Example: 271 Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) 272 Gecko/20100101 Firefox/15.0.1 273 """ 274 return self.random.choice(USER_AGENTS) 275 276 def network_protocol(self, layer: Optional[Layer] = None) -> str: 277 """Get a random network protocol form OSI model. 278 279 :param layer: Enum object Layer. 280 :return: Protocol name. 281 282 :Example: 283 AMQP 284 """ 285 key = self._validate_enum(item=layer, enum=Layer) 286 protocols = NETWORK_PROTOCOLS[key] 287 return self.random.choice(protocols) 288 289 def port(self, port_range: PortRange = PortRange.ALL) -> int: 290 """Generate random port. 291 292 :param port_range: PortRange enum object. 293 :return: Port number. 294 :raises NonEnumerableError: if port_range is not in PortRange. 295 296 :Example: 297 8080 298 """ 299 if isinstance(port_range, PortRange): 300 return self.random.randint(*port_range.value) 301 else: 302 raise NonEnumerableError(PortRange) 303 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mimesis/providers/internet.py b/mimesis/providers/internet.py --- a/mimesis/providers/internet.py +++ b/mimesis/providers/internet.py @@ -185,12 +185,13 @@ height: Union[int, str] = 1080, keywords: Optional[List[str]] = None, writable: bool = False) -> Union[str, bytes]: - """Generate random stock image (JPEG) hosted on Unsplash. + """Generate random stock image (JPG/JPEG) hosted on Unsplash. See «Random search term» on https://source.unsplash.com/ for more details. - .. note:: This method required an active HTTP connection. + .. note:: This method required an active HTTP connection + if you want to get writable object :param width: Width of the image. :param height: Height of the image. @@ -198,24 +199,23 @@ :param writable: Return image as sequence ob bytes. :return: Link to the image. """ - api = 'https://source.unsplash.com/{}x{}?{}' + api_url = 'https://source.unsplash.com/{}x{}?{}' if keywords is not None: keywords_str = ','.join(keywords) else: keywords_str = '' - url = api.format(width, height, keywords_str) + url = api_url.format(width, height, keywords_str) - try: - response = urllib.request.urlopen(url) - if writable: + if writable: + try: + response = urllib.request.urlopen(url) return response.read() - url = response.geturl() - return url - except urllib.error.URLError: - raise urllib.error.URLError( - 'Required an active HTTP connection') + except urllib.error.URLError: + raise urllib.error.URLError( + 'Required an active HTTP connection') + return url def hashtags(self, quantity: int = 4) -> Union[str, list]: """Generate a list of hashtags. diff --git a/mimesis/shortcuts.py b/mimesis/shortcuts.py --- a/mimesis/shortcuts.py +++ b/mimesis/shortcuts.py @@ -8,7 +8,7 @@ from urllib import request from uuid import uuid4 -__all__ = ['download_image', 'luhn_checksum'] +__all__ = ['luhn_checksum'] def luhn_checksum(num: str) -> str: @@ -24,30 +24,3 @@ sx = sx - 9 if sx > 9 else sx check += sx return str(check * 9 % 10) - - -def download_image(url: str = '', save_path: str = '', - unverified_ctx: bool = False) -> Union[None, str]: - """Download image and save in current directory on local machine. - - :param url: URL to image. - :param save_path: Saving path. - :param unverified_ctx: Create unverified context. - :return: Path to downloaded image. - :rtype: str or None - """ - if unverified_ctx: - ssl._create_default_https_context = ssl._create_unverified_context - - if url: - image_name = url.rsplit('/')[-1] - - splitted_name = image_name.rsplit('.') - if len(splitted_name) < 2: - image_name = '{}.jpg'.format(uuid4()) - else: - image_name = '{}.{}'.format(uuid4(), splitted_name[-1]) - full_image_path = path.join(save_path, image_name) - request.urlretrieve(url, full_image_path) - return full_image_path - return None
{"golden_diff": "diff --git a/mimesis/providers/internet.py b/mimesis/providers/internet.py\n--- a/mimesis/providers/internet.py\n+++ b/mimesis/providers/internet.py\n@@ -185,12 +185,13 @@\n height: Union[int, str] = 1080,\n keywords: Optional[List[str]] = None,\n writable: bool = False) -> Union[str, bytes]:\n- \"\"\"Generate random stock image (JPEG) hosted on Unsplash.\n+ \"\"\"Generate random stock image (JPG/JPEG) hosted on Unsplash.\n \n See \u00abRandom search term\u00bb on https://source.unsplash.com/\n for more details.\n \n- .. note:: This method required an active HTTP connection.\n+ .. note:: This method required an active HTTP connection\n+ if you want to get writable object\n \n :param width: Width of the image.\n :param height: Height of the image.\n@@ -198,24 +199,23 @@\n :param writable: Return image as sequence ob bytes.\n :return: Link to the image.\n \"\"\"\n- api = 'https://source.unsplash.com/{}x{}?{}'\n+ api_url = 'https://source.unsplash.com/{}x{}?{}'\n \n if keywords is not None:\n keywords_str = ','.join(keywords)\n else:\n keywords_str = ''\n \n- url = api.format(width, height, keywords_str)\n+ url = api_url.format(width, height, keywords_str)\n \n- try:\n- response = urllib.request.urlopen(url)\n- if writable:\n+ if writable:\n+ try:\n+ response = urllib.request.urlopen(url)\n return response.read()\n- url = response.geturl()\n- return url\n- except urllib.error.URLError:\n- raise urllib.error.URLError(\n- 'Required an active HTTP connection')\n+ except urllib.error.URLError:\n+ raise urllib.error.URLError(\n+ 'Required an active HTTP connection')\n+ return url\n \n def hashtags(self, quantity: int = 4) -> Union[str, list]:\n \"\"\"Generate a list of hashtags.\ndiff --git a/mimesis/shortcuts.py b/mimesis/shortcuts.py\n--- a/mimesis/shortcuts.py\n+++ b/mimesis/shortcuts.py\n@@ -8,7 +8,7 @@\n from urllib import request\n from uuid import uuid4\n \n-__all__ = ['download_image', 'luhn_checksum']\n+__all__ = ['luhn_checksum']\n \n \n def luhn_checksum(num: str) -> str:\n@@ -24,30 +24,3 @@\n sx = sx - 9 if sx > 9 else sx\n check += sx\n return str(check * 9 % 10)\n-\n-\n-def download_image(url: str = '', save_path: str = '',\n- unverified_ctx: bool = False) -> Union[None, str]:\n- \"\"\"Download image and save in current directory on local machine.\n-\n- :param url: URL to image.\n- :param save_path: Saving path.\n- :param unverified_ctx: Create unverified context.\n- :return: Path to downloaded image.\n- :rtype: str or None\n- \"\"\"\n- if unverified_ctx:\n- ssl._create_default_https_context = ssl._create_unverified_context\n-\n- if url:\n- image_name = url.rsplit('/')[-1]\n-\n- splitted_name = image_name.rsplit('.')\n- if len(splitted_name) < 2:\n- image_name = '{}.jpg'.format(uuid4())\n- else:\n- image_name = '{}.{}'.format(uuid4(), splitted_name[-1])\n- full_image_path = path.join(save_path, image_name)\n- request.urlretrieve(url, full_image_path)\n- return full_image_path\n- return None\n", "issue": "Remove download_image function.\n# Bug report\r\n\r\nThis is not a kind of problem mimesis solves. So it's pretty annoying when tests fail because of it all the time. 
It's better just remove it.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module is provide internal util functions.\"\"\"\n\nimport ssl\nfrom os import path\nfrom typing import Union\nfrom urllib import request\nfrom uuid import uuid4\n\n__all__ = ['download_image', 'luhn_checksum']\n\n\ndef luhn_checksum(num: str) -> str:\n \"\"\"Calculate a checksum for num using the Luhn algorithm.\n\n :param num: The number to calculate a checksum for as a string.\n :return: Checksum for number.\n \"\"\"\n check = 0\n for i, s in enumerate(reversed(num)):\n sx = int(s)\n sx = sx * 2 if i % 2 == 0 else sx\n sx = sx - 9 if sx > 9 else sx\n check += sx\n return str(check * 9 % 10)\n\n\ndef download_image(url: str = '', save_path: str = '',\n unverified_ctx: bool = False) -> Union[None, str]:\n \"\"\"Download image and save in current directory on local machine.\n\n :param url: URL to image.\n :param save_path: Saving path.\n :param unverified_ctx: Create unverified context.\n :return: Path to downloaded image.\n :rtype: str or None\n \"\"\"\n if unverified_ctx:\n ssl._create_default_https_context = ssl._create_unverified_context\n\n if url:\n image_name = url.rsplit('/')[-1]\n\n splitted_name = image_name.rsplit('.')\n if len(splitted_name) < 2:\n image_name = '{}.jpg'.format(uuid4())\n else:\n image_name = '{}.{}'.format(uuid4(), splitted_name[-1])\n full_image_path = path.join(save_path, image_name)\n request.urlretrieve(url, full_image_path)\n return full_image_path\n return None\n", "path": "mimesis/shortcuts.py"}, {"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides data related to internet.\"\"\"\n\nimport urllib.error\nimport urllib.request\nfrom ipaddress import IPv4Address, IPv6Address\nfrom typing import List, Optional, Union\n\nfrom mimesis.data import (\n EMOJI,\n HASHTAGS,\n HTTP_METHODS,\n HTTP_STATUS_CODES,\n HTTP_STATUS_MSGS,\n NETWORK_PROTOCOLS,\n TLD,\n USER_AGENTS,\n USERNAMES,\n)\nfrom mimesis.enums import Layer, MimeType, PortRange, TLDType\nfrom mimesis.exceptions import NonEnumerableError\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.file import File\n\n__all__ = ['Internet']\n\n\nclass Internet(BaseProvider):\n \"\"\"Class for generating data related to the internet.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param args: Arguments.\n :param kwargs: Keyword arguments.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__file = File(seed=self.seed)\n self._MAX_IPV4 = (2 ** 32) - 1\n self._MAX_IPV6 = (2 ** 128) - 1\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'internet'\n\n def content_type(self, mime_type: Optional[MimeType] = None) -> str:\n \"\"\"Get a random HTTP content type.\n\n :return: Content type.\n\n :Example:\n Content-Type: application/json\n \"\"\"\n fmt = self.__file.mime_type(type_=mime_type)\n return 'Content-Type: {}'.format(fmt)\n\n def http_status_message(self) -> str:\n \"\"\"Get a random HTTP status message.\n\n :return: HTTP status message.\n\n :Example:\n 200 OK\n \"\"\"\n return self.random.choice(HTTP_STATUS_MSGS)\n\n def http_status_code(self) -> int:\n \"\"\"Get a random HTTP status code.\n\n :return: HTTP status.\n\n :Example:\n 200\n \"\"\"\n return self.random.choice(HTTP_STATUS_CODES)\n\n def http_method(self) -> str:\n \"\"\"Get a random HTTP method.\n\n :return: HTTP method.\n\n :Example:\n POST\n \"\"\"\n return self.random.choice(HTTP_METHODS)\n\n def ip_v4_object(self) -> IPv4Address:\n \"\"\"Generate random IPv4Address object.\n\n See 
documentation for module ipaddress:\n https://docs.python.org/3.7/library/ipaddress.html\n\n :return: IPv4Address object.\n \"\"\"\n return IPv4Address(\n self.random.randint(0, self._MAX_IPV4),\n )\n\n def ip_v4(self, with_port: bool = False,\n port_range: PortRange = PortRange.ALL) -> str:\n \"\"\"Generate a random IPv4 address as string.\n\n :param port_range: PortRange enum object.\n :param with_port: Add port from PortRange to IP.\n :return: IPv4 address as string.\n\n :Example:\n 19.121.223.58 or 19.121.223.58:8000\n \"\"\"\n ip = str(self.ip_v4_object())\n\n if with_port:\n port = self.port(port_range=port_range)\n return '{}:{}'.format(ip, port)\n\n return ip\n\n def ip_v6_object(self) -> IPv6Address:\n \"\"\"Generate random IPv6Address object.\n\n See documentation for module ipaddress:\n https://docs.python.org/3.7/library/ipaddress.html\n\n :return: IPv6Address object.\n \"\"\"\n return IPv6Address(\n self.random.randint(\n 0, self._MAX_IPV6,\n ),\n )\n\n def ip_v6(self) -> str:\n \"\"\"Generate a random IPv6 address as string.\n\n :return: IPv6 address string.\n\n :Example:\n 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3\n \"\"\"\n return str(self.ip_v6_object())\n\n def mac_address(self) -> str:\n \"\"\"Generate a random MAC address.\n\n :return: Random MAC address.\n\n :Example:\n 00:16:3e:25:e7:b1\n \"\"\"\n mac_hex = [\n 0x00, 0x16, 0x3e,\n self.random.randint(0x00, 0x7f),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n ]\n mac = map(lambda x: '{:02x}'.format(x), mac_hex)\n return ':'.join(mac)\n\n def emoji(self) -> str:\n \"\"\"Get a random emoji shortcut code.\n\n :return: Emoji code.\n\n :Example:\n :kissing:\n \"\"\"\n return self.random.choice(EMOJI)\n\n @staticmethod\n def image_placeholder(width: Union[int, str] = 1920,\n height: Union[int, str] = 1080) -> str:\n \"\"\"Generate a link to the image placeholder.\n\n :param width: Width of image.\n :param height: Height of image.\n :return: URL to image placeholder.\n \"\"\"\n url = 'http://placehold.it/{width}x{height}'\n return url.format(width=width, height=height)\n\n @staticmethod\n def stock_image(width: Union[int, str] = 1920,\n height: Union[int, str] = 1080,\n keywords: Optional[List[str]] = None,\n writable: bool = False) -> Union[str, bytes]:\n \"\"\"Generate random stock image (JPEG) hosted on Unsplash.\n\n See \u00abRandom search term\u00bb on https://source.unsplash.com/\n for more details.\n\n .. 
note:: This method required an active HTTP connection.\n\n :param width: Width of the image.\n :param height: Height of the image.\n :param keywords: List of search keywords.\n :param writable: Return image as sequence ob bytes.\n :return: Link to the image.\n \"\"\"\n api = 'https://source.unsplash.com/{}x{}?{}'\n\n if keywords is not None:\n keywords_str = ','.join(keywords)\n else:\n keywords_str = ''\n\n url = api.format(width, height, keywords_str)\n\n try:\n response = urllib.request.urlopen(url)\n if writable:\n return response.read()\n url = response.geturl()\n return url\n except urllib.error.URLError:\n raise urllib.error.URLError(\n 'Required an active HTTP connection')\n\n def hashtags(self, quantity: int = 4) -> Union[str, list]:\n \"\"\"Generate a list of hashtags.\n\n :param quantity: The quantity of hashtags.\n :return: The list of hashtags.\n :raises NonEnumerableError: if category is not in Hashtag.\n\n :Example:\n ['#love', '#sky', '#nice']\n \"\"\"\n tags = ['#' + self.random.choice(HASHTAGS)\n for _ in range(quantity)]\n\n if int(quantity) == 1:\n return tags[0]\n\n return tags\n\n def home_page(self, tld_type: Optional[TLDType] = None) -> str:\n \"\"\"Generate a random home page.\n\n :param tld_type: TLD type.\n :return: Random home page.\n\n :Example:\n https://fontir.info\n \"\"\"\n resource = self.random.choice(USERNAMES)\n domain = self.top_level_domain(\n tld_type=tld_type,\n )\n\n return 'https://{}{}'.format(\n resource, domain)\n\n def top_level_domain(self, tld_type: Optional[TLDType] = None) -> str:\n \"\"\"Return random top level domain.\n\n :param tld_type: Enum object DomainType\n :return: Top level domain.\n :raises NonEnumerableError: if tld_type not in DomainType.\n \"\"\"\n key = self._validate_enum(item=tld_type, enum=TLDType)\n return self.random.choice(TLD[key])\n\n def user_agent(self) -> str:\n \"\"\"Get a random user agent.\n\n :return: User agent.\n\n :Example:\n Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0)\n Gecko/20100101 Firefox/15.0.1\n \"\"\"\n return self.random.choice(USER_AGENTS)\n\n def network_protocol(self, layer: Optional[Layer] = None) -> str:\n \"\"\"Get a random network protocol form OSI model.\n\n :param layer: Enum object Layer.\n :return: Protocol name.\n\n :Example:\n AMQP\n \"\"\"\n key = self._validate_enum(item=layer, enum=Layer)\n protocols = NETWORK_PROTOCOLS[key]\n return self.random.choice(protocols)\n\n def port(self, port_range: PortRange = PortRange.ALL) -> int:\n \"\"\"Generate random port.\n\n :param port_range: PortRange enum object.\n :return: Port number.\n :raises NonEnumerableError: if port_range is not in PortRange.\n\n :Example:\n 8080\n \"\"\"\n if isinstance(port_range, PortRange):\n return self.random.randint(*port_range.value)\n else:\n raise NonEnumerableError(PortRange)\n", "path": "mimesis/providers/internet.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"This module is provide internal util functions.\"\"\"\n\nimport ssl\nfrom os import path\nfrom typing import Union\nfrom urllib import request\nfrom uuid import uuid4\n\n__all__ = ['luhn_checksum']\n\n\ndef luhn_checksum(num: str) -> str:\n \"\"\"Calculate a checksum for num using the Luhn algorithm.\n\n :param num: The number to calculate a checksum for as a string.\n :return: Checksum for number.\n \"\"\"\n check = 0\n for i, s in enumerate(reversed(num)):\n sx = int(s)\n sx = sx * 2 if i % 2 == 0 else sx\n sx = sx - 9 if sx > 9 else sx\n check += sx\n return str(check * 9 % 10)\n", "path": "mimesis/shortcuts.py"}, 
{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Provides data related to internet.\"\"\"\n\nimport urllib.error\nimport urllib.request\nfrom ipaddress import IPv4Address, IPv6Address\nfrom typing import List, Optional, Union\n\nfrom mimesis.data import (\n EMOJI,\n HASHTAGS,\n HTTP_METHODS,\n HTTP_STATUS_CODES,\n HTTP_STATUS_MSGS,\n NETWORK_PROTOCOLS,\n TLD,\n USER_AGENTS,\n USERNAMES,\n)\nfrom mimesis.enums import Layer, MimeType, PortRange, TLDType\nfrom mimesis.exceptions import NonEnumerableError\nfrom mimesis.providers.base import BaseProvider\nfrom mimesis.providers.file import File\n\n__all__ = ['Internet']\n\n\nclass Internet(BaseProvider):\n \"\"\"Class for generating data related to the internet.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize attributes.\n\n :param args: Arguments.\n :param kwargs: Keyword arguments.\n \"\"\"\n super().__init__(*args, **kwargs)\n self.__file = File(seed=self.seed)\n self._MAX_IPV4 = (2 ** 32) - 1\n self._MAX_IPV6 = (2 ** 128) - 1\n\n class Meta:\n \"\"\"Class for metadata.\"\"\"\n\n name = 'internet'\n\n def content_type(self, mime_type: Optional[MimeType] = None) -> str:\n \"\"\"Get a random HTTP content type.\n\n :return: Content type.\n\n :Example:\n Content-Type: application/json\n \"\"\"\n fmt = self.__file.mime_type(type_=mime_type)\n return 'Content-Type: {}'.format(fmt)\n\n def http_status_message(self) -> str:\n \"\"\"Get a random HTTP status message.\n\n :return: HTTP status message.\n\n :Example:\n 200 OK\n \"\"\"\n return self.random.choice(HTTP_STATUS_MSGS)\n\n def http_status_code(self) -> int:\n \"\"\"Get a random HTTP status code.\n\n :return: HTTP status.\n\n :Example:\n 200\n \"\"\"\n return self.random.choice(HTTP_STATUS_CODES)\n\n def http_method(self) -> str:\n \"\"\"Get a random HTTP method.\n\n :return: HTTP method.\n\n :Example:\n POST\n \"\"\"\n return self.random.choice(HTTP_METHODS)\n\n def ip_v4_object(self) -> IPv4Address:\n \"\"\"Generate random IPv4Address object.\n\n See documentation for module ipaddress:\n https://docs.python.org/3.7/library/ipaddress.html\n\n :return: IPv4Address object.\n \"\"\"\n return IPv4Address(\n self.random.randint(0, self._MAX_IPV4),\n )\n\n def ip_v4(self, with_port: bool = False,\n port_range: PortRange = PortRange.ALL) -> str:\n \"\"\"Generate a random IPv4 address as string.\n\n :param port_range: PortRange enum object.\n :param with_port: Add port from PortRange to IP.\n :return: IPv4 address as string.\n\n :Example:\n 19.121.223.58 or 19.121.223.58:8000\n \"\"\"\n ip = str(self.ip_v4_object())\n\n if with_port:\n port = self.port(port_range=port_range)\n return '{}:{}'.format(ip, port)\n\n return ip\n\n def ip_v6_object(self) -> IPv6Address:\n \"\"\"Generate random IPv6Address object.\n\n See documentation for module ipaddress:\n https://docs.python.org/3.7/library/ipaddress.html\n\n :return: IPv6Address object.\n \"\"\"\n return IPv6Address(\n self.random.randint(\n 0, self._MAX_IPV6,\n ),\n )\n\n def ip_v6(self) -> str:\n \"\"\"Generate a random IPv6 address as string.\n\n :return: IPv6 address string.\n\n :Example:\n 2001:c244:cf9d:1fb1:c56d:f52c:8a04:94f3\n \"\"\"\n return str(self.ip_v6_object())\n\n def mac_address(self) -> str:\n \"\"\"Generate a random MAC address.\n\n :return: Random MAC address.\n\n :Example:\n 00:16:3e:25:e7:b1\n \"\"\"\n mac_hex = [\n 0x00, 0x16, 0x3e,\n self.random.randint(0x00, 0x7f),\n self.random.randint(0x00, 0xff),\n self.random.randint(0x00, 0xff),\n ]\n mac = map(lambda x: '{:02x}'.format(x), mac_hex)\n return ':'.join(mac)\n\n 
def emoji(self) -> str:\n \"\"\"Get a random emoji shortcut code.\n\n :return: Emoji code.\n\n :Example:\n :kissing:\n \"\"\"\n return self.random.choice(EMOJI)\n\n @staticmethod\n def image_placeholder(width: Union[int, str] = 1920,\n height: Union[int, str] = 1080) -> str:\n \"\"\"Generate a link to the image placeholder.\n\n :param width: Width of image.\n :param height: Height of image.\n :return: URL to image placeholder.\n \"\"\"\n url = 'http://placehold.it/{width}x{height}'\n return url.format(width=width, height=height)\n\n @staticmethod\n def stock_image(width: Union[int, str] = 1920,\n height: Union[int, str] = 1080,\n keywords: Optional[List[str]] = None,\n writable: bool = False) -> Union[str, bytes]:\n \"\"\"Generate random stock image (JPG/JPEG) hosted on Unsplash.\n\n See \u00abRandom search term\u00bb on https://source.unsplash.com/\n for more details.\n\n .. note:: This method required an active HTTP connection\n if you want to get writable object\n\n :param width: Width of the image.\n :param height: Height of the image.\n :param keywords: List of search keywords.\n :param writable: Return image as sequence ob bytes.\n :return: Link to the image.\n \"\"\"\n api_url = 'https://source.unsplash.com/{}x{}?{}'\n\n if keywords is not None:\n keywords_str = ','.join(keywords)\n else:\n keywords_str = ''\n\n url = api_url.format(width, height, keywords_str)\n\n if writable:\n try:\n response = urllib.request.urlopen(url)\n return response.read()\n except urllib.error.URLError:\n raise urllib.error.URLError(\n 'Required an active HTTP connection')\n return url\n\n def hashtags(self, quantity: int = 4) -> Union[str, list]:\n \"\"\"Generate a list of hashtags.\n\n :param quantity: The quantity of hashtags.\n :return: The list of hashtags.\n :raises NonEnumerableError: if category is not in Hashtag.\n\n :Example:\n ['#love', '#sky', '#nice']\n \"\"\"\n tags = ['#' + self.random.choice(HASHTAGS)\n for _ in range(quantity)]\n\n if int(quantity) == 1:\n return tags[0]\n\n return tags\n\n def home_page(self, tld_type: Optional[TLDType] = None) -> str:\n \"\"\"Generate a random home page.\n\n :param tld_type: TLD type.\n :return: Random home page.\n\n :Example:\n https://fontir.info\n \"\"\"\n resource = self.random.choice(USERNAMES)\n domain = self.top_level_domain(\n tld_type=tld_type,\n )\n\n return 'https://{}{}'.format(\n resource, domain)\n\n def top_level_domain(self, tld_type: Optional[TLDType] = None) -> str:\n \"\"\"Return random top level domain.\n\n :param tld_type: Enum object DomainType\n :return: Top level domain.\n :raises NonEnumerableError: if tld_type not in DomainType.\n \"\"\"\n key = self._validate_enum(item=tld_type, enum=TLDType)\n return self.random.choice(TLD[key])\n\n def user_agent(self) -> str:\n \"\"\"Get a random user agent.\n\n :return: User agent.\n\n :Example:\n Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0)\n Gecko/20100101 Firefox/15.0.1\n \"\"\"\n return self.random.choice(USER_AGENTS)\n\n def network_protocol(self, layer: Optional[Layer] = None) -> str:\n \"\"\"Get a random network protocol form OSI model.\n\n :param layer: Enum object Layer.\n :return: Protocol name.\n\n :Example:\n AMQP\n \"\"\"\n key = self._validate_enum(item=layer, enum=Layer)\n protocols = NETWORK_PROTOCOLS[key]\n return self.random.choice(protocols)\n\n def port(self, port_range: PortRange = PortRange.ALL) -> int:\n \"\"\"Generate random port.\n\n :param port_range: PortRange enum object.\n :return: Port number.\n :raises NonEnumerableError: if port_range is not in 
PortRange.\n\n :Example:\n 8080\n \"\"\"\n if isinstance(port_range, PortRange):\n return self.random.randint(*port_range.value)\n else:\n raise NonEnumerableError(PortRange)\n", "path": "mimesis/providers/internet.py"}]}
3725
847
gh_patches_debug_12789
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-1463
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error validating return types when using a Generic in Union There seems to be an issue with types. Generics worked fine and dandy until I've tried to use them in a Union. ```python @strawberry.type class FaultyType: code_sent: bool result: Union[Pet, ErrorNode[Codes]] ``` Here returning an ErrorNode in "result" field fails type check. Fails with strawberry.exceptions.UnallowedReturnTypeForUnion: The type "<class 'strawberry_sample.ErrorNode'>" of the field "result" is not in the list of the types of the union: "['CodesErrorNode', 'Pet']" Returning the same generic without unions in type declaration works fine. Full sample: https://gist.github.com/MeRuslan/5bd179f28fc5ae05e815429ee02ebdf6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/types/types.py` Content: ``` 1 from __future__ import annotations 2 3 import dataclasses 4 from typing import ( 5 TYPE_CHECKING, 6 List, 7 Mapping, 8 Optional, 9 Sequence, 10 Type, 11 TypeVar, 12 Union, 13 ) 14 15 from strawberry.type import StrawberryType, StrawberryTypeVar 16 from strawberry.utils.typing import is_generic as is_type_generic 17 18 19 if TYPE_CHECKING: 20 from strawberry.field import StrawberryField 21 from strawberry.schema_directive import StrawberrySchemaDirective 22 23 24 @dataclasses.dataclass(eq=False) 25 class TypeDefinition(StrawberryType): 26 name: str 27 is_input: bool 28 is_interface: bool 29 origin: Type 30 description: Optional[str] 31 interfaces: List["TypeDefinition"] 32 extend: bool 33 directives: Optional[Sequence[StrawberrySchemaDirective]] 34 35 _fields: List["StrawberryField"] 36 37 concrete_of: Optional["TypeDefinition"] = None 38 """Concrete implementations of Generic TypeDefinitions fill this in""" 39 type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field( 40 default_factory=dict 41 ) 42 43 # TODO: remove wrapped cls when we "merge" this with `StrawberryObject` 44 def resolve_generic(self, wrapped_cls: type) -> type: 45 from strawberry.annotation import StrawberryAnnotation 46 47 passed_types = wrapped_cls.__args__ # type: ignore 48 params = wrapped_cls.__origin__.__parameters__ # type: ignore 49 50 # Make sure all passed_types are turned into StrawberryTypes 51 resolved_types = [] 52 for passed_type in passed_types: 53 resolved_type = StrawberryAnnotation(passed_type).resolve() 54 resolved_types.append(resolved_type) 55 56 type_var_map = dict(zip(params, resolved_types)) 57 58 return self.copy_with(type_var_map) 59 60 # TODO: Return a StrawberryObject 61 def copy_with( 62 self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] 63 ) -> type: 64 fields = [] 65 for field in self.fields: 66 # TODO: Logic unnecessary with StrawberryObject 67 field_type = field.type 68 if hasattr(field_type, "_type_definition"): 69 field_type = field_type._type_definition # type: ignore 70 71 # TODO: All types should end up being StrawberryTypes 72 # The first check is here as a symptom of strawberry.ID being a 73 # Scalar, but not a StrawberryType 74 if isinstance(field_type, StrawberryType) and field_type.is_generic: 75 field = field.copy_with(type_var_map) 76 77 fields.append(field) 78 79 new_type_definition = TypeDefinition( 80 name=self.name, 81 is_input=self.is_input, 82 origin=self.origin, 83 is_interface=self.is_interface, 84 directives=self.directives, 85 
interfaces=self.interfaces, 86 description=self.description, 87 extend=self.extend, 88 _fields=fields, 89 concrete_of=self, 90 type_var_map=type_var_map, 91 ) 92 93 new_type = type( 94 new_type_definition.name, 95 (self.origin,), 96 {"_type_definition": new_type_definition}, 97 ) 98 99 new_type_definition.origin = new_type 100 101 return new_type 102 103 def get_field(self, python_name: str) -> Optional["StrawberryField"]: 104 return next( 105 (field for field in self.fields if field.python_name == python_name), None 106 ) 107 108 @property 109 def fields(self) -> List["StrawberryField"]: 110 # TODO: rename _fields to fields and remove this property 111 return self._fields 112 113 @property 114 def is_generic(self) -> bool: 115 return is_type_generic(self.origin) 116 117 @property 118 def type_params(self) -> List[TypeVar]: 119 type_params: List[TypeVar] = [] 120 for field in self.fields: 121 type_params.extend(field.type_params) 122 123 return type_params 124 125 def is_implemented_by(self, root: Union[type, dict]) -> bool: 126 # TODO: Accept StrawberryObject instead 127 # TODO: Support dicts 128 if isinstance(root, dict): 129 raise NotImplementedError() 130 131 type_definition = root._type_definition # type: ignore 132 133 if type_definition is self: 134 # No generics involved. Exact type match 135 return True 136 137 if type_definition is not self.concrete_of: 138 # Either completely different type, or concrete type of a different generic 139 return False 140 141 # Check the mapping of all fields' TypeVars 142 for generic_field in type_definition.fields: 143 generic_field_type = generic_field.type 144 if not isinstance(generic_field_type, StrawberryTypeVar): 145 continue 146 147 # For each TypeVar found, get the expected type from the copy's type map 148 expected_concrete_type = self.type_var_map.get(generic_field_type.type_var) 149 if expected_concrete_type is None: 150 # TODO: Should this return False? 151 continue 152 153 # Check if the expected type matches the type found on the type_map 154 real_concrete_type = type(getattr(root, generic_field.name)) 155 if real_concrete_type is not expected_concrete_type: 156 return False 157 158 # All field mappings succeeded. This is a match 159 return True 160 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/types/types.py b/strawberry/types/types.py --- a/strawberry/types/types.py +++ b/strawberry/types/types.py @@ -152,6 +152,13 @@ # Check if the expected type matches the type found on the type_map real_concrete_type = type(getattr(root, generic_field.name)) + + # TODO: uniform type var map, at the moment we map object types + # to their class (not to TypeDefinition) while we map enum to + # the EnumDefinition class. This is why we do this check here: + if hasattr(real_concrete_type, "_enum_definition"): + real_concrete_type = real_concrete_type._enum_definition + if real_concrete_type is not expected_concrete_type: return False
{"golden_diff": "diff --git a/strawberry/types/types.py b/strawberry/types/types.py\n--- a/strawberry/types/types.py\n+++ b/strawberry/types/types.py\n@@ -152,6 +152,13 @@\n \n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n+\n+ # TODO: uniform type var map, at the moment we map object types\n+ # to their class (not to TypeDefinition) while we map enum to\n+ # the EnumDefinition class. This is why we do this check here:\n+ if hasattr(real_concrete_type, \"_enum_definition\"):\n+ real_concrete_type = real_concrete_type._enum_definition\n+\n if real_concrete_type is not expected_concrete_type:\n return False\n", "issue": "Error validating return types when using a Generic in Union\nThere seems to be an issue with types.\r\nGenerics worked fine and dandy until I've tried to use them in a Union.\r\n\r\n```python\r\[email protected]\r\nclass FaultyType:\r\n code_sent: bool\r\n result: Union[Pet, ErrorNode[Codes]]\r\n```\r\nHere returning an ErrorNode in \"result\" field fails type check.\r\nFails with strawberry.exceptions.UnallowedReturnTypeForUnion: The type \"<class 'strawberry_sample.ErrorNode'>\" of the field \"result\" is not in the list of the types of the union: \"['CodesErrorNode', 'Pet']\"\r\n\r\nReturning the same generic without unions in type declaration works fine.\r\n\r\nFull sample:\r\nhttps://gist.github.com/MeRuslan/5bd179f28fc5ae05e815429ee02ebdf6\n", "before_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom typing import (\n TYPE_CHECKING,\n List,\n Mapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n Union,\n)\n\nfrom strawberry.type import StrawberryType, StrawberryTypeVar\nfrom strawberry.utils.typing import is_generic as is_type_generic\n\n\nif TYPE_CHECKING:\n from strawberry.field import StrawberryField\n from strawberry.schema_directive import StrawberrySchemaDirective\n\n\[email protected](eq=False)\nclass TypeDefinition(StrawberryType):\n name: str\n is_input: bool\n is_interface: bool\n origin: Type\n description: Optional[str]\n interfaces: List[\"TypeDefinition\"]\n extend: bool\n directives: Optional[Sequence[StrawberrySchemaDirective]]\n\n _fields: List[\"StrawberryField\"]\n\n concrete_of: Optional[\"TypeDefinition\"] = None\n \"\"\"Concrete implementations of Generic TypeDefinitions fill this in\"\"\"\n type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(\n default_factory=dict\n )\n\n # TODO: remove wrapped cls when we \"merge\" this with `StrawberryObject`\n def resolve_generic(self, wrapped_cls: type) -> type:\n from strawberry.annotation import StrawberryAnnotation\n\n passed_types = wrapped_cls.__args__ # type: ignore\n params = wrapped_cls.__origin__.__parameters__ # type: ignore\n\n # Make sure all passed_types are turned into StrawberryTypes\n resolved_types = []\n for passed_type in passed_types:\n resolved_type = StrawberryAnnotation(passed_type).resolve()\n resolved_types.append(resolved_type)\n\n type_var_map = dict(zip(params, resolved_types))\n\n return self.copy_with(type_var_map)\n\n # TODO: Return a StrawberryObject\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> type:\n fields = []\n for field in self.fields:\n # TODO: Logic unnecessary with StrawberryObject\n field_type = field.type\n if hasattr(field_type, \"_type_definition\"):\n field_type = field_type._type_definition # type: ignore\n\n # TODO: All types should end up being StrawberryTypes\n # The 
first check is here as a symptom of strawberry.ID being a\n # Scalar, but not a StrawberryType\n if isinstance(field_type, StrawberryType) and field_type.is_generic:\n field = field.copy_with(type_var_map)\n\n fields.append(field)\n\n new_type_definition = TypeDefinition(\n name=self.name,\n is_input=self.is_input,\n origin=self.origin,\n is_interface=self.is_interface,\n directives=self.directives,\n interfaces=self.interfaces,\n description=self.description,\n extend=self.extend,\n _fields=fields,\n concrete_of=self,\n type_var_map=type_var_map,\n )\n\n new_type = type(\n new_type_definition.name,\n (self.origin,),\n {\"_type_definition\": new_type_definition},\n )\n\n new_type_definition.origin = new_type\n\n return new_type\n\n def get_field(self, python_name: str) -> Optional[\"StrawberryField\"]:\n return next(\n (field for field in self.fields if field.python_name == python_name), None\n )\n\n @property\n def fields(self) -> List[\"StrawberryField\"]:\n # TODO: rename _fields to fields and remove this property\n return self._fields\n\n @property\n def is_generic(self) -> bool:\n return is_type_generic(self.origin)\n\n @property\n def type_params(self) -> List[TypeVar]:\n type_params: List[TypeVar] = []\n for field in self.fields:\n type_params.extend(field.type_params)\n\n return type_params\n\n def is_implemented_by(self, root: Union[type, dict]) -> bool:\n # TODO: Accept StrawberryObject instead\n # TODO: Support dicts\n if isinstance(root, dict):\n raise NotImplementedError()\n\n type_definition = root._type_definition # type: ignore\n\n if type_definition is self:\n # No generics involved. Exact type match\n return True\n\n if type_definition is not self.concrete_of:\n # Either completely different type, or concrete type of a different generic\n return False\n\n # Check the mapping of all fields' TypeVars\n for generic_field in type_definition.fields:\n generic_field_type = generic_field.type\n if not isinstance(generic_field_type, StrawberryTypeVar):\n continue\n\n # For each TypeVar found, get the expected type from the copy's type map\n expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)\n if expected_concrete_type is None:\n # TODO: Should this return False?\n continue\n\n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n if real_concrete_type is not expected_concrete_type:\n return False\n\n # All field mappings succeeded. 
This is a match\n return True\n", "path": "strawberry/types/types.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport dataclasses\nfrom typing import (\n TYPE_CHECKING,\n List,\n Mapping,\n Optional,\n Sequence,\n Type,\n TypeVar,\n Union,\n)\n\nfrom strawberry.type import StrawberryType, StrawberryTypeVar\nfrom strawberry.utils.typing import is_generic as is_type_generic\n\n\nif TYPE_CHECKING:\n from strawberry.field import StrawberryField\n from strawberry.schema_directive import StrawberrySchemaDirective\n\n\[email protected](eq=False)\nclass TypeDefinition(StrawberryType):\n name: str\n is_input: bool\n is_interface: bool\n origin: Type\n description: Optional[str]\n interfaces: List[\"TypeDefinition\"]\n extend: bool\n directives: Optional[Sequence[StrawberrySchemaDirective]]\n\n _fields: List[\"StrawberryField\"]\n\n concrete_of: Optional[\"TypeDefinition\"] = None\n \"\"\"Concrete implementations of Generic TypeDefinitions fill this in\"\"\"\n type_var_map: Mapping[TypeVar, Union[StrawberryType, type]] = dataclasses.field(\n default_factory=dict\n )\n\n # TODO: remove wrapped cls when we \"merge\" this with `StrawberryObject`\n def resolve_generic(self, wrapped_cls: type) -> type:\n from strawberry.annotation import StrawberryAnnotation\n\n passed_types = wrapped_cls.__args__ # type: ignore\n params = wrapped_cls.__origin__.__parameters__ # type: ignore\n\n # Make sure all passed_types are turned into StrawberryTypes\n resolved_types = []\n for passed_type in passed_types:\n resolved_type = StrawberryAnnotation(passed_type).resolve()\n resolved_types.append(resolved_type)\n\n type_var_map = dict(zip(params, resolved_types))\n\n return self.copy_with(type_var_map)\n\n # TODO: Return a StrawberryObject\n def copy_with(\n self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]\n ) -> type:\n fields = []\n for field in self.fields:\n # TODO: Logic unnecessary with StrawberryObject\n field_type = field.type\n if hasattr(field_type, \"_type_definition\"):\n field_type = field_type._type_definition # type: ignore\n\n # TODO: All types should end up being StrawberryTypes\n # The first check is here as a symptom of strawberry.ID being a\n # Scalar, but not a StrawberryType\n if isinstance(field_type, StrawberryType) and field_type.is_generic:\n field = field.copy_with(type_var_map)\n\n fields.append(field)\n\n new_type_definition = TypeDefinition(\n name=self.name,\n is_input=self.is_input,\n origin=self.origin,\n is_interface=self.is_interface,\n directives=self.directives,\n interfaces=self.interfaces,\n description=self.description,\n extend=self.extend,\n _fields=fields,\n concrete_of=self,\n type_var_map=type_var_map,\n )\n\n new_type = type(\n new_type_definition.name,\n (self.origin,),\n {\"_type_definition\": new_type_definition},\n )\n\n new_type_definition.origin = new_type\n\n return new_type\n\n def get_field(self, python_name: str) -> Optional[\"StrawberryField\"]:\n return next(\n (field for field in self.fields if field.python_name == python_name), None\n )\n\n @property\n def fields(self) -> List[\"StrawberryField\"]:\n # TODO: rename _fields to fields and remove this property\n return self._fields\n\n @property\n def is_generic(self) -> bool:\n return is_type_generic(self.origin)\n\n @property\n def type_params(self) -> List[TypeVar]:\n type_params: List[TypeVar] = []\n for field in self.fields:\n type_params.extend(field.type_params)\n\n return type_params\n\n def is_implemented_by(self, root: Union[type, dict]) -> bool:\n # TODO: 
Accept StrawberryObject instead\n # TODO: Support dicts\n if isinstance(root, dict):\n raise NotImplementedError()\n\n type_definition = root._type_definition # type: ignore\n\n if type_definition is self:\n # No generics involved. Exact type match\n return True\n\n if type_definition is not self.concrete_of:\n # Either completely different type, or concrete type of a different generic\n return False\n\n # Check the mapping of all fields' TypeVars\n for generic_field in type_definition.fields:\n generic_field_type = generic_field.type\n if not isinstance(generic_field_type, StrawberryTypeVar):\n continue\n\n # For each TypeVar found, get the expected type from the copy's type map\n expected_concrete_type = self.type_var_map.get(generic_field_type.type_var)\n if expected_concrete_type is None:\n # TODO: Should this return False?\n continue\n\n # Check if the expected type matches the type found on the type_map\n real_concrete_type = type(getattr(root, generic_field.name))\n\n # TODO: uniform type var map, at the moment we map object types\n # to their class (not to TypeDefinition) while we map enum to\n # the EnumDefinition class. This is why we do this check here:\n if hasattr(real_concrete_type, \"_enum_definition\"):\n real_concrete_type = real_concrete_type._enum_definition\n\n if real_concrete_type is not expected_concrete_type:\n return False\n\n # All field mappings succeeded. This is a match\n return True\n", "path": "strawberry/types/types.py"}]}
1,930
187
gh_patches_debug_15273
rasdani/github-patches
git_diff
nextcloud__appstore-110
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- App title and description must be available in English Both can be localized however we don't expect everything to be translated. Because we use English as fallback if a language is not present we should require the English contents in the info.xml --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nextcloudappstore/core/api/v1/release/parser.py` Content: ``` 1 import re 2 import tarfile # type: ignore 3 import lxml.etree # type: ignore 4 from typing import Dict, Any, Tuple, List, Set 5 6 from nextcloudappstore.core.api.v1.release import ReleaseConfig 7 from nextcloudappstore.core.versioning import pad_max_version, pad_min_version 8 from rest_framework.exceptions import APIException # type: ignore 9 10 11 class MaxSizeAppMetadataXmlException(APIException): 12 pass 13 14 15 class InvalidAppMetadataXmlException(APIException): 16 pass 17 18 19 class UnsupportedAppArchiveException(APIException): 20 pass 21 22 23 class InvalidAppPackageStructureException(APIException): 24 pass 25 26 27 class XMLSyntaxError(APIException): 28 pass 29 30 31 class GunZipAppMetadataExtractor: 32 def __init__(self, config: ReleaseConfig) -> None: 33 """ 34 :argument config the config 35 """ 36 self.config = config 37 self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*(?:/.*)*$') 38 39 def extract_app_metadata(self, archive_path: str) -> Tuple[str, str]: 40 """ 41 Extracts the info.xml from an tar.gz archive 42 :argument archive_path the path to the tar.gz archive 43 :raises InvalidAppPackageStructureException if the first level folder 44 does not equal the app_id or no info.xml file could be found in the 45 appinfo folder 46 :return the info.xml as string 47 """ 48 if not tarfile.is_tarfile(archive_path): 49 msg = '%s is not a valid tar.gz archive ' % archive_path 50 raise UnsupportedAppArchiveException(msg) 51 52 with tarfile.open(archive_path, 'r:gz') as tar: 53 result = self._parse_archive(tar) 54 return result 55 56 def _parse_archive(self, tar: Any) -> Tuple[str, str]: 57 folders = self._find_app_folders(tar.getnames()) 58 if len(folders) > 1: 59 msg = 'More than one possible app folder found' 60 raise InvalidAppPackageStructureException(msg) 61 elif len(folders) == 0: 62 msg = 'No possible app folder found. 
App folder must contain ' \ 63 'only lowercase ASCII characters or underscores' 64 raise InvalidAppPackageStructureException(msg) 65 66 app_id = folders.pop() 67 info_path = '%s/appinfo/info.xml' % app_id 68 try: 69 info_member = tar.getmember(info_path) 70 possible_links = [info_member] 71 # its complicated, sometimes there are single members, sometimes 72 # there aren't 73 try: 74 possible_links.append(tar.getmember(app_id)) 75 except KeyError: 76 pass 77 try: 78 possible_links.append(tar.getmember('%s/appinfo' % app_id)) 79 except KeyError: 80 pass 81 82 for possible_link in possible_links: 83 if possible_link.issym() or possible_link.islnk(): 84 msg = 'Symlinks and hard links can not be used for %s' % \ 85 possible_link 86 raise InvalidAppPackageStructureException(msg) 87 info_file = tar.extractfile(info_member) 88 contents = self._stream_read_file(info_file, 89 self.config.max_info_size) 90 return contents, app_id 91 except KeyError: 92 msg = 'Could not find %s file inside the archive' % info_path 93 raise InvalidAppPackageStructureException(msg) 94 95 def _stream_read_file(self, info_file: Any, max_info_size: int) -> str: 96 """ 97 Instead of reading everything in one go which is vulnerable to 98 zip bombs, stream and accumulate the bytes 99 :argument info_file: buffered io reader 100 :argument max_info_size: maximum file size in bytes 101 :raises MaxSizeAppMetadataXmlException if the maximum size was reached 102 :return: the parsed info.xml 103 """ 104 # FIXME: If someone finds a less ugly version, please feel free to 105 # improve it 106 size = 0 107 result = b'' 108 while True: 109 size += 1024 110 if size > max_info_size: 111 msg = 'info.xml was bigger than allowed %i bytes' % \ 112 max_info_size 113 raise MaxSizeAppMetadataXmlException(msg) 114 115 chunk = info_file.read(1024) 116 if not chunk: 117 break 118 result += chunk 119 120 return result.decode('utf-8') 121 122 def _find_app_folders(self, members: List[str]) -> Set[str]: 123 regex = self.app_folder_regex 124 matching_members = filter(lambda f: re.match(regex, f), members) 125 folders = map(lambda m: m.split('/')[0], matching_members) 126 return set(folders) 127 128 129 def element_to_dict(element: Any) -> Dict: 130 type = element.get('type') 131 key = element.tag.replace('-', '_') 132 if type == 'int': 133 return {key: int(element.text)} 134 elif type == 'list': 135 return {key: list(map(element_to_dict, element.iterchildren()))} 136 elif type == 'min-version': 137 return {key: pad_min_version(element.text)} 138 elif type == 'max-version': 139 return {key: pad_max_version(element.text)} 140 elif len(list(element)) > 0: 141 contents = {} 142 for child in element.iterchildren(): 143 contents.update(element_to_dict(child)) 144 return {key: contents} 145 else: 146 return {key: element.text} 147 148 149 def parse_app_metadata(xml: str, schema: str, pre_xslt: str, 150 xslt: str) -> Dict: 151 """ 152 Parses, validates and maps the xml onto a dict 153 :argument xml the info.xml string to parse 154 :argument schema the schema xml as string 155 :argument pre_xslt xslt which is run before validation to ensure that 156 everything is in the correct order and that unknown elements are excluded 157 :argument xslt the xslt to transform it to a matching structure 158 :raises InvalidAppMetadataXmlException if the schema does not validate 159 :return the parsed xml as dict 160 """ 161 parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True, 162 remove_comments=True, load_dtd=False, 163 remove_blank_text=True, 
dtd_validation=False 164 ) 165 try: 166 doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser) 167 except lxml.etree.XMLSyntaxError as e: 168 msg = 'info.xml contains malformed xml: %s' % e 169 raise XMLSyntaxError(msg) 170 for _ in doc.iter(lxml.etree.Entity): 171 raise InvalidAppMetadataXmlException('Must not contain entities') 172 pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt)) 173 pre_transformed_doc = pre_transform(doc) 174 schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser) 175 schema = lxml.etree.XMLSchema(schema_doc) 176 try: 177 schema.assertValid(pre_transformed_doc) # type: ignore 178 except lxml.etree.DocumentInvalid as e: 179 msg = 'info.xml did not validate: %s' % e 180 raise InvalidAppMetadataXmlException(msg) 181 transform = lxml.etree.XSLT(lxml.etree.XML(xslt)) 182 transformed_doc = transform(pre_transformed_doc) 183 mapped = element_to_dict(transformed_doc.getroot()) 184 return mapped 185 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nextcloudappstore/core/api/v1/release/parser.py b/nextcloudappstore/core/api/v1/release/parser.py --- a/nextcloudappstore/core/api/v1/release/parser.py +++ b/nextcloudappstore/core/api/v1/release/parser.py @@ -181,4 +181,20 @@ transform = lxml.etree.XSLT(lxml.etree.XML(xslt)) transformed_doc = transform(pre_transformed_doc) mapped = element_to_dict(transformed_doc.getroot()) + validate_english_present(mapped) return mapped + + +def validate_english_present(info: Dict) -> None: + """ + Validates that name, summary and description are present in english + :param info: the parsed xml + :raises: InvalidAppMetadataXmlException if at least one of the required + fields is not present in english + """ + app = info['app'] + translated_fields = ['name', 'summary', 'description'] + for field in translated_fields: + if 'en' not in app[field]: + msg = 'At least one element "%s" with lang "en" required' % field + raise InvalidAppMetadataXmlException(msg)
{"golden_diff": "diff --git a/nextcloudappstore/core/api/v1/release/parser.py b/nextcloudappstore/core/api/v1/release/parser.py\n--- a/nextcloudappstore/core/api/v1/release/parser.py\n+++ b/nextcloudappstore/core/api/v1/release/parser.py\n@@ -181,4 +181,20 @@\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n transformed_doc = transform(pre_transformed_doc)\n mapped = element_to_dict(transformed_doc.getroot())\n+ validate_english_present(mapped)\n return mapped\n+\n+\n+def validate_english_present(info: Dict) -> None:\n+ \"\"\"\n+ Validates that name, summary and description are present in english\n+ :param info: the parsed xml\n+ :raises: InvalidAppMetadataXmlException if at least one of the required\n+ fields is not present in english\n+ \"\"\"\n+ app = info['app']\n+ translated_fields = ['name', 'summary', 'description']\n+ for field in translated_fields:\n+ if 'en' not in app[field]:\n+ msg = 'At least one element \"%s\" with lang \"en\" required' % field\n+ raise InvalidAppMetadataXmlException(msg)\n", "issue": "App title and description must be available in English\nBoth can be localized however we don't expect everything to be translated. Because we use English as fallback if a language is not present we should require the English contents in the info.xml\n\n", "before_files": [{"content": "import re\nimport tarfile # type: ignore\nimport lxml.etree # type: ignore\nfrom typing import Dict, Any, Tuple, List, Set\n\nfrom nextcloudappstore.core.api.v1.release import ReleaseConfig\nfrom nextcloudappstore.core.versioning import pad_max_version, pad_min_version\nfrom rest_framework.exceptions import APIException # type: ignore\n\n\nclass MaxSizeAppMetadataXmlException(APIException):\n pass\n\n\nclass InvalidAppMetadataXmlException(APIException):\n pass\n\n\nclass UnsupportedAppArchiveException(APIException):\n pass\n\n\nclass InvalidAppPackageStructureException(APIException):\n pass\n\n\nclass XMLSyntaxError(APIException):\n pass\n\n\nclass GunZipAppMetadataExtractor:\n def __init__(self, config: ReleaseConfig) -> None:\n \"\"\"\n :argument config the config\n \"\"\"\n self.config = config\n self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*(?:/.*)*$')\n\n def extract_app_metadata(self, archive_path: str) -> Tuple[str, str]:\n \"\"\"\n Extracts the info.xml from an tar.gz archive\n :argument archive_path the path to the tar.gz archive\n :raises InvalidAppPackageStructureException if the first level folder\n does not equal the app_id or no info.xml file could be found in the\n appinfo folder\n :return the info.xml as string\n \"\"\"\n if not tarfile.is_tarfile(archive_path):\n msg = '%s is not a valid tar.gz archive ' % archive_path\n raise UnsupportedAppArchiveException(msg)\n\n with tarfile.open(archive_path, 'r:gz') as tar:\n result = self._parse_archive(tar)\n return result\n\n def _parse_archive(self, tar: Any) -> Tuple[str, str]:\n folders = self._find_app_folders(tar.getnames())\n if len(folders) > 1:\n msg = 'More than one possible app folder found'\n raise InvalidAppPackageStructureException(msg)\n elif len(folders) == 0:\n msg = 'No possible app folder found. 
App folder must contain ' \\\n 'only lowercase ASCII characters or underscores'\n raise InvalidAppPackageStructureException(msg)\n\n app_id = folders.pop()\n info_path = '%s/appinfo/info.xml' % app_id\n try:\n info_member = tar.getmember(info_path)\n possible_links = [info_member]\n # its complicated, sometimes there are single members, sometimes\n # there aren't\n try:\n possible_links.append(tar.getmember(app_id))\n except KeyError:\n pass\n try:\n possible_links.append(tar.getmember('%s/appinfo' % app_id))\n except KeyError:\n pass\n\n for possible_link in possible_links:\n if possible_link.issym() or possible_link.islnk():\n msg = 'Symlinks and hard links can not be used for %s' % \\\n possible_link\n raise InvalidAppPackageStructureException(msg)\n info_file = tar.extractfile(info_member)\n contents = self._stream_read_file(info_file,\n self.config.max_info_size)\n return contents, app_id\n except KeyError:\n msg = 'Could not find %s file inside the archive' % info_path\n raise InvalidAppPackageStructureException(msg)\n\n def _stream_read_file(self, info_file: Any, max_info_size: int) -> str:\n \"\"\"\n Instead of reading everything in one go which is vulnerable to\n zip bombs, stream and accumulate the bytes\n :argument info_file: buffered io reader\n :argument max_info_size: maximum file size in bytes\n :raises MaxSizeAppMetadataXmlException if the maximum size was reached\n :return: the parsed info.xml\n \"\"\"\n # FIXME: If someone finds a less ugly version, please feel free to\n # improve it\n size = 0\n result = b''\n while True:\n size += 1024\n if size > max_info_size:\n msg = 'info.xml was bigger than allowed %i bytes' % \\\n max_info_size\n raise MaxSizeAppMetadataXmlException(msg)\n\n chunk = info_file.read(1024)\n if not chunk:\n break\n result += chunk\n\n return result.decode('utf-8')\n\n def _find_app_folders(self, members: List[str]) -> Set[str]:\n regex = self.app_folder_regex\n matching_members = filter(lambda f: re.match(regex, f), members)\n folders = map(lambda m: m.split('/')[0], matching_members)\n return set(folders)\n\n\ndef element_to_dict(element: Any) -> Dict:\n type = element.get('type')\n key = element.tag.replace('-', '_')\n if type == 'int':\n return {key: int(element.text)}\n elif type == 'list':\n return {key: list(map(element_to_dict, element.iterchildren()))}\n elif type == 'min-version':\n return {key: pad_min_version(element.text)}\n elif type == 'max-version':\n return {key: pad_max_version(element.text)}\n elif len(list(element)) > 0:\n contents = {}\n for child in element.iterchildren():\n contents.update(element_to_dict(child))\n return {key: contents}\n else:\n return {key: element.text}\n\n\ndef parse_app_metadata(xml: str, schema: str, pre_xslt: str,\n xslt: str) -> Dict:\n \"\"\"\n Parses, validates and maps the xml onto a dict\n :argument xml the info.xml string to parse\n :argument schema the schema xml as string\n :argument pre_xslt xslt which is run before validation to ensure that\n everything is in the correct order and that unknown elements are excluded\n :argument xslt the xslt to transform it to a matching structure\n :raises InvalidAppMetadataXmlException if the schema does not validate\n :return the parsed xml as dict\n \"\"\"\n parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True,\n remove_comments=True, load_dtd=False,\n remove_blank_text=True, dtd_validation=False\n )\n try:\n doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)\n except lxml.etree.XMLSyntaxError as e:\n msg = 'info.xml contains 
malformed xml: %s' % e\n raise XMLSyntaxError(msg)\n for _ in doc.iter(lxml.etree.Entity):\n raise InvalidAppMetadataXmlException('Must not contain entities')\n pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))\n pre_transformed_doc = pre_transform(doc)\n schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)\n schema = lxml.etree.XMLSchema(schema_doc)\n try:\n schema.assertValid(pre_transformed_doc) # type: ignore\n except lxml.etree.DocumentInvalid as e:\n msg = 'info.xml did not validate: %s' % e\n raise InvalidAppMetadataXmlException(msg)\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n transformed_doc = transform(pre_transformed_doc)\n mapped = element_to_dict(transformed_doc.getroot())\n return mapped\n", "path": "nextcloudappstore/core/api/v1/release/parser.py"}], "after_files": [{"content": "import re\nimport tarfile # type: ignore\nimport lxml.etree # type: ignore\nfrom typing import Dict, Any, Tuple, List, Set\n\nfrom nextcloudappstore.core.api.v1.release import ReleaseConfig\nfrom nextcloudappstore.core.versioning import pad_max_version, pad_min_version\nfrom rest_framework.exceptions import APIException # type: ignore\n\n\nclass MaxSizeAppMetadataXmlException(APIException):\n pass\n\n\nclass InvalidAppMetadataXmlException(APIException):\n pass\n\n\nclass UnsupportedAppArchiveException(APIException):\n pass\n\n\nclass InvalidAppPackageStructureException(APIException):\n pass\n\n\nclass XMLSyntaxError(APIException):\n pass\n\n\nclass GunZipAppMetadataExtractor:\n def __init__(self, config: ReleaseConfig) -> None:\n \"\"\"\n :argument config the config\n \"\"\"\n self.config = config\n self.app_folder_regex = re.compile(r'^[a-z]+[a-z_]*(?:/.*)*$')\n\n def extract_app_metadata(self, archive_path: str) -> Tuple[str, str]:\n \"\"\"\n Extracts the info.xml from an tar.gz archive\n :argument archive_path the path to the tar.gz archive\n :raises InvalidAppPackageStructureException if the first level folder\n does not equal the app_id or no info.xml file could be found in the\n appinfo folder\n :return the info.xml as string\n \"\"\"\n if not tarfile.is_tarfile(archive_path):\n msg = '%s is not a valid tar.gz archive ' % archive_path\n raise UnsupportedAppArchiveException(msg)\n\n with tarfile.open(archive_path, 'r:gz') as tar:\n result = self._parse_archive(tar)\n return result\n\n def _parse_archive(self, tar: Any) -> Tuple[str, str]:\n folders = self._find_app_folders(tar.getnames())\n if len(folders) > 1:\n msg = 'More than one possible app folder found'\n raise InvalidAppPackageStructureException(msg)\n elif len(folders) == 0:\n msg = 'No possible app folder found. 
App folder must contain ' \\\n 'only lowercase ASCII characters or underscores'\n raise InvalidAppPackageStructureException(msg)\n\n app_id = folders.pop()\n info_path = '%s/appinfo/info.xml' % app_id\n try:\n info_member = tar.getmember(info_path)\n possible_links = [info_member]\n # its complicated, sometimes there are single members, sometimes\n # there aren't\n try:\n possible_links.append(tar.getmember(app_id))\n except KeyError:\n pass\n try:\n possible_links.append(tar.getmember('%s/appinfo' % app_id))\n except KeyError:\n pass\n\n for possible_link in possible_links:\n if possible_link.issym() or possible_link.islnk():\n msg = 'Symlinks and hard links can not be used for %s' % \\\n possible_link\n raise InvalidAppPackageStructureException(msg)\n info_file = tar.extractfile(info_member)\n contents = self._stream_read_file(info_file,\n self.config.max_info_size)\n return contents, app_id\n except KeyError:\n msg = 'Could not find %s file inside the archive' % info_path\n raise InvalidAppPackageStructureException(msg)\n\n def _stream_read_file(self, info_file: Any, max_info_size: int) -> str:\n \"\"\"\n Instead of reading everything in one go which is vulnerable to\n zip bombs, stream and accumulate the bytes\n :argument info_file: buffered io reader\n :argument max_info_size: maximum file size in bytes\n :raises MaxSizeAppMetadataXmlException if the maximum size was reached\n :return: the parsed info.xml\n \"\"\"\n # FIXME: If someone finds a less ugly version, please feel free to\n # improve it\n size = 0\n result = b''\n while True:\n size += 1024\n if size > max_info_size:\n msg = 'info.xml was bigger than allowed %i bytes' % \\\n max_info_size\n raise MaxSizeAppMetadataXmlException(msg)\n\n chunk = info_file.read(1024)\n if not chunk:\n break\n result += chunk\n\n return result.decode('utf-8')\n\n def _find_app_folders(self, members: List[str]) -> Set[str]:\n regex = self.app_folder_regex\n matching_members = filter(lambda f: re.match(regex, f), members)\n folders = map(lambda m: m.split('/')[0], matching_members)\n return set(folders)\n\n\ndef element_to_dict(element: Any) -> Dict:\n type = element.get('type')\n key = element.tag.replace('-', '_')\n if type == 'int':\n return {key: int(element.text)}\n elif type == 'list':\n return {key: list(map(element_to_dict, element.iterchildren()))}\n elif type == 'min-version':\n return {key: pad_min_version(element.text)}\n elif type == 'max-version':\n return {key: pad_max_version(element.text)}\n elif len(list(element)) > 0:\n contents = {}\n for child in element.iterchildren():\n contents.update(element_to_dict(child))\n return {key: contents}\n else:\n return {key: element.text}\n\n\ndef parse_app_metadata(xml: str, schema: str, pre_xslt: str,\n xslt: str) -> Dict:\n \"\"\"\n Parses, validates and maps the xml onto a dict\n :argument xml the info.xml string to parse\n :argument schema the schema xml as string\n :argument pre_xslt xslt which is run before validation to ensure that\n everything is in the correct order and that unknown elements are excluded\n :argument xslt the xslt to transform it to a matching structure\n :raises InvalidAppMetadataXmlException if the schema does not validate\n :return the parsed xml as dict\n \"\"\"\n parser = lxml.etree.XMLParser(resolve_entities=False, no_network=True,\n remove_comments=True, load_dtd=False,\n remove_blank_text=True, dtd_validation=False\n )\n try:\n doc = lxml.etree.fromstring(bytes(xml, encoding='utf-8'), parser)\n except lxml.etree.XMLSyntaxError as e:\n msg = 'info.xml contains 
malformed xml: %s' % e\n raise XMLSyntaxError(msg)\n for _ in doc.iter(lxml.etree.Entity):\n raise InvalidAppMetadataXmlException('Must not contain entities')\n pre_transform = lxml.etree.XSLT(lxml.etree.XML(pre_xslt))\n pre_transformed_doc = pre_transform(doc)\n schema_doc = lxml.etree.fromstring(bytes(schema, encoding='utf-8'), parser)\n schema = lxml.etree.XMLSchema(schema_doc)\n try:\n schema.assertValid(pre_transformed_doc) # type: ignore\n except lxml.etree.DocumentInvalid as e:\n msg = 'info.xml did not validate: %s' % e\n raise InvalidAppMetadataXmlException(msg)\n transform = lxml.etree.XSLT(lxml.etree.XML(xslt))\n transformed_doc = transform(pre_transformed_doc)\n mapped = element_to_dict(transformed_doc.getroot())\n validate_english_present(mapped)\n return mapped\n\n\ndef validate_english_present(info: Dict) -> None:\n \"\"\"\n Validates that name, summary and description are present in english\n :param info: the parsed xml\n :raises: InvalidAppMetadataXmlException if at least one of the required\n fields is not present in english\n \"\"\"\n app = info['app']\n translated_fields = ['name', 'summary', 'description']\n for field in translated_fields:\n if 'en' not in app[field]:\n msg = 'At least one element \"%s\" with lang \"en\" required' % field\n raise InvalidAppMetadataXmlException(msg)\n", "path": "nextcloudappstore/core/api/v1/release/parser.py"}]}
2,326
271
gh_patches_debug_18543
rasdani/github-patches
git_diff
mne-tools__mne-python-9055
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- use bibtex in multi_comp.py convert references in `mne/stats/multi_comp.py` to use footcite / footbibliography --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mne/stats/multi_comp.py` Content: ``` 1 # Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis 2 # Alexandre Gramfort <[email protected]> 3 # 4 # Code borrowed from statsmodels 5 # 6 # License: BSD (3-clause) 7 8 import numpy as np 9 10 11 def _ecdf(x): 12 """No frills empirical cdf used in fdrcorrection.""" 13 nobs = len(x) 14 return np.arange(1, nobs + 1) / float(nobs) 15 16 17 def fdr_correction(pvals, alpha=0.05, method='indep'): 18 """P-value correction with False Discovery Rate (FDR). 19 20 Correction for multiple comparison using FDR [1]_. 21 22 This covers Benjamini/Hochberg for independent or positively correlated and 23 Benjamini/Yekutieli for general or negatively correlated tests. 24 25 Parameters 26 ---------- 27 pvals : array_like 28 Set of p-values of the individual tests. 29 alpha : float 30 Error rate. 31 method : 'indep' | 'negcorr' 32 If 'indep' it implements Benjamini/Hochberg for independent or if 33 'negcorr' it corresponds to Benjamini/Yekutieli. 34 35 Returns 36 ------- 37 reject : array, bool 38 True if a hypothesis is rejected, False if not. 39 pval_corrected : array 40 P-values adjusted for multiple hypothesis testing to limit FDR. 41 42 References 43 ---------- 44 .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps 45 in functional neuroimaging using the false discovery rate. 46 Neuroimage. 2002 Apr;15(4):870-8. 47 """ 48 pvals = np.asarray(pvals) 49 shape_init = pvals.shape 50 pvals = pvals.ravel() 51 52 pvals_sortind = np.argsort(pvals) 53 pvals_sorted = pvals[pvals_sortind] 54 sortrevind = pvals_sortind.argsort() 55 56 if method in ['i', 'indep', 'p', 'poscorr']: 57 ecdffactor = _ecdf(pvals_sorted) 58 elif method in ['n', 'negcorr']: 59 cm = np.sum(1. / np.arange(1, len(pvals_sorted) + 1)) 60 ecdffactor = _ecdf(pvals_sorted) / cm 61 else: 62 raise ValueError("Method should be 'indep' and 'negcorr'") 63 64 reject = pvals_sorted < (ecdffactor * alpha) 65 if reject.any(): 66 rejectmax = max(np.nonzero(reject)[0]) 67 else: 68 rejectmax = 0 69 reject[:rejectmax] = True 70 71 pvals_corrected_raw = pvals_sorted / ecdffactor 72 pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1] 73 pvals_corrected[pvals_corrected > 1.0] = 1.0 74 pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init) 75 reject = reject[sortrevind].reshape(shape_init) 76 return reject, pvals_corrected 77 78 79 def bonferroni_correction(pval, alpha=0.05): 80 """P-value correction with Bonferroni method. 81 82 Parameters 83 ---------- 84 pval : array_like 85 Set of p-values of the individual tests. 86 alpha : float 87 Error rate. 88 89 Returns 90 ------- 91 reject : array, bool 92 True if a hypothesis is rejected, False if not. 93 pval_corrected : array 94 P-values adjusted for multiple hypothesis testing to limit FDR. 95 """ 96 pval = np.asarray(pval) 97 pval_corrected = pval * float(pval.size) 98 # p-values must not be larger than 1. 99 pval_corrected = pval_corrected.clip(max=1.) 
100 reject = pval_corrected < alpha 101 return reject, pval_corrected 102 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py --- a/mne/stats/multi_comp.py +++ b/mne/stats/multi_comp.py @@ -17,7 +17,7 @@ def fdr_correction(pvals, alpha=0.05, method='indep'): """P-value correction with False Discovery Rate (FDR). - Correction for multiple comparison using FDR [1]_. + Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`. This covers Benjamini/Hochberg for independent or positively correlated and Benjamini/Yekutieli for general or negatively correlated tests. @@ -41,9 +41,7 @@ References ---------- - .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps - in functional neuroimaging using the false discovery rate. - Neuroimage. 2002 Apr;15(4):870-8. + .. footbibliography:: """ pvals = np.asarray(pvals) shape_init = pvals.shape
{"golden_diff": "diff --git a/mne/stats/multi_comp.py b/mne/stats/multi_comp.py\n--- a/mne/stats/multi_comp.py\n+++ b/mne/stats/multi_comp.py\n@@ -17,7 +17,7 @@\n def fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n \n- Correction for multiple comparison using FDR [1]_.\n+ Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.\n \n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n@@ -41,9 +41,7 @@\n \n References\n ----------\n- .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps\n- in functional neuroimaging using the false discovery rate.\n- Neuroimage. 2002 Apr;15(4):870-8.\n+ .. footbibliography::\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n", "issue": "use bibtex in multi_comp.py\nconvert references in `mne/stats/multi_comp.py` to use footcite / footbibliography\r\n\n", "before_files": [{"content": "# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis\n# Alexandre Gramfort <[email protected]>\n#\n# Code borrowed from statsmodels\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\n\ndef _ecdf(x):\n \"\"\"No frills empirical cdf used in fdrcorrection.\"\"\"\n nobs = len(x)\n return np.arange(1, nobs + 1) / float(nobs)\n\n\ndef fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n\n Correction for multiple comparison using FDR [1]_.\n\n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n\n Parameters\n ----------\n pvals : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n method : 'indep' | 'negcorr'\n If 'indep' it implements Benjamini/Hochberg for independent or if\n 'negcorr' it corresponds to Benjamini/Yekutieli.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n\n References\n ----------\n .. [1] Genovese CR, Lazar NA, Nichols T. Thresholding of statistical maps\n in functional neuroimaging using the false discovery rate.\n Neuroimage. 2002 Apr;15(4):870-8.\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n pvals = pvals.ravel()\n\n pvals_sortind = np.argsort(pvals)\n pvals_sorted = pvals[pvals_sortind]\n sortrevind = pvals_sortind.argsort()\n\n if method in ['i', 'indep', 'p', 'poscorr']:\n ecdffactor = _ecdf(pvals_sorted)\n elif method in ['n', 'negcorr']:\n cm = np.sum(1. 
/ np.arange(1, len(pvals_sorted) + 1))\n ecdffactor = _ecdf(pvals_sorted) / cm\n else:\n raise ValueError(\"Method should be 'indep' and 'negcorr'\")\n\n reject = pvals_sorted < (ecdffactor * alpha)\n if reject.any():\n rejectmax = max(np.nonzero(reject)[0])\n else:\n rejectmax = 0\n reject[:rejectmax] = True\n\n pvals_corrected_raw = pvals_sorted / ecdffactor\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n pvals_corrected[pvals_corrected > 1.0] = 1.0\n pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)\n reject = reject[sortrevind].reshape(shape_init)\n return reject, pvals_corrected\n\n\ndef bonferroni_correction(pval, alpha=0.05):\n \"\"\"P-value correction with Bonferroni method.\n\n Parameters\n ----------\n pval : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n \"\"\"\n pval = np.asarray(pval)\n pval_corrected = pval * float(pval.size)\n # p-values must not be larger than 1.\n pval_corrected = pval_corrected.clip(max=1.)\n reject = pval_corrected < alpha\n return reject, pval_corrected\n", "path": "mne/stats/multi_comp.py"}], "after_files": [{"content": "# Authors: Josef Pktd and example from H Raja and rewrite from Vincent Davis\n# Alexandre Gramfort <[email protected]>\n#\n# Code borrowed from statsmodels\n#\n# License: BSD (3-clause)\n\nimport numpy as np\n\n\ndef _ecdf(x):\n \"\"\"No frills empirical cdf used in fdrcorrection.\"\"\"\n nobs = len(x)\n return np.arange(1, nobs + 1) / float(nobs)\n\n\ndef fdr_correction(pvals, alpha=0.05, method='indep'):\n \"\"\"P-value correction with False Discovery Rate (FDR).\n\n Correction for multiple comparison using FDR :footcite:`GenoveseEtAl2002`.\n\n This covers Benjamini/Hochberg for independent or positively correlated and\n Benjamini/Yekutieli for general or negatively correlated tests.\n\n Parameters\n ----------\n pvals : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n method : 'indep' | 'negcorr'\n If 'indep' it implements Benjamini/Hochberg for independent or if\n 'negcorr' it corresponds to Benjamini/Yekutieli.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n\n References\n ----------\n .. footbibliography::\n \"\"\"\n pvals = np.asarray(pvals)\n shape_init = pvals.shape\n pvals = pvals.ravel()\n\n pvals_sortind = np.argsort(pvals)\n pvals_sorted = pvals[pvals_sortind]\n sortrevind = pvals_sortind.argsort()\n\n if method in ['i', 'indep', 'p', 'poscorr']:\n ecdffactor = _ecdf(pvals_sorted)\n elif method in ['n', 'negcorr']:\n cm = np.sum(1. 
/ np.arange(1, len(pvals_sorted) + 1))\n ecdffactor = _ecdf(pvals_sorted) / cm\n else:\n raise ValueError(\"Method should be 'indep' and 'negcorr'\")\n\n reject = pvals_sorted < (ecdffactor * alpha)\n if reject.any():\n rejectmax = max(np.nonzero(reject)[0])\n else:\n rejectmax = 0\n reject[:rejectmax] = True\n\n pvals_corrected_raw = pvals_sorted / ecdffactor\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n pvals_corrected[pvals_corrected > 1.0] = 1.0\n pvals_corrected = pvals_corrected[sortrevind].reshape(shape_init)\n reject = reject[sortrevind].reshape(shape_init)\n return reject, pvals_corrected\n\n\ndef bonferroni_correction(pval, alpha=0.05):\n \"\"\"P-value correction with Bonferroni method.\n\n Parameters\n ----------\n pval : array_like\n Set of p-values of the individual tests.\n alpha : float\n Error rate.\n\n Returns\n -------\n reject : array, bool\n True if a hypothesis is rejected, False if not.\n pval_corrected : array\n P-values adjusted for multiple hypothesis testing to limit FDR.\n \"\"\"\n pval = np.asarray(pval)\n pval_corrected = pval * float(pval.size)\n # p-values must not be larger than 1.\n pval_corrected = pval_corrected.clip(max=1.)\n reject = pval_corrected < alpha\n return reject, pval_corrected\n", "path": "mne/stats/multi_comp.py"}]}
1,359
258
gh_patches_debug_30754
rasdani/github-patches
git_diff
cal-itp__benefits-441
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add logging for OAuth flows We merged the basic implementation in #414, but neglected to include any additional logging around the new flows/logic. Some ideas of what we should log: - [x] The `OAUTH_CLIENT_NAME` used - [x] The `redirect_uri` sent to the authorization server with the `authorize_redirect` request - [x] If an access token fails to be authorized --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `benefits/oauth/views.py` Content: ``` 1 from django.shortcuts import redirect 2 from django.urls import reverse 3 4 from authlib.integrations.django_client import OAuth 5 6 from benefits.core import session 7 from benefits.settings import OAUTH_CLIENT_NAME 8 9 10 if OAUTH_CLIENT_NAME: 11 _oauth = OAuth() 12 _oauth.register(OAUTH_CLIENT_NAME) 13 oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME) 14 15 16 ROUTE_AUTH = "oauth:authorize" 17 ROUTE_START = "eligibility:start" 18 ROUTE_CONFIRM = "eligibility:confirm" 19 20 21 def login(request): 22 if not oauth_client: 23 raise Exception("No OAuth client") 24 25 route = reverse(ROUTE_AUTH) 26 redirect_uri = request.build_absolute_uri(route) 27 28 return oauth_client.authorize_redirect(request, redirect_uri) 29 30 31 def authorize(request): 32 if not oauth_client: 33 raise Exception("No OAuth client") 34 35 token = oauth_client.authorize_access_token(request) 36 37 if token is None: 38 return redirect(ROUTE_START) 39 else: 40 # we are intentionally not storing anything about the user, including their token 41 session.update(request, auth=True) 42 return redirect(ROUTE_CONFIRM) 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py --- a/benefits/oauth/views.py +++ b/benefits/oauth/views.py @@ -1,3 +1,5 @@ +import logging + from django.shortcuts import redirect from django.urls import reverse @@ -7,7 +9,12 @@ from benefits.settings import OAUTH_CLIENT_NAME +logger = logging.getLogger(__name__) + + if OAUTH_CLIENT_NAME: + logger.debug(f"Using OAuth client configuration: {OAUTH_CLIENT_NAME}") + _oauth = OAuth() _oauth.register(OAUTH_CLIENT_NAME) oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME) @@ -25,6 +32,8 @@ route = reverse(ROUTE_AUTH) redirect_uri = request.build_absolute_uri(route) + logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}") + return oauth_client.authorize_redirect(request, redirect_uri) @@ -32,11 +41,14 @@ if not oauth_client: raise Exception("No OAuth client") + logger.debug("Attempting to authorize OAuth access token") token = oauth_client.authorize_access_token(request) if token is None: + logger.warning("Could not authorize OAuth access token") return redirect(ROUTE_START) else: # we are intentionally not storing anything about the user, including their token + logger.debug("OAuth access token authorized") session.update(request, auth=True) return redirect(ROUTE_CONFIRM)
{"golden_diff": "diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -1,3 +1,5 @@\n+import logging\n+\n from django.shortcuts import redirect\n from django.urls import reverse\n \n@@ -7,7 +9,12 @@\n from benefits.settings import OAUTH_CLIENT_NAME\n \n \n+logger = logging.getLogger(__name__)\n+\n+\n if OAUTH_CLIENT_NAME:\n+ logger.debug(f\"Using OAuth client configuration: {OAUTH_CLIENT_NAME}\")\n+\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n@@ -25,6 +32,8 @@\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n \n+ logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n+\n return oauth_client.authorize_redirect(request, redirect_uri)\n \n \n@@ -32,11 +41,14 @@\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n \n+ logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n \n if token is None:\n+ logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n+ logger.debug(\"OAuth access token authorized\")\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "issue": "Add logging for OAuth flows\nWe merged the basic implementation in #414, but neglected to include any additional logging around the new flows/logic.\r\n\r\nSome ideas of what we should log:\r\n\r\n- [x] The `OAUTH_CLIENT_NAME` used\r\n- [x] The `redirect_uri` sent to the authorization server with the `authorize_redirect` request\r\n- [x] If an access token fails to be authorized\n", "before_files": [{"content": "from django.shortcuts import redirect\nfrom django.urls import reverse\n\nfrom authlib.integrations.django_client import OAuth\n\nfrom benefits.core import session\nfrom benefits.settings import OAUTH_CLIENT_NAME\n\n\nif OAUTH_CLIENT_NAME:\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\n\n\ndef login(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\ndef authorize(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "path": "benefits/oauth/views.py"}], "after_files": [{"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\n\nfrom authlib.integrations.django_client import OAuth\n\nfrom benefits.core import session\nfrom benefits.settings import OAUTH_CLIENT_NAME\n\n\nlogger = logging.getLogger(__name__)\n\n\nif OAUTH_CLIENT_NAME:\n logger.debug(f\"Using OAuth client configuration: {OAUTH_CLIENT_NAME}\")\n\n _oauth = OAuth()\n _oauth.register(OAUTH_CLIENT_NAME)\n oauth_client = _oauth.create_client(OAUTH_CLIENT_NAME)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = 
\"eligibility:confirm\"\n\n\ndef login(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = request.build_absolute_uri(route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\ndef authorize(request):\n if not oauth_client:\n raise Exception(\"No OAuth client\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n else:\n # we are intentionally not storing anything about the user, including their token\n logger.debug(\"OAuth access token authorized\")\n session.update(request, auth=True)\n return redirect(ROUTE_CONFIRM)\n", "path": "benefits/oauth/views.py"}]}
662
330
gh_patches_debug_13770
rasdani/github-patches
git_diff
openstates__openstates-scrapers-1435
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RI: state legislator first names contain middle initials For example, https://openstates.org/api/v1/legislators/RIL000179/ has first_name of "Moira J." and nothing in middle_name for Moira J. Walsh. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openstates/ri/legislators.py` Content: ``` 1 import re 2 import string 3 4 from billy.scrape import NoDataForPeriod 5 from billy.scrape.legislators import LegislatorScraper, Legislator 6 from openstates.utils import LXMLMixin 7 8 import lxml.html 9 import xlrd 10 11 excel_mapping = { 12 'district': 0, 13 'town_represented': 1, 14 'full_name': 2, 15 'party': 3, 16 'address': 4, 17 'email': 5, 18 } 19 20 class RILegislatorScraper(LegislatorScraper, LXMLMixin): 21 jurisdiction = 'ri' 22 latest_only = True 23 24 def scrape(self, chamber, term): 25 if chamber == 'upper': 26 url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls') 27 rep_type = 'Senator' 28 source_url = 'http://www.rilin.state.ri.us/senators/default.aspx' 29 source_url_title_replacement = rep_type 30 contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp' 31 elif chamber == 'lower': 32 url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls') 33 rep_type = 'Representative' 34 source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx' 35 source_url_title_replacement = 'Rep. ' 36 contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp' 37 38 self.urlretrieve(url, 'ri_leg.xls') 39 40 wb = xlrd.open_workbook('ri_leg.xls') 41 sh = wb.sheet_by_index(0) 42 43 # This isn't perfect but it's cheap and better than using the 44 # XLS doc as the source URL for all legislators. 
45 # 374: RI: legislator url 46 leg_source_url_map = {} 47 leg_page = self.lxmlize(source_url) 48 49 for link in leg_page.xpath('//td[@class="ms-vb2"]'): 50 leg_name = link.text_content().replace(source_url_title_replacement,'') 51 leg_url = link.xpath("..//a")[0].attrib['href'] 52 leg_source_url_map[leg_name] = leg_url 53 54 for rownum in xrange(1, sh.nrows): 55 d = {} 56 for field, col_num in excel_mapping.iteritems(): 57 d[field] = sh.cell(rownum, col_num).value 58 59 if d['full_name'].upper() == "VACANT": 60 self.warning( 61 "District {}'s seat is vacant".format(int(d['district']))) 62 continue 63 64 slug = re.match( 65 "(?P<class>sen|rep)-(?P<slug>.*)@(rilin\.state\.ri\.us|rilegislature\.gov)", d['email'] 66 ) 67 68 if 'asp' in d['email']: 69 d['email'] = None 70 71 if d['email'] is not None: 72 info = slug.groupdict() 73 info['chamber'] = "senators" if info['class'] == 'sen' else "representatives" 74 75 url = ("http://www.rilin.state.ri.us/{chamber}/" 76 "{slug}/Pages/Biography.aspx".format(**info)) 77 78 dist = str(int(d['district'])) 79 district_name = dist 80 81 assert d['full_name'].startswith(rep_type), "Improper name found" 82 full_name = re.sub(r"^{}(?=\s?[A-Z].*$)".format(rep_type), '', d['full_name']).strip() 83 translate = { 84 "Democrat" : "Democratic", 85 "Republican" : "Republican", 86 "Independent" : "Independent" 87 } 88 89 homepage_url = None 90 url_names = lxml.html.fromstring(self.get(source_url).text) 91 url_names = url_names.xpath('//td[@class="ms-vb2"]/a/@href') 92 modified_name = re.sub(r'[^\w\s]', '', full_name) 93 modified_name = modified_name.replace(' ', '').strip('').lower() 94 95 for el in url_names: 96 if 'default.aspx' in el: 97 el = el.replace('default.aspx', '') 98 el = el.strip('') 99 if el[-1] == '/': 100 el = el[:-1] 101 el = el.lower() 102 url_name_array = el.split('/') 103 if url_name_array[-1] in modified_name: 104 #remove '/default.aspx' and add last name 105 homepage_url = source_url[:-12] + url_name_array[-1] 106 107 kwargs = { 108 "town_represented": d['town_represented'], 109 } 110 111 contact = self.lxmlize(contact_url) 112 contact_phone = contact.xpath('//tr[@valign="TOP"]//td[@class="bodyCopy"]/text() | //td[@class="bodyCopy"]//center/text()') 113 114 phone = None 115 for el in contact_phone: 116 if len(el) <= 2 and dist == el: 117 number = contact_phone.index(el) 118 phone = contact_phone[number + 2] 119 phone = phone.strip() 120 121 email = None 122 if d['email'] is not None: 123 email = d['email'] 124 125 if homepage_url is not None: 126 kwargs['url'] = homepage_url 127 128 if d['address'] is '': 129 d['address'] = 'No Address Found' 130 131 leg = Legislator(term, chamber, district_name, full_name, 132 '', '', '', 133 translate[d['party']], 134 **kwargs) 135 136 leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email) 137 leg.add_source(source_url) 138 leg.add_source(contact_url) 139 if homepage_url: 140 leg.add_source(homepage_url) 141 self.save_legislator(leg) 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openstates/ri/legislators.py b/openstates/ri/legislators.py --- a/openstates/ri/legislators.py +++ b/openstates/ri/legislators.py @@ -128,8 +128,14 @@ if d['address'] is '': d['address'] = 'No Address Found' + # RI is very fond of First M. Last name formats and + # they're being misparsed upstream, so fix here + (first, middle, last) = ('','','') + if re.match(r'^\S+\s[A-Z]\.\s\S+$', full_name): + (first, middle, last) = full_name.split() + leg = Legislator(term, chamber, district_name, full_name, - '', '', '', + first, last, middle, translate[d['party']], **kwargs)
{"golden_diff": "diff --git a/openstates/ri/legislators.py b/openstates/ri/legislators.py\n--- a/openstates/ri/legislators.py\n+++ b/openstates/ri/legislators.py\n@@ -128,8 +128,14 @@\n if d['address'] is '':\n d['address'] = 'No Address Found'\n \n+ # RI is very fond of First M. Last name formats and\n+ # they're being misparsed upstream, so fix here\n+ (first, middle, last) = ('','','')\n+ if re.match(r'^\\S+\\s[A-Z]\\.\\s\\S+$', full_name):\n+ (first, middle, last) = full_name.split()\n+ \n leg = Legislator(term, chamber, district_name, full_name,\n- '', '', '',\n+ first, last, middle,\n translate[d['party']],\n **kwargs)\n", "issue": "RI: state legislator first names contain middle initials\nFor example, https://openstates.org/api/v1/legislators/RIL000179/ has first_name of \"Moira J.\" and nothing in middle_name for Moira J. Walsh.\n", "before_files": [{"content": "import re\nimport string\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\nimport lxml.html\nimport xlrd\n\nexcel_mapping = {\n 'district': 0,\n 'town_represented': 1,\n 'full_name': 2,\n 'party': 3,\n 'address': 4,\n 'email': 5,\n}\n\nclass RILegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'ri'\n latest_only = True\n\n def scrape(self, chamber, term):\n if chamber == 'upper':\n url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls')\n rep_type = 'Senator'\n source_url = 'http://www.rilin.state.ri.us/senators/default.aspx'\n source_url_title_replacement = rep_type\n contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp'\n elif chamber == 'lower':\n url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls')\n rep_type = 'Representative'\n source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx'\n source_url_title_replacement = 'Rep. 
'\n contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp'\n\n self.urlretrieve(url, 'ri_leg.xls')\n\n wb = xlrd.open_workbook('ri_leg.xls')\n sh = wb.sheet_by_index(0)\n\n # This isn't perfect but it's cheap and better than using the\n # XLS doc as the source URL for all legislators.\n # 374: RI: legislator url\n leg_source_url_map = {}\n leg_page = self.lxmlize(source_url)\n\n for link in leg_page.xpath('//td[@class=\"ms-vb2\"]'):\n leg_name = link.text_content().replace(source_url_title_replacement,'')\n leg_url = link.xpath(\"..//a\")[0].attrib['href']\n leg_source_url_map[leg_name] = leg_url\n\n for rownum in xrange(1, sh.nrows):\n d = {}\n for field, col_num in excel_mapping.iteritems():\n d[field] = sh.cell(rownum, col_num).value\n\n if d['full_name'].upper() == \"VACANT\":\n self.warning(\n \"District {}'s seat is vacant\".format(int(d['district'])))\n continue\n\n slug = re.match(\n \"(?P<class>sen|rep)-(?P<slug>.*)@(rilin\\.state\\.ri\\.us|rilegislature\\.gov)\", d['email']\n )\n \n if 'asp' in d['email']:\n d['email'] = None\n\n if d['email'] is not None:\n info = slug.groupdict()\n info['chamber'] = \"senators\" if info['class'] == 'sen' else \"representatives\"\n\n url = (\"http://www.rilin.state.ri.us/{chamber}/\"\n \"{slug}/Pages/Biography.aspx\".format(**info))\n\n dist = str(int(d['district']))\n district_name = dist\n\n assert d['full_name'].startswith(rep_type), \"Improper name found\"\n full_name = re.sub(r\"^{}(?=\\s?[A-Z].*$)\".format(rep_type), '', d['full_name']).strip()\n translate = {\n \"Democrat\" : \"Democratic\",\n \"Republican\" : \"Republican\",\n \"Independent\" : \"Independent\"\n }\n\n homepage_url = None\n url_names = lxml.html.fromstring(self.get(source_url).text)\n url_names = url_names.xpath('//td[@class=\"ms-vb2\"]/a/@href')\n modified_name = re.sub(r'[^\\w\\s]', '', full_name)\n modified_name = modified_name.replace(' ', '').strip('').lower()\n\n for el in url_names:\n if 'default.aspx' in el:\n el = el.replace('default.aspx', '')\n el = el.strip('')\n if el[-1] == '/':\n el = el[:-1]\n el = el.lower()\n url_name_array = el.split('/')\n if url_name_array[-1] in modified_name:\n #remove '/default.aspx' and add last name\n homepage_url = source_url[:-12] + url_name_array[-1]\n\n kwargs = {\n \"town_represented\": d['town_represented'],\n }\n\n contact = self.lxmlize(contact_url)\n contact_phone = contact.xpath('//tr[@valign=\"TOP\"]//td[@class=\"bodyCopy\"]/text() | //td[@class=\"bodyCopy\"]//center/text()')\n\n phone = None\n for el in contact_phone:\n if len(el) <= 2 and dist == el:\n number = contact_phone.index(el)\n phone = contact_phone[number + 2]\n phone = phone.strip()\n\n email = None\n if d['email'] is not None:\n email = d['email']\n\n if homepage_url is not None:\n kwargs['url'] = homepage_url\n\n if d['address'] is '':\n d['address'] = 'No Address Found'\n\n leg = Legislator(term, chamber, district_name, full_name,\n '', '', '',\n translate[d['party']],\n **kwargs)\n\n leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email)\n leg.add_source(source_url)\n leg.add_source(contact_url)\n if homepage_url:\n leg.add_source(homepage_url)\n self.save_legislator(leg)\n", "path": "openstates/ri/legislators.py"}], "after_files": [{"content": "import re\nimport string\n\nfrom billy.scrape import NoDataForPeriod\nfrom billy.scrape.legislators import LegislatorScraper, Legislator\nfrom openstates.utils import LXMLMixin\n\nimport lxml.html\nimport xlrd\n\nexcel_mapping = {\n 'district': 
0,\n 'town_represented': 1,\n 'full_name': 2,\n 'party': 3,\n 'address': 4,\n 'email': 5,\n}\n\nclass RILegislatorScraper(LegislatorScraper, LXMLMixin):\n jurisdiction = 'ri'\n latest_only = True\n\n def scrape(self, chamber, term):\n if chamber == 'upper':\n url = ('http://webserver.rilin.state.ri.us/Documents/Senators.xls')\n rep_type = 'Senator'\n source_url = 'http://www.rilin.state.ri.us/senators/default.aspx'\n source_url_title_replacement = rep_type\n contact_url = 'http://webserver.rilin.state.ri.us/Email/SenEmailListDistrict.asp'\n elif chamber == 'lower':\n url = ('http://webserver.rilin.state.ri.us/Documents/Representatives.xls')\n rep_type = 'Representative'\n source_url = 'http://www.rilin.state.ri.us/representatives/default.aspx'\n source_url_title_replacement = 'Rep. '\n contact_url = 'http://webserver.rilin.state.ri.us/Email/RepEmailListDistrict.asp'\n\n self.urlretrieve(url, 'ri_leg.xls')\n\n wb = xlrd.open_workbook('ri_leg.xls')\n sh = wb.sheet_by_index(0)\n\n # This isn't perfect but it's cheap and better than using the\n # XLS doc as the source URL for all legislators.\n # 374: RI: legislator url\n leg_source_url_map = {}\n leg_page = self.lxmlize(source_url)\n\n for link in leg_page.xpath('//td[@class=\"ms-vb2\"]'):\n leg_name = link.text_content().replace(source_url_title_replacement,'')\n leg_url = link.xpath(\"..//a\")[0].attrib['href']\n leg_source_url_map[leg_name] = leg_url\n\n for rownum in xrange(1, sh.nrows):\n d = {}\n for field, col_num in excel_mapping.iteritems():\n d[field] = sh.cell(rownum, col_num).value\n\n if d['full_name'].upper() == \"VACANT\":\n self.warning(\n \"District {}'s seat is vacant\".format(int(d['district'])))\n continue\n\n slug = re.match(\n \"(?P<class>sen|rep)-(?P<slug>.*)@(rilin\\.state\\.ri\\.us|rilegislature\\.gov)\", d['email']\n )\n \n if 'asp' in d['email']:\n d['email'] = None\n\n if d['email'] is not None:\n info = slug.groupdict()\n info['chamber'] = \"senators\" if info['class'] == 'sen' else \"representatives\"\n\n url = (\"http://www.rilin.state.ri.us/{chamber}/\"\n \"{slug}/Pages/Biography.aspx\".format(**info))\n\n dist = str(int(d['district']))\n district_name = dist\n\n assert d['full_name'].startswith(rep_type), \"Improper name found\"\n full_name = re.sub(r\"^{}(?=\\s?[A-Z].*$)\".format(rep_type), '', d['full_name']).strip()\n translate = {\n \"Democrat\" : \"Democratic\",\n \"Republican\" : \"Republican\",\n \"Independent\" : \"Independent\"\n }\n\n homepage_url = None\n url_names = lxml.html.fromstring(self.get(source_url).text)\n url_names = url_names.xpath('//td[@class=\"ms-vb2\"]/a/@href')\n modified_name = re.sub(r'[^\\w\\s]', '', full_name)\n modified_name = modified_name.replace(' ', '').strip('').lower()\n\n for el in url_names:\n if 'default.aspx' in el:\n el = el.replace('default.aspx', '')\n el = el.strip('')\n if el[-1] == '/':\n el = el[:-1]\n el = el.lower()\n url_name_array = el.split('/')\n if url_name_array[-1] in modified_name:\n #remove '/default.aspx' and add last name\n homepage_url = source_url[:-12] + url_name_array[-1]\n\n kwargs = {\n \"town_represented\": d['town_represented'],\n }\n\n contact = self.lxmlize(contact_url)\n contact_phone = contact.xpath('//tr[@valign=\"TOP\"]//td[@class=\"bodyCopy\"]/text() | //td[@class=\"bodyCopy\"]//center/text()')\n\n phone = None\n for el in contact_phone:\n if len(el) <= 2 and dist == el:\n number = contact_phone.index(el)\n phone = contact_phone[number + 2]\n phone = phone.strip()\n\n email = None\n if d['email'] is not None:\n email = d['email']\n\n 
if homepage_url is not None:\n kwargs['url'] = homepage_url\n\n if d['address'] is '':\n d['address'] = 'No Address Found'\n\n # RI is very fond of First M. Last name formats and\n # they're being misparsed upstream, so fix here\n (first, middle, last) = ('','','')\n if re.match(r'^\\S+\\s[A-Z]\\.\\s\\S+$', full_name):\n (first, middle, last) = full_name.split()\n \n leg = Legislator(term, chamber, district_name, full_name,\n first, last, middle,\n translate[d['party']],\n **kwargs)\n\n leg.add_office('district', 'Dictrict Office', address=d['address'], phone=phone, email=email)\n leg.add_source(source_url)\n leg.add_source(contact_url)\n if homepage_url:\n leg.add_source(homepage_url)\n self.save_legislator(leg)\n", "path": "openstates/ri/legislators.py"}]}
1,904
203
gh_patches_debug_1787
rasdani/github-patches
git_diff
dbt-labs__dbt-core-9068
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [CT-3377] [Regression] `dbt deps` fails on tarball dependencies ### Is this a regression in a recent version of dbt-core? - [X] I believe this is a regression in dbt-core functionality - [X] I have searched the existing issues, and I could not find an existing issue for this regression ### Current Behavior When `dependencies.yml` includes a tarball dependency, I get an error message from `dbt deps`: ``` 11:18:06 Running with dbt=1.7.1 11:18:06 Updating lock file in file path: /workspace/dbt-deps-tarball-failure/asdf/package-lock.yml 11:18:06 Encountered an error: Runtime Error The packages.yml file in this project is malformed. Please double check the contents of this file and fix any errors before retrying. You can find more information on the syntax for this file here: https://docs.getdbt.com/docs/package-management Validator Error: dbt_utils was not found in the package index. Packages on the index require a namespace, e.g dbt-labs/dbt_utils ``` ### Expected/Previous Behavior Expected output: ``` 11:27:03 Running with dbt=1.6.8 11:27:03 Installing dbt_utils 11:27:03 Installed from tarball (url: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6) ``` The validator should - not check the index for tarball dependencies - not validate the `namespace/package-name` for tarball dependencies - mention the correct filename (this is a minor thing) ### Steps To Reproduce 1. In a new dbt project 2. With the following `dependencies.yml`: ```yaml packages: - tarball: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6 name: 'dbt_utils' ``` 3. Run `dbt deps` 4. See error message above ### Relevant log output _No response_ ### Environment ```markdown - OS: Ubuntu 22.04.3 - Python: 3.11.1 - dbt-core (latest working version): 1.6.8 - dbt-core (earliest regression version): 1.7.0 - dbt-core (latest version): 1.7.1 ``` ### Which database adapter are you using with dbt? _No response_ ### Additional Context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/dbt/deps/tarball.py` Content: ``` 1 from typing import Dict 2 3 from dbt.contracts.project import RegistryPackageMetadata, TarballPackage 4 from dbt.deps.base import PinnedPackage, UnpinnedPackage 5 6 7 class TarballPackageMixin: 8 def __init__(self, tarball: str) -> None: 9 super().__init__() 10 self.tarball = tarball 11 12 @property 13 def name(self): 14 return self.tarball 15 16 def source_type(self) -> str: 17 return "tarball" 18 19 20 class TarballPinnedPackage(TarballPackageMixin, PinnedPackage): 21 def __init__(self, tarball: str, package: str) -> None: 22 super().__init__(tarball) 23 # setup to recycle RegistryPinnedPackage fns 24 self.package = package 25 self.version = "tarball" 26 27 @property 28 def name(self): 29 return self.package 30 31 def to_dict(self) -> Dict[str, str]: 32 return { 33 "tarball": self.tarball, 34 "version": self.version, 35 "package": self.package, 36 } 37 38 def get_version(self): 39 return self.version 40 41 def nice_version_name(self): 42 return f"tarball (url: {self.tarball})" 43 44 def _fetch_metadata(self, project, renderer): 45 """ 46 recycle RegistryPackageMetadata so that we can use the install and 47 download_and_untar from RegistryPinnedPackage next. 48 build RegistryPackageMetadata from info passed via packages.yml since no 49 'metadata' service exists in this case. 
50 """ 51 52 dct = { 53 "name": self.package, 54 "packages": [], # note: required by RegistryPackageMetadata 55 "downloads": {"tarball": self.tarball}, 56 } 57 58 return RegistryPackageMetadata.from_dict(dct) 59 60 def install(self, project, renderer): 61 self._install(project, renderer) 62 63 64 class TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]): 65 def __init__( 66 self, 67 tarball: str, 68 package: str, 69 ) -> None: 70 super().__init__(tarball) 71 # setup to recycle RegistryPinnedPackage fns 72 self.package = package 73 self.version = "tarball" 74 75 @classmethod 76 def from_contract(cls, contract: TarballPackage) -> "TarballUnpinnedPackage": 77 return cls(tarball=contract.tarball, package=contract.name) 78 79 def incorporate(self, other: "TarballUnpinnedPackage") -> "TarballUnpinnedPackage": 80 return TarballUnpinnedPackage(tarball=self.tarball, package=self.package) 81 82 def resolved(self) -> TarballPinnedPackage: 83 return TarballPinnedPackage(tarball=self.tarball, package=self.package) 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py --- a/core/dbt/deps/tarball.py +++ b/core/dbt/deps/tarball.py @@ -31,8 +31,7 @@ def to_dict(self) -> Dict[str, str]: return { "tarball": self.tarball, - "version": self.version, - "package": self.package, + "name": self.package, } def get_version(self):
{"golden_diff": "diff --git a/core/dbt/deps/tarball.py b/core/dbt/deps/tarball.py\n--- a/core/dbt/deps/tarball.py\n+++ b/core/dbt/deps/tarball.py\n@@ -31,8 +31,7 @@\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n- \"version\": self.version,\n- \"package\": self.package,\n+ \"name\": self.package,\n }\n \n def get_version(self):\n", "issue": "[CT-3377] [Regression] `dbt deps` fails on tarball dependencies\n### Is this a regression in a recent version of dbt-core?\n\n- [X] I believe this is a regression in dbt-core functionality\n- [X] I have searched the existing issues, and I could not find an existing issue for this regression\n\n### Current Behavior\n\nWhen `dependencies.yml` includes a tarball dependency, I get an error message from `dbt deps`:\r\n\r\n```\r\n11:18:06 Running with dbt=1.7.1\r\n11:18:06 Updating lock file in file path: /workspace/dbt-deps-tarball-failure/asdf/package-lock.yml\r\n11:18:06 Encountered an error:\r\nRuntime Error\r\n The packages.yml file in this project is malformed. Please double check\r\n the contents of this file and fix any errors before retrying.\r\n \r\n You can find more information on the syntax for this file here:\r\n https://docs.getdbt.com/docs/package-management\r\n \r\n Validator Error:\r\n dbt_utils was not found in the package index. Packages on the index require a namespace, e.g dbt-labs/dbt_utils\r\n```\n\n### Expected/Previous Behavior\n\nExpected output:\r\n```\r\n11:27:03 Running with dbt=1.6.8\r\n11:27:03 Installing dbt_utils\r\n11:27:03 Installed from tarball (url: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6)\r\n```\r\n\r\nThe validator should \r\n- not check the index for tarball dependencies\r\n- not validate the `namespace/package-name` for tarball dependencies\r\n- mention the correct filename (this is a minor thing)\n\n### Steps To Reproduce\n\n1. In a new dbt project\r\n2. With the following `dependencies.yml`:\r\n```yaml\r\npackages:\r\n - tarball: https://codeload.github.com/dbt-labs/dbt-utils/tar.gz/0.9.6\r\n name: 'dbt_utils'\r\n```\r\n3. Run `dbt deps`\r\n4. 
See error message above\n\n### Relevant log output\n\n_No response_\n\n### Environment\n\n```markdown\n- OS: Ubuntu 22.04.3\r\n- Python: 3.11.1\r\n- dbt-core (latest working version): 1.6.8\r\n- dbt-core (earliest regression version): 1.7.0\r\n- dbt-core (latest version): 1.7.1\n```\n\n\n### Which database adapter are you using with dbt?\n\n_No response_\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "from typing import Dict\n\nfrom dbt.contracts.project import RegistryPackageMetadata, TarballPackage\nfrom dbt.deps.base import PinnedPackage, UnpinnedPackage\n\n\nclass TarballPackageMixin:\n def __init__(self, tarball: str) -> None:\n super().__init__()\n self.tarball = tarball\n\n @property\n def name(self):\n return self.tarball\n\n def source_type(self) -> str:\n return \"tarball\"\n\n\nclass TarballPinnedPackage(TarballPackageMixin, PinnedPackage):\n def __init__(self, tarball: str, package: str) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @property\n def name(self):\n return self.package\n\n def to_dict(self) -> Dict[str, str]:\n return {\n \"tarball\": self.tarball,\n \"version\": self.version,\n \"package\": self.package,\n }\n\n def get_version(self):\n return self.version\n\n def nice_version_name(self):\n return f\"tarball (url: {self.tarball})\"\n\n def _fetch_metadata(self, project, renderer):\n \"\"\"\n recycle RegistryPackageMetadata so that we can use the install and\n download_and_untar from RegistryPinnedPackage next.\n build RegistryPackageMetadata from info passed via packages.yml since no\n 'metadata' service exists in this case.\n \"\"\"\n\n dct = {\n \"name\": self.package,\n \"packages\": [], # note: required by RegistryPackageMetadata\n \"downloads\": {\"tarball\": self.tarball},\n }\n\n return RegistryPackageMetadata.from_dict(dct)\n\n def install(self, project, renderer):\n self._install(project, renderer)\n\n\nclass TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):\n def __init__(\n self,\n tarball: str,\n package: str,\n ) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @classmethod\n def from_contract(cls, contract: TarballPackage) -> \"TarballUnpinnedPackage\":\n return cls(tarball=contract.tarball, package=contract.name)\n\n def incorporate(self, other: \"TarballUnpinnedPackage\") -> \"TarballUnpinnedPackage\":\n return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)\n\n def resolved(self) -> TarballPinnedPackage:\n return TarballPinnedPackage(tarball=self.tarball, package=self.package)\n", "path": "core/dbt/deps/tarball.py"}], "after_files": [{"content": "from typing import Dict\n\nfrom dbt.contracts.project import RegistryPackageMetadata, TarballPackage\nfrom dbt.deps.base import PinnedPackage, UnpinnedPackage\n\n\nclass TarballPackageMixin:\n def __init__(self, tarball: str) -> None:\n super().__init__()\n self.tarball = tarball\n\n @property\n def name(self):\n return self.tarball\n\n def source_type(self) -> str:\n return \"tarball\"\n\n\nclass TarballPinnedPackage(TarballPackageMixin, PinnedPackage):\n def __init__(self, tarball: str, package: str) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @property\n def name(self):\n return self.package\n\n def to_dict(self) -> Dict[str, str]:\n return 
{\n \"tarball\": self.tarball,\n \"name\": self.package,\n }\n\n def get_version(self):\n return self.version\n\n def nice_version_name(self):\n return f\"tarball (url: {self.tarball})\"\n\n def _fetch_metadata(self, project, renderer):\n \"\"\"\n recycle RegistryPackageMetadata so that we can use the install and\n download_and_untar from RegistryPinnedPackage next.\n build RegistryPackageMetadata from info passed via packages.yml since no\n 'metadata' service exists in this case.\n \"\"\"\n\n dct = {\n \"name\": self.package,\n \"packages\": [], # note: required by RegistryPackageMetadata\n \"downloads\": {\"tarball\": self.tarball},\n }\n\n return RegistryPackageMetadata.from_dict(dct)\n\n def install(self, project, renderer):\n self._install(project, renderer)\n\n\nclass TarballUnpinnedPackage(TarballPackageMixin, UnpinnedPackage[TarballPinnedPackage]):\n def __init__(\n self,\n tarball: str,\n package: str,\n ) -> None:\n super().__init__(tarball)\n # setup to recycle RegistryPinnedPackage fns\n self.package = package\n self.version = \"tarball\"\n\n @classmethod\n def from_contract(cls, contract: TarballPackage) -> \"TarballUnpinnedPackage\":\n return cls(tarball=contract.tarball, package=contract.name)\n\n def incorporate(self, other: \"TarballUnpinnedPackage\") -> \"TarballUnpinnedPackage\":\n return TarballUnpinnedPackage(tarball=self.tarball, package=self.package)\n\n def resolved(self) -> TarballPinnedPackage:\n return TarballPinnedPackage(tarball=self.tarball, package=self.package)\n", "path": "core/dbt/deps/tarball.py"}]}
1,614
117
gh_patches_debug_38546
rasdani/github-patches
git_diff
beetbox__beets-1129
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- zero: Permit stripping album art It would be nice to have the option of automatically clearing embedded art when an item is imported. Whether or not a media item actually contains embedded art, beets should ensure the resulting media item has no embedded art after being import. There are two plugins which would offer a good place of implementation for this feature: the EmbedArt and the Zero plugins. The EmbedArt plugin already supports a command called `clearart` which allows for the manual stripping of embedded art from items which match a query. Since the the `clearart` operation is not automatic and there is no option for automation, an extra step is required on the importation of media. What probably makes more sense is implementing support for the art field in the Zero plugin. It can only be assumed that people who would use such a feature already have the Zero plugin deployed for clearing other fields. That said, it would require less configuration as all a user would need to do is drop the art field in their configuration for the Zero plugin. Moreover, with the EmbedArt plugin, it embeds art into media items by default. This feature would need to be disabled in the configuration as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `beetsplug/zero.py` Content: ``` 1 # This file is part of beets. 2 # Copyright 2013, Blemjhoo Tezoulbr <[email protected]>. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """ Clears tag fields in media files.""" 16 17 import re 18 import logging 19 from beets.plugins import BeetsPlugin 20 from beets.library import Item 21 from beets.importer import action 22 from beets.util import confit 23 24 __author__ = '[email protected]' 25 __version__ = '0.10' 26 27 log = logging.getLogger('beets') 28 29 30 class ZeroPlugin(BeetsPlugin): 31 32 _instance = None 33 34 def __init__(self): 35 super(ZeroPlugin, self).__init__() 36 37 # Listeners. 
38 self.register_listener('write', self.write_event) 39 self.register_listener('import_task_choice', 40 self.import_task_choice_event) 41 42 self.config.add({ 43 'fields': [], 44 }) 45 46 self.patterns = {} 47 self.warned = False 48 49 for field in self.config['fields'].as_str_seq(): 50 if field in ('id', 'path', 'album_id'): 51 log.warn(u'[zero] field \'{0}\' ignored, zeroing ' 52 u'it would be dangerous'.format(field)) 53 continue 54 if field not in Item._fields.keys(): 55 log.error(u'[zero] invalid field: {0}'.format(field)) 56 continue 57 58 try: 59 self.patterns[field] = self.config[field].as_str_seq() 60 except confit.NotFoundError: 61 # Matches everything 62 self.patterns[field] = [u''] 63 64 def import_task_choice_event(self, session, task): 65 """Listen for import_task_choice event.""" 66 if task.choice_flag == action.ASIS and not self.warned: 67 log.warn(u'[zero] cannot zero in \"as-is\" mode') 68 self.warned = True 69 # TODO request write in as-is mode 70 71 @classmethod 72 def match_patterns(cls, field, patterns): 73 """Check if field (as string) is matching any of the patterns in 74 the list. 75 """ 76 for p in patterns: 77 if re.search(p, unicode(field), flags=re.IGNORECASE): 78 return True 79 return False 80 81 def write_event(self, item, path, tags): 82 """Listen for write event.""" 83 if not self.patterns: 84 log.warn(u'[zero] no fields, nothing to do') 85 return 86 87 for field, patterns in self.patterns.items(): 88 if field not in tags: 89 log.error(u'[zero] no such field: {0}'.format(field)) 90 continue 91 92 value = tags[field] 93 if self.match_patterns(value, patterns): 94 log.debug(u'[zero] {0}: {1} -> None'.format(field, value)) 95 tags[field] = None 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/beetsplug/zero.py b/beetsplug/zero.py --- a/beetsplug/zero.py +++ b/beetsplug/zero.py @@ -17,7 +17,7 @@ import re import logging from beets.plugins import BeetsPlugin -from beets.library import Item +from beets.mediafile import MediaFile from beets.importer import action from beets.util import confit @@ -51,7 +51,7 @@ log.warn(u'[zero] field \'{0}\' ignored, zeroing ' u'it would be dangerous'.format(field)) continue - if field not in Item._fields.keys(): + if field not in MediaFile.fields(): log.error(u'[zero] invalid field: {0}'.format(field)) continue @@ -59,7 +59,7 @@ self.patterns[field] = self.config[field].as_str_seq() except confit.NotFoundError: # Matches everything - self.patterns[field] = [u''] + self.patterns[field] = True def import_task_choice_event(self, session, task): """Listen for import_task_choice event.""" @@ -73,23 +73,29 @@ """Check if field (as string) is matching any of the patterns in the list. """ + if patterns is True: + return True for p in patterns: if re.search(p, unicode(field), flags=re.IGNORECASE): return True return False def write_event(self, item, path, tags): - """Listen for write event.""" + """Set values in tags to `None` if the key and value are matched + by `self.patterns`. + """ if not self.patterns: log.warn(u'[zero] no fields, nothing to do') return for field, patterns in self.patterns.items(): - if field not in tags: - log.error(u'[zero] no such field: {0}'.format(field)) - continue - - value = tags[field] - if self.match_patterns(value, patterns): + if field in tags: + value = tags[field] + match = self.match_patterns(tags[field], patterns) + else: + value = '' + match = patterns is True + + if match: log.debug(u'[zero] {0}: {1} -> None'.format(field, value)) tags[field] = None
{"golden_diff": "diff --git a/beetsplug/zero.py b/beetsplug/zero.py\n--- a/beetsplug/zero.py\n+++ b/beetsplug/zero.py\n@@ -17,7 +17,7 @@\n import re\n import logging\n from beets.plugins import BeetsPlugin\n-from beets.library import Item\n+from beets.mediafile import MediaFile\n from beets.importer import action\n from beets.util import confit\n \n@@ -51,7 +51,7 @@\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n- if field not in Item._fields.keys():\n+ if field not in MediaFile.fields():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n \n@@ -59,7 +59,7 @@\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n- self.patterns[field] = [u'']\n+ self.patterns[field] = True\n \n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n@@ -73,23 +73,29 @@\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n+ if patterns is True:\n+ return True\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n \n def write_event(self, item, path, tags):\n- \"\"\"Listen for write event.\"\"\"\n+ \"\"\"Set values in tags to `None` if the key and value are matched\n+ by `self.patterns`.\n+ \"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n \n for field, patterns in self.patterns.items():\n- if field not in tags:\n- log.error(u'[zero] no such field: {0}'.format(field))\n- continue\n-\n- value = tags[field]\n- if self.match_patterns(value, patterns):\n+ if field in tags:\n+ value = tags[field]\n+ match = self.match_patterns(tags[field], patterns)\n+ else:\n+ value = ''\n+ match = patterns is True\n+\n+ if match:\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "issue": "zero: Permit stripping album art\nIt would be nice to have the option of automatically clearing embedded art when an item is imported. Whether or not a media item actually contains embedded art, beets should ensure the resulting media item has no embedded art after being import. There are two plugins which would offer a good place of implementation for this feature: the EmbedArt and the Zero plugins.\n\nThe EmbedArt plugin already supports a command called `clearart` which allows for the manual stripping of embedded art from items which match a query. Since the the `clearart` operation is not automatic and there is no option for automation, an extra step is required on the importation of media.\n\nWhat probably makes more sense is implementing support for the art field in the Zero plugin. It can only be assumed that people who would use such a feature already have the Zero plugin deployed for clearing other fields. That said, it would require less configuration as all a user would need to do is drop the art field in their configuration for the Zero plugin. Moreover, with the EmbedArt plugin, it embeds art into media items by default. 
This feature would need to be disabled in the configuration as well.\n\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Blemjhoo Tezoulbr <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\" Clears tag fields in media files.\"\"\"\n\nimport re\nimport logging\nfrom beets.plugins import BeetsPlugin\nfrom beets.library import Item\nfrom beets.importer import action\nfrom beets.util import confit\n\n__author__ = '[email protected]'\n__version__ = '0.10'\n\nlog = logging.getLogger('beets')\n\n\nclass ZeroPlugin(BeetsPlugin):\n\n _instance = None\n\n def __init__(self):\n super(ZeroPlugin, self).__init__()\n\n # Listeners.\n self.register_listener('write', self.write_event)\n self.register_listener('import_task_choice',\n self.import_task_choice_event)\n\n self.config.add({\n 'fields': [],\n })\n\n self.patterns = {}\n self.warned = False\n\n for field in self.config['fields'].as_str_seq():\n if field in ('id', 'path', 'album_id'):\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n if field not in Item._fields.keys():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n\n try:\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n self.patterns[field] = [u'']\n\n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n if task.choice_flag == action.ASIS and not self.warned:\n log.warn(u'[zero] cannot zero in \\\"as-is\\\" mode')\n self.warned = True\n # TODO request write in as-is mode\n\n @classmethod\n def match_patterns(cls, field, patterns):\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n\n def write_event(self, item, path, tags):\n \"\"\"Listen for write event.\"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n\n for field, patterns in self.patterns.items():\n if field not in tags:\n log.error(u'[zero] no such field: {0}'.format(field))\n continue\n\n value = tags[field]\n if self.match_patterns(value, patterns):\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "path": "beetsplug/zero.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2013, Blemjhoo Tezoulbr <[email protected]>.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this 
permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\" Clears tag fields in media files.\"\"\"\n\nimport re\nimport logging\nfrom beets.plugins import BeetsPlugin\nfrom beets.mediafile import MediaFile\nfrom beets.importer import action\nfrom beets.util import confit\n\n__author__ = '[email protected]'\n__version__ = '0.10'\n\nlog = logging.getLogger('beets')\n\n\nclass ZeroPlugin(BeetsPlugin):\n\n _instance = None\n\n def __init__(self):\n super(ZeroPlugin, self).__init__()\n\n # Listeners.\n self.register_listener('write', self.write_event)\n self.register_listener('import_task_choice',\n self.import_task_choice_event)\n\n self.config.add({\n 'fields': [],\n })\n\n self.patterns = {}\n self.warned = False\n\n for field in self.config['fields'].as_str_seq():\n if field in ('id', 'path', 'album_id'):\n log.warn(u'[zero] field \\'{0}\\' ignored, zeroing '\n u'it would be dangerous'.format(field))\n continue\n if field not in MediaFile.fields():\n log.error(u'[zero] invalid field: {0}'.format(field))\n continue\n\n try:\n self.patterns[field] = self.config[field].as_str_seq()\n except confit.NotFoundError:\n # Matches everything\n self.patterns[field] = True\n\n def import_task_choice_event(self, session, task):\n \"\"\"Listen for import_task_choice event.\"\"\"\n if task.choice_flag == action.ASIS and not self.warned:\n log.warn(u'[zero] cannot zero in \\\"as-is\\\" mode')\n self.warned = True\n # TODO request write in as-is mode\n\n @classmethod\n def match_patterns(cls, field, patterns):\n \"\"\"Check if field (as string) is matching any of the patterns in\n the list.\n \"\"\"\n if patterns is True:\n return True\n for p in patterns:\n if re.search(p, unicode(field), flags=re.IGNORECASE):\n return True\n return False\n\n def write_event(self, item, path, tags):\n \"\"\"Set values in tags to `None` if the key and value are matched\n by `self.patterns`.\n \"\"\"\n if not self.patterns:\n log.warn(u'[zero] no fields, nothing to do')\n return\n\n for field, patterns in self.patterns.items():\n if field in tags:\n value = tags[field]\n match = self.match_patterns(tags[field], patterns)\n else:\n value = ''\n match = patterns is True\n\n if match:\n log.debug(u'[zero] {0}: {1} -> None'.format(field, value))\n tags[field] = None\n", "path": "beetsplug/zero.py"}]}
1,415
549
gh_patches_debug_32722
rasdani/github-patches
git_diff
pypa__pip-3443
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Protect a few more requests imports In Debian, we unbundle requests, and further, we unbundled all vendored packages from requests. This causes pip's vendoring algorithm to fail. I had to add this patch to the Debian packaging. ``` From 144ba146cde273b815a80859537b09c068fd47e6 Mon Sep 17 00:00:00 2001 From: Barry Warsaw <[email protected]> Date: Fri, 29 Jan 2016 16:56:43 -0500 Subject: Debian already unbundles things from requests. Patch-Name: handle-unbundled-requests.patch --- pip/_vendor/__init__.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py index 1cb35a1..c64896a 100644 --- a/pip/_vendor/__init__.py +++ b/pip/_vendor/__init__.py @@ -82,8 +82,12 @@ if DEBUNDLED: vendored("requests.packages.urllib3.fields") vendored("requests.packages.urllib3.filepost") vendored("requests.packages.urllib3.packages") - vendored("requests.packages.urllib3.packages.ordered_dict") - vendored("requests.packages.urllib3.packages.six") + try: + vendored("requests.packages.urllib3.packages.ordered_dict") + vendored("requests.packages.urllib3.packages.six") + except ImportError: + # Debian already unbundles these from requests. + pass vendored("requests.packages.urllib3.packages.ssl_match_hostname") vendored("requests.packages.urllib3.packages.ssl_match_hostname." "_implementation") ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/_vendor/__init__.py` Content: ``` 1 """ 2 pip._vendor is for vendoring dependencies of pip to prevent needing pip to 3 depend on something external. 4 5 Files inside of pip._vendor should be considered immutable and should only be 6 updated to versions from upstream. 7 """ 8 from __future__ import absolute_import 9 10 import glob 11 import os.path 12 import sys 13 14 # Downstream redistributors which have debundled our dependencies should also 15 # patch this value to be true. This will trigger the additional patching 16 # to cause things like "six" to be available as pip. 17 DEBUNDLED = False 18 19 # By default, look in this directory for a bunch of .whl files which we will 20 # add to the beginning of sys.path before attempting to import anything. This 21 # is done to support downstream re-distributors like Debian and Fedora who 22 # wish to create their own Wheels for our dependencies to aid in debundling. 23 WHEEL_DIR = os.path.abspath(os.path.dirname(__file__)) 24 25 26 # Define a small helper function to alias our vendored modules to the real ones 27 # if the vendored ones do not exist. This idea of this was taken from 28 # https://github.com/kennethreitz/requests/pull/2567. 29 def vendored(modulename): 30 vendored_name = "{0}.{1}".format(__name__, modulename) 31 32 try: 33 __import__(vendored_name, globals(), locals(), level=0) 34 except ImportError: 35 __import__(modulename, globals(), locals(), level=0) 36 sys.modules[vendored_name] = sys.modules[modulename] 37 base, head = vendored_name.rsplit(".", 1) 38 setattr(sys.modules[base], head, sys.modules[modulename]) 39 40 41 # If we're operating in a debundled setup, then we want to go ahead and trigger 42 # the aliasing of our vendored libraries as well as looking for wheels to add 43 # to our sys.path. 
This will cause all of this code to be a no-op typically 44 # however downstream redistributors can enable it in a consistent way across 45 # all platforms. 46 if DEBUNDLED: 47 # Actually look inside of WHEEL_DIR to find .whl files and add them to the 48 # front of our sys.path. 49 sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path 50 51 # Actually alias all of our vendored dependencies. 52 vendored("cachecontrol") 53 vendored("colorama") 54 vendored("distlib") 55 vendored("html5lib") 56 vendored("lockfile") 57 vendored("six") 58 vendored("six.moves") 59 vendored("six.moves.urllib") 60 vendored("packaging") 61 vendored("packaging.version") 62 vendored("packaging.specifiers") 63 vendored("pkg_resources") 64 vendored("progress") 65 vendored("retrying") 66 vendored("requests") 67 vendored("requests.packages") 68 vendored("requests.packages.urllib3") 69 vendored("requests.packages.urllib3._collections") 70 vendored("requests.packages.urllib3.connection") 71 vendored("requests.packages.urllib3.connectionpool") 72 vendored("requests.packages.urllib3.contrib") 73 try: 74 vendored("requests.packages.urllib3.contrib.ntlmpool") 75 except ImportError: 76 pass 77 try: 78 vendored("requests.packages.urllib3.contrib.pyopenssl") 79 except ImportError: 80 pass 81 vendored("requests.packages.urllib3.exceptions") 82 vendored("requests.packages.urllib3.fields") 83 vendored("requests.packages.urllib3.filepost") 84 vendored("requests.packages.urllib3.packages") 85 vendored("requests.packages.urllib3.packages.ordered_dict") 86 vendored("requests.packages.urllib3.packages.six") 87 vendored("requests.packages.urllib3.packages.ssl_match_hostname") 88 vendored("requests.packages.urllib3.packages.ssl_match_hostname." 89 "_implementation") 90 vendored("requests.packages.urllib3.poolmanager") 91 vendored("requests.packages.urllib3.request") 92 vendored("requests.packages.urllib3.response") 93 vendored("requests.packages.urllib3.util") 94 vendored("requests.packages.urllib3.util.connection") 95 vendored("requests.packages.urllib3.util.request") 96 vendored("requests.packages.urllib3.util.response") 97 vendored("requests.packages.urllib3.util.retry") 98 vendored("requests.packages.urllib3.util.ssl_") 99 vendored("requests.packages.urllib3.util.timeout") 100 vendored("requests.packages.urllib3.util.url") 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py --- a/pip/_vendor/__init__.py +++ b/pip/_vendor/__init__.py @@ -32,10 +32,22 @@ try: __import__(vendored_name, globals(), locals(), level=0) except ImportError: - __import__(modulename, globals(), locals(), level=0) - sys.modules[vendored_name] = sys.modules[modulename] - base, head = vendored_name.rsplit(".", 1) - setattr(sys.modules[base], head, sys.modules[modulename]) + try: + __import__(modulename, globals(), locals(), level=0) + except ImportError: + # We can just silently allow import failures to pass here. If we + # got to this point it means that ``import pip._vendor.whatever`` + # failed and so did ``import whatever``. Since we're importing this + # upfront in an attempt to alias imports, not erroring here will + # just mean we get a regular import error whenever pip *actually* + # tries to import one of these modules to use it, which actually + # gives us a better error message than we would have otherwise + # gotten. + pass + else: + sys.modules[vendored_name] = sys.modules[modulename] + base, head = vendored_name.rsplit(".", 1) + setattr(sys.modules[base], head, sys.modules[modulename]) # If we're operating in a debundled setup, then we want to go ahead and trigger @@ -70,14 +82,8 @@ vendored("requests.packages.urllib3.connection") vendored("requests.packages.urllib3.connectionpool") vendored("requests.packages.urllib3.contrib") - try: - vendored("requests.packages.urllib3.contrib.ntlmpool") - except ImportError: - pass - try: - vendored("requests.packages.urllib3.contrib.pyopenssl") - except ImportError: - pass + vendored("requests.packages.urllib3.contrib.ntlmpool") + vendored("requests.packages.urllib3.contrib.pyopenssl") vendored("requests.packages.urllib3.exceptions") vendored("requests.packages.urllib3.fields") vendored("requests.packages.urllib3.filepost")
{"golden_diff": "diff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py\n--- a/pip/_vendor/__init__.py\n+++ b/pip/_vendor/__init__.py\n@@ -32,10 +32,22 @@\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n- __import__(modulename, globals(), locals(), level=0)\n- sys.modules[vendored_name] = sys.modules[modulename]\n- base, head = vendored_name.rsplit(\".\", 1)\n- setattr(sys.modules[base], head, sys.modules[modulename])\n+ try:\n+ __import__(modulename, globals(), locals(), level=0)\n+ except ImportError:\n+ # We can just silently allow import failures to pass here. If we\n+ # got to this point it means that ``import pip._vendor.whatever``\n+ # failed and so did ``import whatever``. Since we're importing this\n+ # upfront in an attempt to alias imports, not erroring here will\n+ # just mean we get a regular import error whenever pip *actually*\n+ # tries to import one of these modules to use it, which actually\n+ # gives us a better error message than we would have otherwise\n+ # gotten.\n+ pass\n+ else:\n+ sys.modules[vendored_name] = sys.modules[modulename]\n+ base, head = vendored_name.rsplit(\".\", 1)\n+ setattr(sys.modules[base], head, sys.modules[modulename])\n \n \n # If we're operating in a debundled setup, then we want to go ahead and trigger\n@@ -70,14 +82,8 @@\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n- try:\n- vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n- except ImportError:\n- pass\n- try:\n- vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n- except ImportError:\n- pass\n+ vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n+ vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n", "issue": "Protect a few more requests imports\nIn Debian, we unbundle requests, and further, we unbundled all vendored packages from requests. This causes pip's vendoring algorithm to fail. 
I had to add this patch to the Debian packaging.\n\n```\nFrom 144ba146cde273b815a80859537b09c068fd47e6 Mon Sep 17 00:00:00 2001\nFrom: Barry Warsaw <[email protected]>\nDate: Fri, 29 Jan 2016 16:56:43 -0500\nSubject: Debian already unbundles things from requests.\n\nPatch-Name: handle-unbundled-requests.patch\n\n---\n pip/_vendor/__init__.py | 8 ++++++--\n 1 file changed, 6 insertions(+), 2 deletions(-)\n\ndiff --git a/pip/_vendor/__init__.py b/pip/_vendor/__init__.py\nindex 1cb35a1..c64896a 100644\n--- a/pip/_vendor/__init__.py\n+++ b/pip/_vendor/__init__.py\n@@ -82,8 +82,12 @@ if DEBUNDLED:\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n- vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n- vendored(\"requests.packages.urllib3.packages.six\")\n+ try:\n+ vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n+ vendored(\"requests.packages.urllib3.packages.six\")\n+ except ImportError:\n+ # Debian already unbundles these from requests.\n+ pass\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n```\n\n", "before_files": [{"content": "\"\"\"\npip._vendor is for vendoring dependencies of pip to prevent needing pip to\ndepend on something external.\n\nFiles inside of pip._vendor should be considered immutable and should only be\nupdated to versions from upstream.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport os.path\nimport sys\n\n# Downstream redistributors which have debundled our dependencies should also\n# patch this value to be true. This will trigger the additional patching\n# to cause things like \"six\" to be available as pip.\nDEBUNDLED = False\n\n# By default, look in this directory for a bunch of .whl files which we will\n# add to the beginning of sys.path before attempting to import anything. This\n# is done to support downstream re-distributors like Debian and Fedora who\n# wish to create their own Wheels for our dependencies to aid in debundling.\nWHEEL_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Define a small helper function to alias our vendored modules to the real ones\n# if the vendored ones do not exist. This idea of this was taken from\n# https://github.com/kennethreitz/requests/pull/2567.\ndef vendored(modulename):\n vendored_name = \"{0}.{1}\".format(__name__, modulename)\n\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n __import__(modulename, globals(), locals(), level=0)\n sys.modules[vendored_name] = sys.modules[modulename]\n base, head = vendored_name.rsplit(\".\", 1)\n setattr(sys.modules[base], head, sys.modules[modulename])\n\n\n# If we're operating in a debundled setup, then we want to go ahead and trigger\n# the aliasing of our vendored libraries as well as looking for wheels to add\n# to our sys.path. 
This will cause all of this code to be a no-op typically\n# however downstream redistributors can enable it in a consistent way across\n# all platforms.\nif DEBUNDLED:\n # Actually look inside of WHEEL_DIR to find .whl files and add them to the\n # front of our sys.path.\n sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, \"*.whl\")) + sys.path\n\n # Actually alias all of our vendored dependencies.\n vendored(\"cachecontrol\")\n vendored(\"colorama\")\n vendored(\"distlib\")\n vendored(\"html5lib\")\n vendored(\"lockfile\")\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n vendored(\"requests.packages.urllib3\")\n vendored(\"requests.packages.urllib3._collections\")\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n try:\n vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n except ImportError:\n pass\n try:\n vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n except ImportError:\n pass\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n vendored(\"requests.packages.urllib3.packages.six\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n vendored(\"requests.packages.urllib3.poolmanager\")\n vendored(\"requests.packages.urllib3.request\")\n vendored(\"requests.packages.urllib3.response\")\n vendored(\"requests.packages.urllib3.util\")\n vendored(\"requests.packages.urllib3.util.connection\")\n vendored(\"requests.packages.urllib3.util.request\")\n vendored(\"requests.packages.urllib3.util.response\")\n vendored(\"requests.packages.urllib3.util.retry\")\n vendored(\"requests.packages.urllib3.util.ssl_\")\n vendored(\"requests.packages.urllib3.util.timeout\")\n vendored(\"requests.packages.urllib3.util.url\")\n", "path": "pip/_vendor/__init__.py"}], "after_files": [{"content": "\"\"\"\npip._vendor is for vendoring dependencies of pip to prevent needing pip to\ndepend on something external.\n\nFiles inside of pip._vendor should be considered immutable and should only be\nupdated to versions from upstream.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport os.path\nimport sys\n\n# Downstream redistributors which have debundled our dependencies should also\n# patch this value to be true. This will trigger the additional patching\n# to cause things like \"six\" to be available as pip.\nDEBUNDLED = False\n\n# By default, look in this directory for a bunch of .whl files which we will\n# add to the beginning of sys.path before attempting to import anything. This\n# is done to support downstream re-distributors like Debian and Fedora who\n# wish to create their own Wheels for our dependencies to aid in debundling.\nWHEEL_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Define a small helper function to alias our vendored modules to the real ones\n# if the vendored ones do not exist. 
This idea of this was taken from\n# https://github.com/kennethreitz/requests/pull/2567.\ndef vendored(modulename):\n vendored_name = \"{0}.{1}\".format(__name__, modulename)\n\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n try:\n __import__(modulename, globals(), locals(), level=0)\n except ImportError:\n # We can just silently allow import failures to pass here. If we\n # got to this point it means that ``import pip._vendor.whatever``\n # failed and so did ``import whatever``. Since we're importing this\n # upfront in an attempt to alias imports, not erroring here will\n # just mean we get a regular import error whenever pip *actually*\n # tries to import one of these modules to use it, which actually\n # gives us a better error message than we would have otherwise\n # gotten.\n pass\n else:\n sys.modules[vendored_name] = sys.modules[modulename]\n base, head = vendored_name.rsplit(\".\", 1)\n setattr(sys.modules[base], head, sys.modules[modulename])\n\n\n# If we're operating in a debundled setup, then we want to go ahead and trigger\n# the aliasing of our vendored libraries as well as looking for wheels to add\n# to our sys.path. This will cause all of this code to be a no-op typically\n# however downstream redistributors can enable it in a consistent way across\n# all platforms.\nif DEBUNDLED:\n # Actually look inside of WHEEL_DIR to find .whl files and add them to the\n # front of our sys.path.\n sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, \"*.whl\")) + sys.path\n\n # Actually alias all of our vendored dependencies.\n vendored(\"cachecontrol\")\n vendored(\"colorama\")\n vendored(\"distlib\")\n vendored(\"html5lib\")\n vendored(\"lockfile\")\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n vendored(\"requests.packages.urllib3\")\n vendored(\"requests.packages.urllib3._collections\")\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n vendored(\"requests.packages.urllib3.packages.six\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n vendored(\"requests.packages.urllib3.poolmanager\")\n vendored(\"requests.packages.urllib3.request\")\n vendored(\"requests.packages.urllib3.response\")\n vendored(\"requests.packages.urllib3.util\")\n vendored(\"requests.packages.urllib3.util.connection\")\n vendored(\"requests.packages.urllib3.util.request\")\n vendored(\"requests.packages.urllib3.util.response\")\n vendored(\"requests.packages.urllib3.util.retry\")\n vendored(\"requests.packages.urllib3.util.ssl_\")\n vendored(\"requests.packages.urllib3.util.timeout\")\n vendored(\"requests.packages.urllib3.util.url\")\n", "path": "pip/_vendor/__init__.py"}]}
1,918
553
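The debundling logic in the entry above reduces to one aliasing step: if the vendored copy of a dependency is missing, import the system copy and register it under the vendored name so both import paths resolve to the same module. A minimal standalone sketch of that step follows; `mypkg._vendor` and the helper name are hypothetical, and it assumes the vendor package itself is already importable (as it is when the original code runs inside `pip/_vendor/__init__.py`).

```python
import importlib
import sys


def alias_vendored(vendor_pkg, modulename):
    """Register the system copy of `modulename` under `vendor_pkg.modulename`
    when the vendored copy is missing (the debundled case)."""
    vendored_name = "{0}.{1}".format(vendor_pkg, modulename)
    try:
        importlib.import_module(vendored_name)
    except ImportError:
        try:
            module = importlib.import_module(modulename)
        except ImportError:
            # Neither copy exists; let the real import site fail later with a
            # normal ImportError instead of masking it here.
            return
        sys.modules[vendored_name] = module
        # Keep attribute access working too: "from mypkg._vendor import six".
        parent_name, attr = vendored_name.rsplit(".", 1)
        parent = sys.modules.get(parent_name)
        if parent is not None:
            setattr(parent, attr, module)


# Hypothetical call site, inside mypkg/_vendor/__init__.py:
# alias_vendored(__name__, "six")
```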
gh_patches_debug_14511
rasdani/github-patches
git_diff
mozmeao__snippets-service-1437
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add Firefox VPN target It seems the Firefox VPN uses `e6eb0d1e856335fc`, previously named `Firefox FPN` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `snippets/base/admin/fields.py` Content: ``` 1 from django.core.exceptions import ValidationError 2 from django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField, 3 MultiValueField, MultipleChoiceField) 4 5 from snippets.base.models import Addon, TargetedCountry 6 7 from .widgets import JEXLMultiWidget 8 9 10 class MultipleChoiceFieldCSV(MultipleChoiceField): 11 # To be used with in snippets.base.forms.SnippetAdminForm and in 12 # combination with DynamicField. We don't directly save() this field in the 13 # database so get_prep_value has not been implemented. 14 15 def prepare_value(self, value): 16 value = super(MultipleChoiceFieldCSV, self).prepare_value(value) 17 if not isinstance(value, list): 18 value = value.split(';') 19 return value 20 21 def clean(self, value): 22 value = super(MultipleChoiceFieldCSV, self).clean(value) 23 return ';'.join(value) 24 25 26 class JEXLBaseField(): 27 def to_jexl(self, value): 28 if value: 29 return self.jexl.format(attr_name=self.attr_name, value=value) 30 31 return None 32 33 34 class JEXLChoiceField(JEXLBaseField, ChoiceField): 35 def __init__(self, attr_name, *args, **kwargs): 36 self.attr_name = attr_name 37 self.jexl = '{attr_name} == {value}' 38 self.jexl = kwargs.pop('jexl', self.jexl) 39 return super().__init__(*args, **kwargs) 40 41 def to_jexl(self, value): 42 if value: 43 return self.jexl.format(attr_name=self.attr_name, value=value) 44 45 46 class JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField): 47 def __init__(self, attr_name, *args, **kwargs): 48 self.attr_name = attr_name 49 self.jexl = '{attr_name} in {value}' 50 self.jexl = kwargs.pop('jexl', self.jexl) 51 return super().__init__(*args, **kwargs) 52 53 def prepare_value(self, value): 54 if isinstance(value, str): 55 value = value.split(';') 56 return super().prepare_value(value) 57 58 def clean(self, value): 59 value = super().clean(value) 60 return ';'.join([str(x.id) for x in value]) 61 62 63 class JEXLCountryField(JEXLModelMultipleChoiceField): 64 def to_jexl(self, value): 65 if value: 66 values = TargetedCountry.objects.filter(id__in=value.split(";")) 67 return f'region in {[x.code for x in values]}' 68 return None 69 70 71 class JEXLRangeField(JEXLBaseField, MultiValueField): 72 def __init__(self, attr_name, choices, **kwargs): 73 self.attr_name = attr_name 74 self.jexl = { 75 'minimum': '{value} <= {attr_name}', 76 'maximum': '{attr_name} < {value}' 77 } 78 self.jexl = kwargs.pop('jexl', self.jexl) 79 fields = ( 80 ChoiceField(choices=choices), 81 ChoiceField(choices=choices), 82 ) 83 super().__init__(fields, **kwargs) 84 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields], 85 template_name='widgets/jexlrange.html') 86 87 def compress(self, data_list): 88 return ','.join(data_list) 89 90 def to_jexl(self, value): 91 final_jexl = [] 92 if value: 93 minimum, maximum = value.split(',') 94 if minimum: 95 final_jexl.append( 96 self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum) 97 ) 98 if maximum: 99 final_jexl.append( 100 self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum) 101 ) 102 return ' && '.join(final_jexl) 103 104 def 
validate(self, value): 105 minimum, maximum = value.split(',') 106 self.fields[0].validate(minimum) 107 self.fields[1].validate(maximum) 108 109 if minimum and maximum and int(minimum) > int(maximum): 110 raise ValidationError('Minimum value must be lower or equal to maximum value.') 111 return value 112 113 114 class JEXLFirefoxRangeField(JEXLRangeField): 115 def __init__(self, **kwargs): 116 # Include only versions greater than 63, where ASRSnippets exist. 117 min_version = 64 118 # Need to be able to dynamically change this, probably using 119 # product_details. Issue #855 120 max_version = 84 121 122 choices = ( 123 [(None, 'No limit')] + 124 [(x, x) for x in reversed(range(min_version, max_version + 1))] 125 ) 126 super().__init__('firefoxVersion', choices, **kwargs) 127 128 def validate(self, value): 129 minimum, maximum = value.split(',') 130 self.fields[0].validate(minimum) 131 self.fields[1].validate(maximum) 132 133 if minimum and maximum and minimum > maximum: 134 raise ValidationError('Minimum value must be lower or equal to maximum value.') 135 return value 136 137 138 class JEXLAddonField(MultiValueField): 139 def __init__(self, **kwargs): 140 choices = ( 141 (None, "I don't care"), 142 ('not_installed', 'Not Installed'), 143 ('installed', 'Installed'), 144 ) 145 fields = ( 146 ChoiceField(choices=choices), 147 ModelChoiceField(queryset=Addon.objects.all(), required=False), 148 ) 149 super().__init__(fields, **kwargs) 150 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields]) 151 152 def compress(self, data_list): 153 if data_list: 154 return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', '')) 155 return '' 156 157 def to_jexl(self, value): 158 check, addon_id = value.split(',') 159 if not check or not addon_id: 160 return '' 161 162 addon = Addon.objects.get(id=addon_id) 163 if check == 'not_installed': 164 jexl = '("{}" in addonsInfo.addons|keys) == false'.format(addon.guid) 165 elif check == 'installed': 166 jexl = '("{}" in addonsInfo.addons|keys) == true'.format(addon.guid) 167 168 return jexl 169 170 def validate(self, value): 171 check, addon_id = value.split(',') 172 173 self.fields[0].validate(check) 174 self.fields[1].validate(addon_id) 175 176 if check and not addon_id: 177 raise ValidationError('You must select an add-on') 178 179 if not check and addon_id: 180 raise ValidationError('You must select a check') 181 return value 182 183 184 class JEXLFirefoxServicesField(MultiValueField): 185 def __init__(self, **kwargs): 186 check_choices = ( 187 (None, "I don't care"), 188 ('no_account', "User hasn't signed up for"), 189 ('has_account', 'User has signed up for'), 190 ) 191 # Verify IDs using 192 # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq . 
193 # Incomplete list of IDs 194 # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa 195 service_choices = ( 196 (None, '---------'), 197 ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'), 198 ('802d56ef2a9af9fa', 'Firefox Monitor'), 199 ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'), 200 ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'), 201 ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'), 202 ('7ad9917f6c55fb77', 'Firefox Reality'), 203 ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'), 204 205 ) 206 fields = ( 207 ChoiceField(choices=check_choices), 208 ChoiceField(choices=service_choices), 209 ) 210 super().__init__(fields, **kwargs) 211 self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields]) 212 213 def compress(self, data_list): 214 if data_list: 215 return f'{data_list[0]},{data_list[1]}' 216 return '' 217 218 def to_jexl(self, value): 219 check, ids = value.split(',') 220 ids = ids.split('|') if ids else '' 221 222 if not check or not ids: 223 return '' 224 225 jexl = '(' 226 for id in ids: 227 jexl += f'("{id}" in attachedFxAOAuthClients|mapToProperty("id")) || ' 228 jexl = jexl[:-4] 229 230 if check == 'no_account': 231 jexl += ') == false' 232 elif check == 'has_account': 233 jexl += ') == true' 234 235 return jexl 236 237 def validate(self, value): 238 check, service_name = value.split(',') 239 240 self.fields[0].validate(check) 241 self.fields[1].validate(service_name) 242 243 if check and not service_name: 244 raise ValidationError('You must select an Service.') 245 246 if not check and service_name: 247 raise ValidationError('You must select a check.') 248 return value 249 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/snippets/base/admin/fields.py b/snippets/base/admin/fields.py --- a/snippets/base/admin/fields.py +++ b/snippets/base/admin/fields.py @@ -197,7 +197,8 @@ ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'), ('802d56ef2a9af9fa', 'Firefox Monitor'), ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'), - ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'), + ('a8c528140153d1c6|565585c1745a144d', 'Firefox Private Network'), + ('e6eb0d1e856335fc', 'Firefox VPN'), ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'), ('7ad9917f6c55fb77', 'Firefox Reality'), ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),
{"golden_diff": "diff --git a/snippets/base/admin/fields.py b/snippets/base/admin/fields.py\n--- a/snippets/base/admin/fields.py\n+++ b/snippets/base/admin/fields.py\n@@ -197,7 +197,8 @@\n ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),\n ('802d56ef2a9af9fa', 'Firefox Monitor'),\n ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),\n- ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),\n+ ('a8c528140153d1c6|565585c1745a144d', 'Firefox Private Network'),\n+ ('e6eb0d1e856335fc', 'Firefox VPN'),\n ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),\n ('7ad9917f6c55fb77', 'Firefox Reality'),\n ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),\n", "issue": "Add Firefox VPN target\nIt seems the Firefox VPN uses `e6eb0d1e856335fc`, previously named `Firefox FPN`\n", "before_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField,\n MultiValueField, MultipleChoiceField)\n\nfrom snippets.base.models import Addon, TargetedCountry\n\nfrom .widgets import JEXLMultiWidget\n\n\nclass MultipleChoiceFieldCSV(MultipleChoiceField):\n # To be used with in snippets.base.forms.SnippetAdminForm and in\n # combination with DynamicField. We don't directly save() this field in the\n # database so get_prep_value has not been implemented.\n\n def prepare_value(self, value):\n value = super(MultipleChoiceFieldCSV, self).prepare_value(value)\n if not isinstance(value, list):\n value = value.split(';')\n return value\n\n def clean(self, value):\n value = super(MultipleChoiceFieldCSV, self).clean(value)\n return ';'.join(value)\n\n\nclass JEXLBaseField():\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n return None\n\n\nclass JEXLChoiceField(JEXLBaseField, ChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} == {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n\nclass JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} in {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def prepare_value(self, value):\n if isinstance(value, str):\n value = value.split(';')\n return super().prepare_value(value)\n\n def clean(self, value):\n value = super().clean(value)\n return ';'.join([str(x.id) for x in value])\n\n\nclass JEXLCountryField(JEXLModelMultipleChoiceField):\n def to_jexl(self, value):\n if value:\n values = TargetedCountry.objects.filter(id__in=value.split(\";\"))\n return f'region in {[x.code for x in values]}'\n return None\n\n\nclass JEXLRangeField(JEXLBaseField, MultiValueField):\n def __init__(self, attr_name, choices, **kwargs):\n self.attr_name = attr_name\n self.jexl = {\n 'minimum': '{value} <= {attr_name}',\n 'maximum': '{attr_name} < {value}'\n }\n self.jexl = kwargs.pop('jexl', self.jexl)\n fields = (\n ChoiceField(choices=choices),\n ChoiceField(choices=choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],\n template_name='widgets/jexlrange.html')\n\n def compress(self, data_list):\n return ','.join(data_list)\n\n def to_jexl(self, value):\n final_jexl = 
[]\n if value:\n minimum, maximum = value.split(',')\n if minimum:\n final_jexl.append(\n self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)\n )\n if maximum:\n final_jexl.append(\n self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)\n )\n return ' && '.join(final_jexl)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and int(minimum) > int(maximum):\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLFirefoxRangeField(JEXLRangeField):\n def __init__(self, **kwargs):\n # Include only versions greater than 63, where ASRSnippets exist.\n min_version = 64\n # Need to be able to dynamically change this, probably using\n # product_details. Issue #855\n max_version = 84\n\n choices = (\n [(None, 'No limit')] +\n [(x, x) for x in reversed(range(min_version, max_version + 1))]\n )\n super().__init__('firefoxVersion', choices, **kwargs)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and minimum > maximum:\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLAddonField(MultiValueField):\n def __init__(self, **kwargs):\n choices = (\n (None, \"I don't care\"),\n ('not_installed', 'Not Installed'),\n ('installed', 'Installed'),\n )\n fields = (\n ChoiceField(choices=choices),\n ModelChoiceField(queryset=Addon.objects.all(), required=False),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))\n return ''\n\n def to_jexl(self, value):\n check, addon_id = value.split(',')\n if not check or not addon_id:\n return ''\n\n addon = Addon.objects.get(id=addon_id)\n if check == 'not_installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == false'.format(addon.guid)\n elif check == 'installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == true'.format(addon.guid)\n\n return jexl\n\n def validate(self, value):\n check, addon_id = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(addon_id)\n\n if check and not addon_id:\n raise ValidationError('You must select an add-on')\n\n if not check and addon_id:\n raise ValidationError('You must select a check')\n return value\n\n\nclass JEXLFirefoxServicesField(MultiValueField):\n def __init__(self, **kwargs):\n check_choices = (\n (None, \"I don't care\"),\n ('no_account', \"User hasn't signed up for\"),\n ('has_account', 'User has signed up for'),\n )\n # Verify IDs using\n # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .\n # Incomplete list of IDs\n # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa\n service_choices = (\n (None, '---------'),\n ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),\n ('802d56ef2a9af9fa', 'Firefox Monitor'),\n ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),\n ('a8c528140153d1c6|565585c1745a144d|e6eb0d1e856335fc', 'Firefox Private Network'),\n ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),\n ('7ad9917f6c55fb77', 'Firefox Reality'),\n ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),\n\n )\n fields = (\n ChoiceField(choices=check_choices),\n 
ChoiceField(choices=service_choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return f'{data_list[0]},{data_list[1]}'\n return ''\n\n def to_jexl(self, value):\n check, ids = value.split(',')\n ids = ids.split('|') if ids else ''\n\n if not check or not ids:\n return ''\n\n jexl = '('\n for id in ids:\n jexl += f'(\"{id}\" in attachedFxAOAuthClients|mapToProperty(\"id\")) || '\n jexl = jexl[:-4]\n\n if check == 'no_account':\n jexl += ') == false'\n elif check == 'has_account':\n jexl += ') == true'\n\n return jexl\n\n def validate(self, value):\n check, service_name = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(service_name)\n\n if check and not service_name:\n raise ValidationError('You must select an Service.')\n\n if not check and service_name:\n raise ValidationError('You must select a check.')\n return value\n", "path": "snippets/base/admin/fields.py"}], "after_files": [{"content": "from django.core.exceptions import ValidationError\nfrom django.forms import (ChoiceField, ModelChoiceField, ModelMultipleChoiceField,\n MultiValueField, MultipleChoiceField)\n\nfrom snippets.base.models import Addon, TargetedCountry\n\nfrom .widgets import JEXLMultiWidget\n\n\nclass MultipleChoiceFieldCSV(MultipleChoiceField):\n # To be used with in snippets.base.forms.SnippetAdminForm and in\n # combination with DynamicField. We don't directly save() this field in the\n # database so get_prep_value has not been implemented.\n\n def prepare_value(self, value):\n value = super(MultipleChoiceFieldCSV, self).prepare_value(value)\n if not isinstance(value, list):\n value = value.split(';')\n return value\n\n def clean(self, value):\n value = super(MultipleChoiceFieldCSV, self).clean(value)\n return ';'.join(value)\n\n\nclass JEXLBaseField():\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n return None\n\n\nclass JEXLChoiceField(JEXLBaseField, ChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} == {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def to_jexl(self, value):\n if value:\n return self.jexl.format(attr_name=self.attr_name, value=value)\n\n\nclass JEXLModelMultipleChoiceField(JEXLBaseField, ModelMultipleChoiceField):\n def __init__(self, attr_name, *args, **kwargs):\n self.attr_name = attr_name\n self.jexl = '{attr_name} in {value}'\n self.jexl = kwargs.pop('jexl', self.jexl)\n return super().__init__(*args, **kwargs)\n\n def prepare_value(self, value):\n if isinstance(value, str):\n value = value.split(';')\n return super().prepare_value(value)\n\n def clean(self, value):\n value = super().clean(value)\n return ';'.join([str(x.id) for x in value])\n\n\nclass JEXLCountryField(JEXLModelMultipleChoiceField):\n def to_jexl(self, value):\n if value:\n values = TargetedCountry.objects.filter(id__in=value.split(\";\"))\n return f'region in {[x.code for x in values]}'\n return None\n\n\nclass JEXLRangeField(JEXLBaseField, MultiValueField):\n def __init__(self, attr_name, choices, **kwargs):\n self.attr_name = attr_name\n self.jexl = {\n 'minimum': '{value} <= {attr_name}',\n 'maximum': '{attr_name} < {value}'\n }\n self.jexl = kwargs.pop('jexl', self.jexl)\n fields = (\n ChoiceField(choices=choices),\n ChoiceField(choices=choices),\n )\n super().__init__(fields, 
**kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields],\n template_name='widgets/jexlrange.html')\n\n def compress(self, data_list):\n return ','.join(data_list)\n\n def to_jexl(self, value):\n final_jexl = []\n if value:\n minimum, maximum = value.split(',')\n if minimum:\n final_jexl.append(\n self.jexl['minimum'].format(attr_name=self.attr_name, value=minimum)\n )\n if maximum:\n final_jexl.append(\n self.jexl['maximum'].format(attr_name=self.attr_name, value=maximum)\n )\n return ' && '.join(final_jexl)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and int(minimum) > int(maximum):\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLFirefoxRangeField(JEXLRangeField):\n def __init__(self, **kwargs):\n # Include only versions greater than 63, where ASRSnippets exist.\n min_version = 64\n # Need to be able to dynamically change this, probably using\n # product_details. Issue #855\n max_version = 84\n\n choices = (\n [(None, 'No limit')] +\n [(x, x) for x in reversed(range(min_version, max_version + 1))]\n )\n super().__init__('firefoxVersion', choices, **kwargs)\n\n def validate(self, value):\n minimum, maximum = value.split(',')\n self.fields[0].validate(minimum)\n self.fields[1].validate(maximum)\n\n if minimum and maximum and minimum > maximum:\n raise ValidationError('Minimum value must be lower or equal to maximum value.')\n return value\n\n\nclass JEXLAddonField(MultiValueField):\n def __init__(self, **kwargs):\n choices = (\n (None, \"I don't care\"),\n ('not_installed', 'Not Installed'),\n ('installed', 'Installed'),\n )\n fields = (\n ChoiceField(choices=choices),\n ModelChoiceField(queryset=Addon.objects.all(), required=False),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return '{},{}'.format(data_list[0], getattr(data_list[1], 'id', ''))\n return ''\n\n def to_jexl(self, value):\n check, addon_id = value.split(',')\n if not check or not addon_id:\n return ''\n\n addon = Addon.objects.get(id=addon_id)\n if check == 'not_installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == false'.format(addon.guid)\n elif check == 'installed':\n jexl = '(\"{}\" in addonsInfo.addons|keys) == true'.format(addon.guid)\n\n return jexl\n\n def validate(self, value):\n check, addon_id = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(addon_id)\n\n if check and not addon_id:\n raise ValidationError('You must select an add-on')\n\n if not check and addon_id:\n raise ValidationError('You must select a check')\n return value\n\n\nclass JEXLFirefoxServicesField(MultiValueField):\n def __init__(self, **kwargs):\n check_choices = (\n (None, \"I don't care\"),\n ('no_account', \"User hasn't signed up for\"),\n ('has_account', 'User has signed up for'),\n )\n # Verify IDs using\n # curl -s https://oauth.stage.mozaws.net/v1/client/<ID> | jq .\n # Incomplete list of IDs\n # https://docs.telemetry.mozilla.org/datasets/fxa_metrics/attribution.html#service-attribution # noqa\n service_choices = (\n (None, '---------'),\n ('e7ce535d93522896|98adfa37698f255b', 'Firefox Lockwise'),\n ('802d56ef2a9af9fa', 'Firefox Monitor'),\n ('1f30e32975ae5112|20f7931c9054d833', 'Firefox Send'),\n ('a8c528140153d1c6|565585c1745a144d', 'Firefox Private Network'),\n 
('e6eb0d1e856335fc', 'Firefox VPN'),\n ('9ebfe2c2f9ea3c58', 'Firefox Private Relay'),\n ('7ad9917f6c55fb77', 'Firefox Reality'),\n ('7377719276ad44ee|749818d3f2e7857f', 'Pocket'),\n\n )\n fields = (\n ChoiceField(choices=check_choices),\n ChoiceField(choices=service_choices),\n )\n super().__init__(fields, **kwargs)\n self.widget = JEXLMultiWidget(widgets=[f.widget for f in self.fields])\n\n def compress(self, data_list):\n if data_list:\n return f'{data_list[0]},{data_list[1]}'\n return ''\n\n def to_jexl(self, value):\n check, ids = value.split(',')\n ids = ids.split('|') if ids else ''\n\n if not check or not ids:\n return ''\n\n jexl = '('\n for id in ids:\n jexl += f'(\"{id}\" in attachedFxAOAuthClients|mapToProperty(\"id\")) || '\n jexl = jexl[:-4]\n\n if check == 'no_account':\n jexl += ') == false'\n elif check == 'has_account':\n jexl += ') == true'\n\n return jexl\n\n def validate(self, value):\n check, service_name = value.split(',')\n\n self.fields[0].validate(check)\n self.fields[1].validate(service_name)\n\n if check and not service_name:\n raise ValidationError('You must select an Service.')\n\n if not check and service_name:\n raise ValidationError('You must select a check.')\n return value\n", "path": "snippets/base/admin/fields.py"}]}
3,061
351
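In the entry above, the part of `JEXLFirefoxServicesField` that actually produces targeting expressions is `to_jexl`, which turns a check plus a pipe-separated list of FxA OAuth client IDs into a JEXL clause. A small standalone sketch of that conversion, using the new Firefox VPN client ID from the golden diff as sample input (`services_to_jexl` is a hypothetical helper name mirroring the field's logic):

```python
def services_to_jexl(check, ids):
    # `ids` is the pipe-separated OAuth client-ID string stored by the field,
    # e.g. "e6eb0d1e856335fc" for the new Firefox VPN choice in the diff above.
    id_list = ids.split("|") if ids else []
    if not check or not id_list:
        return ""
    clause = '("{}" in attachedFxAOAuthClients|mapToProperty("id"))'
    joined = " || ".join(clause.format(client_id) for client_id in id_list)
    suffix = ") == false" if check == "no_account" else ") == true"
    return "(" + joined + suffix


print(services_to_jexl("has_account", "e6eb0d1e856335fc"))
# (("e6eb0d1e856335fc" in attachedFxAOAuthClients|mapToProperty("id"))) == true
```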
gh_patches_debug_20500
rasdani/github-patches
git_diff
AlexsLemonade__refinebio-3299
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cache Docker Images by Branch ### Context We want to be able to cache docker image layers that are created locally as testing artfacts locally to be used by github actions. The current prepare_images.sh does this but there was an issue with the definition for branch_name. We also don't want to remove support non-ccdl members developing locally. ![image](https://github.com/AlexsLemonade/refinebio/assets/1075609/e984ea3b-658e-41f4-ba61-7d652841e89e) ### Solution or next step - After #3285 is merged, we should set sensible defaults that can be overridden for external contributors. - Get current branch name or tag to be set when pushing images to ccdl(staging) repo. Determine: - If they don't have access to the docker repo should we just build locally and not push? - How long can docker tags be / are they compatible with our longer branch names. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `common/setup.py` Content: ``` 1 import os 2 3 from setuptools import find_packages, setup 4 5 # allow setup.py to be run from any path 6 os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) 7 8 VERSION_FILE = "version" 9 try: 10 with open(VERSION_FILE, "rt") as version_file: 11 version_string = version_file.read().strip().split("-")[0] 12 except OSError: 13 print( 14 "Cannot read version to determine System Version." 15 " Please create a file common/version containing an up to date System Version." 16 ) 17 raise 18 19 setup( 20 name="data-refinery-common", 21 version=version_string, 22 packages=find_packages(), 23 include_package_data=True, 24 # These values are based on what is in common/requirements.txt. 25 install_requires=[ 26 "boto3>=1.9.16", 27 "coverage>=4.5.1", 28 "daiquiri>=1.5.0", 29 "django>=3.2,<4", 30 "raven>=6.9.0", 31 "requests>=2.10.1", 32 "retrying>=1.3.3", 33 "psycopg2-binary>=2.7.5", 34 ], 35 license="BSD License", 36 description="Common functionality to be shared between Data Refinery sub-projects.", 37 url="https://www.greenelab.com", 38 author="Kurt Wheeler", 39 author_email="[email protected]", 40 classifiers=[ 41 "Environment :: Web Environment", 42 "Framework :: Django", 43 "Intended Audience :: Developers", 44 "License :: OSI Approved :: BSD License", 45 "Operating System :: Ubuntu", 46 "Programming Language :: Python", 47 "Programming Language :: Python :: 3.5", 48 "Programming Language :: Python :: 3.6", 49 "Topic :: Internet :: WWW/HTTP", 50 ], 51 ) 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/common/setup.py b/common/setup.py --- a/common/setup.py +++ b/common/setup.py @@ -1,4 +1,6 @@ import os +import re +from datetime import datetime from setuptools import find_packages, setup @@ -11,11 +13,21 @@ version_string = version_file.read().strip().split("-")[0] except OSError: print( - "Cannot read version to determine System Version." - " Please create a file common/version containing an up to date System Version." + "Cannot read version file to determine system version. " + "Please create a file common/version containing an up to date system version." ) raise +version_re = re.compile( + r"^([1-9][0-9]*!)?(0|[1-9][0-9]*)" + "(\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))" + "?(\.post(0|[1-9][0-9]*))?(\.dev(0|[1-9][0-9]*))?$" +) +if not version_re.match(version_string): + # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560. + now = datetime.now() + version_string = f"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}" + setup( name="data-refinery-common", version=version_string,
{"golden_diff": "diff --git a/common/setup.py b/common/setup.py\n--- a/common/setup.py\n+++ b/common/setup.py\n@@ -1,4 +1,6 @@\n import os\n+import re\n+from datetime import datetime\n \n from setuptools import find_packages, setup\n \n@@ -11,11 +13,21 @@\n version_string = version_file.read().strip().split(\"-\")[0]\n except OSError:\n print(\n- \"Cannot read version to determine System Version.\"\n- \" Please create a file common/version containing an up to date System Version.\"\n+ \"Cannot read version file to determine system version. \"\n+ \"Please create a file common/version containing an up to date system version.\"\n )\n raise\n \n+version_re = re.compile(\n+ r\"^([1-9][0-9]*!)?(0|[1-9][0-9]*)\"\n+ \"(\\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))\"\n+ \"?(\\.post(0|[1-9][0-9]*))?(\\.dev(0|[1-9][0-9]*))?$\"\n+)\n+if not version_re.match(version_string):\n+ # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.\n+ now = datetime.now()\n+ version_string = f\"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}\"\n+\n setup(\n name=\"data-refinery-common\",\n version=version_string,\n", "issue": "Cache Docker Images by Branch\n### Context\r\n\r\nWe want to be able to cache docker image layers that are created locally as testing artfacts locally to be used by github actions.\r\nThe current prepare_images.sh does this but there was an issue with the definition for branch_name.\r\nWe also don't want to remove support non-ccdl members developing locally.\r\n\r\n\r\n![image](https://github.com/AlexsLemonade/refinebio/assets/1075609/e984ea3b-658e-41f4-ba61-7d652841e89e)\r\n\r\n\r\n### Solution or next step\r\n\r\n- After #3285 is merged, we should set sensible defaults that can be overridden for external contributors.\r\n- Get current branch name or tag to be set when pushing images to ccdl(staging) repo.\r\n\r\nDetermine:\r\n- If they don't have access to the docker repo should we just build locally and not push?\r\n- How long can docker tags be / are they compatible with our longer branch names.\r\n\n", "before_files": [{"content": "import os\n\nfrom setuptools import find_packages, setup\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nVERSION_FILE = \"version\"\ntry:\n with open(VERSION_FILE, \"rt\") as version_file:\n version_string = version_file.read().strip().split(\"-\")[0]\nexcept OSError:\n print(\n \"Cannot read version to determine System Version.\"\n \" Please create a file common/version containing an up to date System Version.\"\n )\n raise\n\nsetup(\n name=\"data-refinery-common\",\n version=version_string,\n packages=find_packages(),\n include_package_data=True,\n # These values are based on what is in common/requirements.txt.\n install_requires=[\n \"boto3>=1.9.16\",\n \"coverage>=4.5.1\",\n \"daiquiri>=1.5.0\",\n \"django>=3.2,<4\",\n \"raven>=6.9.0\",\n \"requests>=2.10.1\",\n \"retrying>=1.3.3\",\n \"psycopg2-binary>=2.7.5\",\n ],\n license=\"BSD License\",\n description=\"Common functionality to be shared between Data Refinery sub-projects.\",\n url=\"https://www.greenelab.com\",\n author=\"Kurt Wheeler\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: Ubuntu\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python 
:: 3.6\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "common/setup.py"}], "after_files": [{"content": "import os\nimport re\nfrom datetime import datetime\n\nfrom setuptools import find_packages, setup\n\n# allow setup.py to be run from any path\nos.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))\n\nVERSION_FILE = \"version\"\ntry:\n with open(VERSION_FILE, \"rt\") as version_file:\n version_string = version_file.read().strip().split(\"-\")[0]\nexcept OSError:\n print(\n \"Cannot read version file to determine system version. \"\n \"Please create a file common/version containing an up to date system version.\"\n )\n raise\n\nversion_re = re.compile(\n r\"^([1-9][0-9]*!)?(0|[1-9][0-9]*)\"\n \"(\\.(0|[1-9][0-9]*))*((a|b|rc)(0|[1-9][0-9]*))\"\n \"?(\\.post(0|[1-9][0-9]*))?(\\.dev(0|[1-9][0-9]*))?$\"\n)\nif not version_re.match(version_string):\n # Generate version based on the datetime.now(): e.g., 2023.5.17.dev1684352560.\n now = datetime.now()\n version_string = f\"{now.strftime('%Y.%-m.%-d.dev')}{int(datetime.timestamp(now))}\"\n\nsetup(\n name=\"data-refinery-common\",\n version=version_string,\n packages=find_packages(),\n include_package_data=True,\n # These values are based on what is in common/requirements.txt.\n install_requires=[\n \"boto3>=1.9.16\",\n \"coverage>=4.5.1\",\n \"daiquiri>=1.5.0\",\n \"django>=3.2,<4\",\n \"raven>=6.9.0\",\n \"requests>=2.10.1\",\n \"retrying>=1.3.3\",\n \"psycopg2-binary>=2.7.5\",\n ],\n license=\"BSD License\",\n description=\"Common functionality to be shared between Data Refinery sub-projects.\",\n url=\"https://www.greenelab.com\",\n author=\"Kurt Wheeler\",\n author_email=\"[email protected]\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: Ubuntu\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "common/setup.py"}]}
972
353
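The golden diff in the entry above sidesteps the branch-name problem by validating the contents of the `version` file and otherwise generating a datetime-based dev version. A rough sketch of that fallback is below; the regex is deliberately simplified relative to the diff's fuller PEP 440 pattern, and the `%-m`/`%-d` strftime codes borrowed from the diff are platform-dependent.

```python
import re
from datetime import datetime

# Simplified release pattern; the golden diff above uses a fuller PEP 440 regex
# that also allows pre-, post-, and dev-release suffixes.
RELEASE_RE = re.compile(r"^(0|[1-9][0-9]*)(\.(0|[1-9][0-9]*))*$")


def resolve_version(version_string):
    if RELEASE_RE.match(version_string):
        return version_string
    # Fall back to a datetime-based dev version, e.g. "2023.5.17.dev1684352560".
    now = datetime.now()
    # Note: "%-m"/"%-d" (no zero padding) are glibc extensions and fail on Windows.
    return "{}{}".format(now.strftime("%Y.%-m.%-d.dev"), int(now.timestamp()))
```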
gh_patches_debug_25112
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-668
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- capture_backtrace raises AttributeError on PEP-420 namespace packages The new `capture_backtrace` function in `scout_apm.core.backtrace` raises an AttributeError when the stack includes a [PEP-420] namespace package. This is caused by the [`module_filepath` function](https://github.com/scoutapp/scout_apm_python/blob/v2.21.0/src/scout_apm/core/backtrace.py#L26-L33), specifically line 32: ```python module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0] ``` If `sys.modules[root_module]` is a [PEP-420] namespace package, this will raise ``` AttributeError: 'NoneType' object has no attribute 'rsplit' ``` ### Steps to reproduce Create a namespace package, with some modules inside, e.g.: ``` namespace/ foo/ __init__.py bar/ __init__.py ``` Then on an interactive Python shell: ``` >>> from scout_apm.core.backtrace import module_filepath >>> from namespace import foo >>> module_filepath("namespace.foo", "namespace") Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jack/venvs/tmp-a17ac7185189989/lib/python3.8/site-packages/scout_apm/core/backtrace.py", line 32, in module_filepath module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0] AttributeError: 'NoneType' object has no attribute 'rsplit' ``` ### Details - Tested with version 2.21.0 - Current workaround is to pin version to 2.20.0 [PEP-420]: https://www.python.org/dev/peps/pep-0420/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/core/backtrace.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import itertools 5 import os 6 import sys 7 import sysconfig 8 import traceback 9 import warnings 10 11 # Maximum non-Scout frames to target retrieving 12 LIMIT = 50 13 # How many upper frames from inside Scout to ignore 14 IGNORED = 1 15 16 17 def filter_frames(frames): 18 """Filter the stack trace frames down to non-library code.""" 19 paths = sysconfig.get_paths() 20 library_paths = {paths["purelib"], paths["platlib"]} 21 for frame in frames: 22 if not any(frame["file"].startswith(exclusion) for exclusion in library_paths): 23 yield frame 24 25 26 def module_filepath(module, filepath): 27 """Get the filepath relative to the base module.""" 28 root_module = module.split(".", 1)[0] 29 if root_module == module: 30 return os.path.basename(filepath) 31 32 module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0] 33 return filepath.split(module_dir, 1)[-1].lstrip(os.sep) 34 35 36 def filepath(frame): 37 """Get the filepath for frame.""" 38 module = frame.f_globals.get("__name__", None) 39 filepath = frame.f_code.co_filename 40 41 if filepath.endswith(".pyc"): 42 filepath = filepath[:-1] 43 44 if not module: 45 return filepath 46 return module_filepath(module, filepath) 47 48 49 if sys.version_info >= (3, 5): 50 51 def stacktrace_walker(tb): 52 """Iterate over each frame of the stack downards for exceptions.""" 53 for frame, lineno in traceback.walk_tb(tb): 54 name = frame.f_code.co_name 55 yield {"file": filepath(frame), "line": lineno, "function": name} 56 57 def backtrace_walker(): 58 """Iterate over each frame of the stack upwards. 59 60 Taken from python3/traceback.ExtractSummary.extract to support 61 iterating over the entire stack, but without creating a large 62 data structure. 
63 """ 64 start_frame = sys._getframe().f_back 65 for frame, lineno in traceback.walk_stack(start_frame): 66 name = frame.f_code.co_name 67 yield {"file": filepath(frame), "line": lineno, "function": name} 68 69 70 else: 71 72 def stacktrace_walker(tb): 73 """Iterate over each frame of the stack downards for exceptions.""" 74 while tb is not None: 75 lineno = tb.tb_lineno 76 name = tb.tb_frame.f_code.co_name 77 yield { 78 "file": filepath(tb.tb_frame), 79 "line": lineno, 80 "function": name, 81 } 82 tb = tb.tb_next 83 84 def backtrace_walker(): 85 """Iterate over each frame of the stack upwards. 86 87 Taken from python2.7/traceback.extract_stack to support iterating 88 over the entire stack, but without creating a large data structure. 89 """ 90 try: 91 raise ZeroDivisionError 92 except ZeroDivisionError: 93 # Get the current frame 94 frame = sys.exc_info()[2].tb_frame.f_back 95 96 while frame is not None: 97 lineno = frame.f_lineno 98 name = frame.f_code.co_name 99 yield {"file": filepath(frame), "line": lineno, "function": name} 100 frame = frame.f_back 101 102 103 def capture_backtrace(): 104 walker = filter_frames(backtrace_walker()) 105 return list(itertools.islice(walker, LIMIT)) 106 107 108 def capture_stacktrace(tb): 109 walker = stacktrace_walker(tb) 110 return list(reversed(list(itertools.islice(walker, LIMIT)))) 111 112 113 def capture(): 114 warnings.warn( 115 "capture is deprecated, instead use capture_backtrace instead.", 116 DeprecationWarning, 117 2, 118 ) 119 return capture_backtrace() 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py --- a/src/scout_apm/core/backtrace.py +++ b/src/scout_apm/core/backtrace.py @@ -7,6 +7,9 @@ import sysconfig import traceback import warnings +from logging import getLogger + +logger = getLogger(__name__) # Maximum non-Scout frames to target retrieving LIMIT = 50 @@ -25,11 +28,25 @@ def module_filepath(module, filepath): """Get the filepath relative to the base module.""" - root_module = module.split(".", 1)[0] - if root_module == module: + root_module_name = module.split(".", 1)[0] + if root_module_name == module: return os.path.basename(filepath) - module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0] + root_module = sys.modules[root_module_name] + if root_module.__file__: + module_dir = root_module.__file__.rsplit(os.sep, 2)[0] + elif root_module.__path__: + # Default to using the first path specified for the module. + module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0] + if len(root_module.__path__) > 1: + logger.debug( + "{} has {} paths. Use the first and ignore the rest.".format( + root_module, len(root_module.__path__) + ) + ) + else: + # If the file path don't exist, then return the full path. + return filepath return filepath.split(module_dir, 1)[-1].lstrip(os.sep)
{"golden_diff": "diff --git a/src/scout_apm/core/backtrace.py b/src/scout_apm/core/backtrace.py\n--- a/src/scout_apm/core/backtrace.py\n+++ b/src/scout_apm/core/backtrace.py\n@@ -7,6 +7,9 @@\n import sysconfig\n import traceback\n import warnings\n+from logging import getLogger\n+\n+logger = getLogger(__name__)\n \n # Maximum non-Scout frames to target retrieving\n LIMIT = 50\n@@ -25,11 +28,25 @@\n \n def module_filepath(module, filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n- root_module = module.split(\".\", 1)[0]\n- if root_module == module:\n+ root_module_name = module.split(\".\", 1)[0]\n+ if root_module_name == module:\n return os.path.basename(filepath)\n \n- module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\n+ root_module = sys.modules[root_module_name]\n+ if root_module.__file__:\n+ module_dir = root_module.__file__.rsplit(os.sep, 2)[0]\n+ elif root_module.__path__:\n+ # Default to using the first path specified for the module.\n+ module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0]\n+ if len(root_module.__path__) > 1:\n+ logger.debug(\n+ \"{} has {} paths. Use the first and ignore the rest.\".format(\n+ root_module, len(root_module.__path__)\n+ )\n+ )\n+ else:\n+ # If the file path don't exist, then return the full path.\n+ return filepath\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n", "issue": "capture_backtrace raises AttributeError on PEP-420 namespace packages\nThe new `capture_backtrace` function in `scout_apm.core.backtrace` raises an AttributeError when the stack includes a [PEP-420] namespace package.\r\n\r\nThis is caused by the [`module_filepath` function](https://github.com/scoutapp/scout_apm_python/blob/v2.21.0/src/scout_apm/core/backtrace.py#L26-L33), specifically line 32:\r\n\r\n```python\r\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\r\n```\r\n\r\nIf `sys.modules[root_module]` is a [PEP-420] namespace package, this will raise\r\n```\r\nAttributeError: 'NoneType' object has no attribute 'rsplit'\r\n```\r\n\r\n### Steps to reproduce\r\n\r\nCreate a namespace package, with some modules inside, e.g.:\r\n```\r\nnamespace/\r\n foo/\r\n __init__.py\r\n bar/\r\n __init__.py\r\n```\r\n\r\nThen on an interactive Python shell:\r\n\r\n```\r\n>>> from scout_apm.core.backtrace import module_filepath\r\n>>> from namespace import foo\r\n>>> module_filepath(\"namespace.foo\", \"namespace\")\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/jack/venvs/tmp-a17ac7185189989/lib/python3.8/site-packages/scout_apm/core/backtrace.py\", line 32, in module_filepath\r\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\r\nAttributeError: 'NoneType' object has no attribute 'rsplit'\r\n```\r\n\r\n### Details\r\n\r\n- Tested with version 2.21.0\r\n- Current workaround is to pin version to 2.20.0\r\n\r\n[PEP-420]: https://www.python.org/dev/peps/pep-0420/\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport itertools\nimport os\nimport sys\nimport sysconfig\nimport traceback\nimport warnings\n\n# Maximum non-Scout frames to target retrieving\nLIMIT = 50\n# How many upper frames from inside Scout to ignore\nIGNORED = 1\n\n\ndef filter_frames(frames):\n \"\"\"Filter the stack trace frames down to non-library code.\"\"\"\n paths = sysconfig.get_paths()\n library_paths = {paths[\"purelib\"], paths[\"platlib\"]}\n for frame in frames:\n if not 
any(frame[\"file\"].startswith(exclusion) for exclusion in library_paths):\n yield frame\n\n\ndef module_filepath(module, filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n root_module = module.split(\".\", 1)[0]\n if root_module == module:\n return os.path.basename(filepath)\n\n module_dir = sys.modules[root_module].__file__.rsplit(os.sep, 2)[0]\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n\n\ndef filepath(frame):\n \"\"\"Get the filepath for frame.\"\"\"\n module = frame.f_globals.get(\"__name__\", None)\n filepath = frame.f_code.co_filename\n\n if filepath.endswith(\".pyc\"):\n filepath = filepath[:-1]\n\n if not module:\n return filepath\n return module_filepath(module, filepath)\n\n\nif sys.version_info >= (3, 5):\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n for frame, lineno in traceback.walk_tb(tb):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python3/traceback.ExtractSummary.extract to support\n iterating over the entire stack, but without creating a large\n data structure.\n \"\"\"\n start_frame = sys._getframe().f_back\n for frame, lineno in traceback.walk_stack(start_frame):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n\nelse:\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n while tb is not None:\n lineno = tb.tb_lineno\n name = tb.tb_frame.f_code.co_name\n yield {\n \"file\": filepath(tb.tb_frame),\n \"line\": lineno,\n \"function\": name,\n }\n tb = tb.tb_next\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python2.7/traceback.extract_stack to support iterating\n over the entire stack, but without creating a large data structure.\n \"\"\"\n try:\n raise ZeroDivisionError\n except ZeroDivisionError:\n # Get the current frame\n frame = sys.exc_info()[2].tb_frame.f_back\n\n while frame is not None:\n lineno = frame.f_lineno\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n frame = frame.f_back\n\n\ndef capture_backtrace():\n walker = filter_frames(backtrace_walker())\n return list(itertools.islice(walker, LIMIT))\n\n\ndef capture_stacktrace(tb):\n walker = stacktrace_walker(tb)\n return list(reversed(list(itertools.islice(walker, LIMIT))))\n\n\ndef capture():\n warnings.warn(\n \"capture is deprecated, instead use capture_backtrace instead.\",\n DeprecationWarning,\n 2,\n )\n return capture_backtrace()\n", "path": "src/scout_apm/core/backtrace.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport itertools\nimport os\nimport sys\nimport sysconfig\nimport traceback\nimport warnings\nfrom logging import getLogger\n\nlogger = getLogger(__name__)\n\n# Maximum non-Scout frames to target retrieving\nLIMIT = 50\n# How many upper frames from inside Scout to ignore\nIGNORED = 1\n\n\ndef filter_frames(frames):\n \"\"\"Filter the stack trace frames down to non-library code.\"\"\"\n paths = sysconfig.get_paths()\n library_paths = {paths[\"purelib\"], paths[\"platlib\"]}\n for frame in frames:\n if not any(frame[\"file\"].startswith(exclusion) for exclusion in library_paths):\n yield frame\n\n\ndef module_filepath(module, 
filepath):\n \"\"\"Get the filepath relative to the base module.\"\"\"\n root_module_name = module.split(\".\", 1)[0]\n if root_module_name == module:\n return os.path.basename(filepath)\n\n root_module = sys.modules[root_module_name]\n if root_module.__file__:\n module_dir = root_module.__file__.rsplit(os.sep, 2)[0]\n elif root_module.__path__:\n # Default to using the first path specified for the module.\n module_dir = root_module.__path__[0].rsplit(os.sep, 1)[0]\n if len(root_module.__path__) > 1:\n logger.debug(\n \"{} has {} paths. Use the first and ignore the rest.\".format(\n root_module, len(root_module.__path__)\n )\n )\n else:\n # If the file path don't exist, then return the full path.\n return filepath\n return filepath.split(module_dir, 1)[-1].lstrip(os.sep)\n\n\ndef filepath(frame):\n \"\"\"Get the filepath for frame.\"\"\"\n module = frame.f_globals.get(\"__name__\", None)\n filepath = frame.f_code.co_filename\n\n if filepath.endswith(\".pyc\"):\n filepath = filepath[:-1]\n\n if not module:\n return filepath\n return module_filepath(module, filepath)\n\n\nif sys.version_info >= (3, 5):\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n for frame, lineno in traceback.walk_tb(tb):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python3/traceback.ExtractSummary.extract to support\n iterating over the entire stack, but without creating a large\n data structure.\n \"\"\"\n start_frame = sys._getframe().f_back\n for frame, lineno in traceback.walk_stack(start_frame):\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n\n\nelse:\n\n def stacktrace_walker(tb):\n \"\"\"Iterate over each frame of the stack downards for exceptions.\"\"\"\n while tb is not None:\n lineno = tb.tb_lineno\n name = tb.tb_frame.f_code.co_name\n yield {\n \"file\": filepath(tb.tb_frame),\n \"line\": lineno,\n \"function\": name,\n }\n tb = tb.tb_next\n\n def backtrace_walker():\n \"\"\"Iterate over each frame of the stack upwards.\n\n Taken from python2.7/traceback.extract_stack to support iterating\n over the entire stack, but without creating a large data structure.\n \"\"\"\n try:\n raise ZeroDivisionError\n except ZeroDivisionError:\n # Get the current frame\n frame = sys.exc_info()[2].tb_frame.f_back\n\n while frame is not None:\n lineno = frame.f_lineno\n name = frame.f_code.co_name\n yield {\"file\": filepath(frame), \"line\": lineno, \"function\": name}\n frame = frame.f_back\n\n\ndef capture_backtrace():\n walker = filter_frames(backtrace_walker())\n return list(itertools.islice(walker, LIMIT))\n\n\ndef capture_stacktrace(tb):\n walker = stacktrace_walker(tb)\n return list(reversed(list(itertools.islice(walker, LIMIT))))\n\n\ndef capture():\n warnings.warn(\n \"capture is deprecated, instead use capture_backtrace instead.\",\n DeprecationWarning,\n 2,\n )\n return capture_backtrace()\n", "path": "src/scout_apm/core/backtrace.py"}]}
1,748
388
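The root cause in the entry above is that a PEP 420 namespace package has no `__init__.py`, so its module object exposes `__file__` as `None` (or not at all) while still providing `__path__`. A standalone sketch of the directory lookup the golden diff performs, with that case handled, might look like this (`root_module_dir` is a hypothetical name):

```python
import os
import sys


def root_module_dir(root_module_name):
    # Assumes the root module has already been imported.
    root_module = sys.modules[root_module_name]
    module_file = getattr(root_module, "__file__", None)
    if module_file:
        # Regular package: strip "<pkg>/__init__.py" to get the containing dir.
        return module_file.rsplit(os.sep, 2)[0]
    module_path = getattr(root_module, "__path__", None)
    if module_path:
        # PEP 420 namespace package: __file__ is None but __path__ lists one or
        # more directories; take the parent of the first entry, as the fix does.
        return list(module_path)[0].rsplit(os.sep, 1)[0]
    return None
```

Materializing `__path__` with `list(...)` before taking the first entry sidesteps any question of whether the namespace-path object supports subscripting on older interpreters.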
gh_patches_debug_14653
rasdani/github-patches
git_diff
conda__conda-4327
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Channels in centrally installed .condarc file are being ignored in conda 4.3.4 Hi, I am testing a centrally installed Anaconda setup with Anaconda installed under `C:\Program Files\Anaconda3`. I have a condarc file under `C:\Program Files\Anaconda3\.condarc`. When I run `conda info` it tells me that my config file is under the correct location. config file : C:\Program Files\Anaconda3\.condarc I have configured a few custom channels in this `.condarc` file, e.g.: channels: - http://some.internal/url I can also use `conda config --system --add channels http://some.internal/url` to set this value and conda tells me that channels already contains this value. But when I run `conda config --system --show`, the list of channels is always set to: channels: - defaults It seems that the list of channels in the central `.condarc` file is completely ignored and always replaced by `defaults`. I have also tried to set the list of `default_channels` in the central `.condarc` file but without success. Using conda 4.3.4 on win-64. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda/__init__.py` Content: ``` 1 # (c) 2012-2016 Continuum Analytics, Inc. / http://continuum.io 2 # All Rights Reserved 3 # 4 # conda is distributed under the terms of the BSD 3-clause license. 5 # Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause. 6 """OS-agnostic, system-level binary package manager.""" 7 from __future__ import absolute_import, division, print_function, unicode_literals 8 9 from os.path import dirname 10 11 from ._vendor.auxlib.packaging import get_version 12 from .common.compat import iteritems, text_type 13 14 __all__ = [ 15 "__name__", "__version__", "__author__", 16 "__email__", "__license__", "__copyright__", 17 "__summary__", "__url__", 18 ] 19 20 __name__ = "conda" 21 __version__ = get_version(__file__) 22 __author__ = "Continuum Analytics, Inc." 
23 __email__ = "[email protected]" 24 __license__ = "BSD" 25 __summary__ = __doc__ 26 __url__ = "https://github.com/conda/conda" 27 28 CONDA_PACKAGE_ROOT = dirname(__file__) 29 30 31 class CondaError(Exception): 32 def __init__(self, message, **kwargs): 33 self.message = message 34 self._kwargs = kwargs 35 super(CondaError, self).__init__(message) 36 37 def __repr__(self): 38 return '%s: %s\n' % (self.__class__.__name__, text_type(self)) 39 40 def __str__(self): 41 return text_type(self.message % self._kwargs) 42 43 def dump_map(self): 44 result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_')) 45 result.update(exception_type=text_type(type(self)), 46 exception_name=self.__class__.__name__, 47 message=text_type(self), 48 error=repr(self), 49 **self._kwargs) 50 return result 51 52 53 class CondaMultiError(CondaError): 54 55 def __init__(self, errors): 56 self.errors = errors 57 super(CondaError, self).__init__(None) 58 59 def __repr__(self): 60 return '\n'.join(repr(e) for e in self.errors) + '\n' 61 62 def __str__(self): 63 return '\n'.join(text_type(e) for e in self.errors) + '\n' 64 65 def dump_map(self): 66 return dict(exception_type=text_type(type(self)), 67 exception_name=self.__class__.__name__, 68 errors=tuple(error.dump_map() for error in self.errors), 69 error="Multiple Errors Encountered.", 70 ) 71 72 73 class CondaExitZero(CondaError): 74 pass 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda/__init__.py b/conda/__init__.py --- a/conda/__init__.py +++ b/conda/__init__.py @@ -6,7 +6,9 @@ """OS-agnostic, system-level binary package manager.""" from __future__ import absolute_import, division, print_function, unicode_literals +import os from os.path import dirname +import sys from ._vendor.auxlib.packaging import get_version from .common.compat import iteritems, text_type @@ -25,6 +27,10 @@ __summary__ = __doc__ __url__ = "https://github.com/conda/conda" + +if os.getenv('CONDA_ROOT') is None: + os.environ['CONDA_ROOT'] = sys.prefix + CONDA_PACKAGE_ROOT = dirname(__file__)
{"golden_diff": "diff --git a/conda/__init__.py b/conda/__init__.py\n--- a/conda/__init__.py\n+++ b/conda/__init__.py\n@@ -6,7 +6,9 @@\n \"\"\"OS-agnostic, system-level binary package manager.\"\"\"\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+import os\n from os.path import dirname\n+import sys\n \n from ._vendor.auxlib.packaging import get_version\n from .common.compat import iteritems, text_type\n@@ -25,6 +27,10 @@\n __summary__ = __doc__\n __url__ = \"https://github.com/conda/conda\"\n \n+\n+if os.getenv('CONDA_ROOT') is None:\n+ os.environ['CONDA_ROOT'] = sys.prefix\n+\n CONDA_PACKAGE_ROOT = dirname(__file__)\n", "issue": "Channels in centrally installed .condarc file are being ignored in conda 4.3.4\nHi, I am testing a centrally installed Anaconda setup with Anaconda installed under `C:\\Program Files\\Anaconda3`. I have a condarc file under `C:\\Program Files\\Anaconda3\\.condarc`.\r\n\r\nWhen I run `conda info` it tells me that my config file is under the correct location.\r\n\r\n config file : C:\\Program Files\\Anaconda3\\.condarc\r\n\r\nI have configured a few custom channels in this `.condarc` file, e.g.:\r\n\r\n channels:\r\n - http://some.internal/url\r\n\r\nI can also use `conda config --system --add channels http://some.internal/url` to set this value and conda tells me that channels already contains this value.\r\n\r\nBut when I run `conda config --system --show`, the list of channels is always set to:\r\n\r\n channels:\r\n - defaults\r\n\r\nIt seems that the list of channels in the central `.condarc` file is completely ignored and always replaced by `defaults`. I have also tried to set the list of `default_channels` in the central `.condarc` file but without success.\r\n\r\nUsing conda 4.3.4 on win-64.\r\n\n", "before_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. 
/ http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\"\"\"OS-agnostic, system-level binary package manager.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom os.path import dirname\n\nfrom ._vendor.auxlib.packaging import get_version\nfrom .common.compat import iteritems, text_type\n\n__all__ = [\n \"__name__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n \"__summary__\", \"__url__\",\n]\n\n__name__ = \"conda\"\n__version__ = get_version(__file__)\n__author__ = \"Continuum Analytics, Inc.\"\n__email__ = \"[email protected]\"\n__license__ = \"BSD\"\n__summary__ = __doc__\n__url__ = \"https://github.com/conda/conda\"\n\nCONDA_PACKAGE_ROOT = dirname(__file__)\n\n\nclass CondaError(Exception):\n def __init__(self, message, **kwargs):\n self.message = message\n self._kwargs = kwargs\n super(CondaError, self).__init__(message)\n\n def __repr__(self):\n return '%s: %s\\n' % (self.__class__.__name__, text_type(self))\n\n def __str__(self):\n return text_type(self.message % self._kwargs)\n\n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n result.update(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n message=text_type(self),\n error=repr(self),\n **self._kwargs)\n return result\n\n\nclass CondaMultiError(CondaError):\n\n def __init__(self, errors):\n self.errors = errors\n super(CondaError, self).__init__(None)\n\n def __repr__(self):\n return '\\n'.join(repr(e) for e in self.errors) + '\\n'\n\n def __str__(self):\n return '\\n'.join(text_type(e) for e in self.errors) + '\\n'\n\n def dump_map(self):\n return dict(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n errors=tuple(error.dump_map() for error in self.errors),\n error=\"Multiple Errors Encountered.\",\n )\n\n\nclass CondaExitZero(CondaError):\n pass\n", "path": "conda/__init__.py"}], "after_files": [{"content": "# (c) 2012-2016 Continuum Analytics, Inc. 
/ http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\n\"\"\"OS-agnostic, system-level binary package manager.\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport os\nfrom os.path import dirname\nimport sys\n\nfrom ._vendor.auxlib.packaging import get_version\nfrom .common.compat import iteritems, text_type\n\n__all__ = [\n \"__name__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n \"__summary__\", \"__url__\",\n]\n\n__name__ = \"conda\"\n__version__ = get_version(__file__)\n__author__ = \"Continuum Analytics, Inc.\"\n__email__ = \"[email protected]\"\n__license__ = \"BSD\"\n__summary__ = __doc__\n__url__ = \"https://github.com/conda/conda\"\n\n\nif os.getenv('CONDA_ROOT') is None:\n os.environ['CONDA_ROOT'] = sys.prefix\n\nCONDA_PACKAGE_ROOT = dirname(__file__)\n\n\nclass CondaError(Exception):\n def __init__(self, message, **kwargs):\n self.message = message\n self._kwargs = kwargs\n super(CondaError, self).__init__(message)\n\n def __repr__(self):\n return '%s: %s\\n' % (self.__class__.__name__, text_type(self))\n\n def __str__(self):\n return text_type(self.message % self._kwargs)\n\n def dump_map(self):\n result = dict((k, v) for k, v in iteritems(vars(self)) if not k.startswith('_'))\n result.update(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n message=text_type(self),\n error=repr(self),\n **self._kwargs)\n return result\n\n\nclass CondaMultiError(CondaError):\n\n def __init__(self, errors):\n self.errors = errors\n super(CondaError, self).__init__(None)\n\n def __repr__(self):\n return '\\n'.join(repr(e) for e in self.errors) + '\\n'\n\n def __str__(self):\n return '\\n'.join(text_type(e) for e in self.errors) + '\\n'\n\n def dump_map(self):\n return dict(exception_type=text_type(type(self)),\n exception_name=self.__class__.__name__,\n errors=tuple(error.dump_map() for error in self.errors),\n error=\"Multiple Errors Encountered.\",\n )\n\n\nclass CondaExitZero(CondaError):\n pass\n", "path": "conda/__init__.py"}]}
1,236
181
gh_patches_debug_19684
rasdani/github-patches
git_diff
Azure__azure-cli-extensions-2985
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The parameter for --administration-members is incorrectly stated as optional For the function 'az powerbi embedded-capacity create', the parameter for --administration-members is incorrectly stated as optional. If you leave this parameter out, it will give this error: **BadRequestError: At least one capacity administrator is required** --- #### Document Details ⚠ *Do not edit this section. It is required for docs.microsoft.com ➟ GitHub issue linking.* * ID: edf4a4a9-8ff1-c276-3e51-d5e83c180879 * Version Independent ID: de63a28e-4d16-2270-595f-1a67f5e682bd * Content: [az powerbi embedded-capacity](https://docs.microsoft.com/en-us/cli/azure/ext/powerbidedicated/powerbi/embedded-capacity?view=azure-cli-latest) * Content Source: [latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml) * GitHub Login: @rloutlaw * Microsoft Alias: **routlaw** --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/powerbidedicated/azext_powerbidedicated/_params.py` Content: ``` 1 # -------------------------------------------------------------------------------------------- 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License. See License.txt in the project root for license information. 4 # -------------------------------------------------------------------------------------------- 5 # pylint: disable=line-too-long 6 # pylint: disable=too-many-lines 7 # pylint: disable=too-many-statements 8 9 from knack.arguments import CLIArgumentType 10 11 from azure.cli.core.commands.parameters import ( 12 tags_type, 13 get_enum_type, 14 resource_group_name_type, 15 get_location_type 16 ) 17 18 19 def load_arguments(self, _): 20 name_type = CLIArgumentType( 21 options_list=['--name', '-n'], 22 help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.') 23 sku_name_type = CLIArgumentType( 24 arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']), 25 help='Name of the SKU level. For more information, please refer to ' 26 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.' 27 ) 28 sku_tier_type = CLIArgumentType( 29 arg_type=get_enum_type(['PBIE_Azure']), 30 help='The name of the Azure pricing tier to which the SKU applies.' 
31 ) 32 administration_type = CLIArgumentType( 33 help='An array of administrator user identities.', nargs='+' 34 ) 35 36 with self.argument_context('powerbi embedded-capacity') as c: 37 c.argument('resource_group_name', resource_group_name_type) 38 c.argument('name', name_type) 39 40 with self.argument_context('powerbi embedded-capacity create') as c: 41 c.argument('sku_name', sku_name_type) 42 c.argument('sku_tier', sku_tier_type) 43 c.argument('tags', tags_type) 44 c.argument('administration_members', administration_type) 45 c.argument('location', get_location_type(self.cli_ctx)) 46 47 with self.argument_context('powerbi embedded-capacity update') as c: 48 c.argument('sku_name', sku_name_type) 49 c.argument('sku_tier', sku_tier_type) 50 c.argument('tags', tags_type) 51 c.argument('administration_members', administration_type) 52 ``` Path: `src/powerbidedicated/setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # -------------------------------------------------------------------------------------------- 4 # Copyright (c) Microsoft Corporation. All rights reserved. 5 # Licensed under the MIT License. See License.txt in the project root for license information. 6 # -------------------------------------------------------------------------------------------- 7 8 9 from codecs import open 10 from setuptools import setup, find_packages 11 try: 12 from azure_bdist_wheel import cmdclass 13 except ImportError: 14 from distutils import log as logger 15 logger.warn("Wheel is not available, disabling bdist_wheel hook") 16 17 # TODO: Confirm this is the right version number you want and it matches your 18 # HISTORY.rst entry. 19 VERSION = '0.1.1' 20 21 # The full list of classifiers is available at 22 # https://pypi.python.org/pypi?%3Aaction=list_classifiers 23 CLASSIFIERS = [ 24 'Development Status :: 4 - Beta', 25 'Intended Audience :: Developers', 26 'Intended Audience :: System Administrators', 27 'Programming Language :: Python', 28 'Programming Language :: Python :: 2', 29 'Programming Language :: Python :: 2.7', 30 'Programming Language :: Python :: 3', 31 'Programming Language :: Python :: 3.4', 32 'Programming Language :: Python :: 3.5', 33 'Programming Language :: Python :: 3.6', 34 'License :: OSI Approved :: MIT License', 35 ] 36 37 # TODO: Add any additional SDK dependencies here 38 DEPENDENCIES = [] 39 40 with open('README.md', 'r', encoding='utf-8') as f: 41 README = f.read() 42 with open('HISTORY.rst', 'r', encoding='utf-8') as f: 43 HISTORY = f.read() 44 45 setup( 46 name='powerbidedicated', 47 version=VERSION, 48 description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension', 49 # TODO: Update author and email, if applicable 50 author='Microsoft Corporation', 51 author_email='[email protected]', 52 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated', 53 long_description=README + '\n\n' + HISTORY, 54 license='MIT', 55 classifiers=CLASSIFIERS, 56 packages=find_packages(), 57 install_requires=DEPENDENCIES, 58 package_data={'azext_powerbidedicated': ['azext_metadata.json']}, 59 ) 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/powerbidedicated/azext_powerbidedicated/_params.py b/src/powerbidedicated/azext_powerbidedicated/_params.py --- a/src/powerbidedicated/azext_powerbidedicated/_params.py +++ b/src/powerbidedicated/azext_powerbidedicated/_params.py @@ -41,7 +41,7 @@ c.argument('sku_name', sku_name_type) c.argument('sku_tier', sku_tier_type) c.argument('tags', tags_type) - c.argument('administration_members', administration_type) + c.argument('administration_members', administration_type, required=True) c.argument('location', get_location_type(self.cli_ctx)) with self.argument_context('powerbi embedded-capacity update') as c: diff --git a/src/powerbidedicated/setup.py b/src/powerbidedicated/setup.py --- a/src/powerbidedicated/setup.py +++ b/src/powerbidedicated/setup.py @@ -16,7 +16,7 @@ # TODO: Confirm this is the right version number you want and it matches your # HISTORY.rst entry. -VERSION = '0.1.1' +VERSION = '0.2.0' # The full list of classifiers is available at # https://pypi.python.org/pypi?%3Aaction=list_classifiers
{"golden_diff": "diff --git a/src/powerbidedicated/azext_powerbidedicated/_params.py b/src/powerbidedicated/azext_powerbidedicated/_params.py\n--- a/src/powerbidedicated/azext_powerbidedicated/_params.py\n+++ b/src/powerbidedicated/azext_powerbidedicated/_params.py\n@@ -41,7 +41,7 @@\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n- c.argument('administration_members', administration_type)\n+ c.argument('administration_members', administration_type, required=True)\n c.argument('location', get_location_type(self.cli_ctx))\n \n with self.argument_context('powerbi embedded-capacity update') as c:\ndiff --git a/src/powerbidedicated/setup.py b/src/powerbidedicated/setup.py\n--- a/src/powerbidedicated/setup.py\n+++ b/src/powerbidedicated/setup.py\n@@ -16,7 +16,7 @@\n \n # TODO: Confirm this is the right version number you want and it matches your\n # HISTORY.rst entry.\n-VERSION = '0.1.1'\n+VERSION = '0.2.0'\n \n # The full list of classifiers is available at\n # https://pypi.python.org/pypi?%3Aaction=list_classifiers\n", "issue": "The parameter for --administration-members is incorrectly stated as optional \nFor the function 'az powerbi embedded-capacity create', the parameter for --administration-members is incorrectly stated as optional.\r\nIf you leave this parameter out, it will give this error:\r\n**BadRequestError: At least one capacity administrator is required**\r\n\r\n---\r\n#### Document Details\r\n\r\n\u26a0 *Do not edit this section. It is required for docs.microsoft.com \u279f GitHub issue linking.*\r\n\r\n* ID: edf4a4a9-8ff1-c276-3e51-d5e83c180879\r\n* Version Independent ID: de63a28e-4d16-2270-595f-1a67f5e682bd\r\n* Content: [az powerbi embedded-capacity](https://docs.microsoft.com/en-us/cli/azure/ext/powerbidedicated/powerbi/embedded-capacity?view=azure-cli-latest)\r\n* Content Source: [latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml](https://github.com/MicrosoftDocs/azure-docs-cli/blob/master/latest/docs-ref-autogen/ext/powerbidedicated/powerbi/embedded-capacity.yml)\r\n* GitHub Login: @rloutlaw\r\n* Microsoft Alias: **routlaw**\n", "before_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n\nfrom knack.arguments import CLIArgumentType\n\nfrom azure.cli.core.commands.parameters import (\n tags_type,\n get_enum_type,\n resource_group_name_type,\n get_location_type\n)\n\n\ndef load_arguments(self, _):\n name_type = CLIArgumentType(\n options_list=['--name', '-n'],\n help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.')\n sku_name_type = CLIArgumentType(\n arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']),\n help='Name of the SKU level. 
For more information, please refer to '\n 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.'\n )\n sku_tier_type = CLIArgumentType(\n arg_type=get_enum_type(['PBIE_Azure']),\n help='The name of the Azure pricing tier to which the SKU applies.'\n )\n administration_type = CLIArgumentType(\n help='An array of administrator user identities.', nargs='+'\n )\n\n with self.argument_context('powerbi embedded-capacity') as c:\n c.argument('resource_group_name', resource_group_name_type)\n c.argument('name', name_type)\n\n with self.argument_context('powerbi embedded-capacity create') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n c.argument('location', get_location_type(self.cli_ctx))\n\n with self.argument_context('powerbi embedded-capacity update') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n", "path": "src/powerbidedicated/azext_powerbidedicated/_params.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.1.1'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='powerbidedicated',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_powerbidedicated': ['azext_metadata.json']},\n)\n", "path": "src/powerbidedicated/setup.py"}], "after_files": [{"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n# pylint: disable=too-many-statements\n\nfrom knack.arguments import CLIArgumentType\n\nfrom azure.cli.core.commands.parameters import (\n tags_type,\n get_enum_type,\n resource_group_name_type,\n get_location_type\n)\n\n\ndef load_arguments(self, _):\n name_type = CLIArgumentType(\n options_list=['--name', '-n'],\n help='The name of the Dedicated capacity. It must be at least 3 characters in length, and no more than 63.')\n sku_name_type = CLIArgumentType(\n arg_type=get_enum_type(['A1', 'A2', 'A3', 'A4', 'A5', 'A6']),\n help='Name of the SKU level. For more information, please refer to '\n 'https://azure.microsoft.com/en-us/pricing/details/power-bi-embedded/.'\n )\n sku_tier_type = CLIArgumentType(\n arg_type=get_enum_type(['PBIE_Azure']),\n help='The name of the Azure pricing tier to which the SKU applies.'\n )\n administration_type = CLIArgumentType(\n help='An array of administrator user identities.', nargs='+'\n )\n\n with self.argument_context('powerbi embedded-capacity') as c:\n c.argument('resource_group_name', resource_group_name_type)\n c.argument('name', name_type)\n\n with self.argument_context('powerbi embedded-capacity create') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type, required=True)\n c.argument('location', get_location_type(self.cli_ctx))\n\n with self.argument_context('powerbi embedded-capacity update') as c:\n c.argument('sku_name', sku_name_type)\n c.argument('sku_tier', sku_tier_type)\n c.argument('tags', tags_type)\n c.argument('administration_members', administration_type)\n", "path": "src/powerbidedicated/azext_powerbidedicated/_params.py"}, {"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\ntry:\n from azure_bdist_wheel import cmdclass\nexcept ImportError:\n from distutils import log as logger\n logger.warn(\"Wheel is not available, disabling bdist_wheel hook\")\n\n# TODO: Confirm this is the right version number you want and it matches your\n# HISTORY.rst entry.\nVERSION = '0.2.0'\n\n# The full list of classifiers is available at\n# https://pypi.python.org/pypi?%3Aaction=list_classifiers\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\n# TODO: Add any additional SDK dependencies here\nDEPENDENCIES = []\n\nwith open('README.md', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='powerbidedicated',\n version=VERSION,\n description='Microsoft Azure Command-Line Tools PowerBIDedicated Extension',\n # TODO: Update author and email, if applicable\n author='Microsoft Corporation',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/powerbidedicated',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n classifiers=CLASSIFIERS,\n packages=find_packages(),\n install_requires=DEPENDENCIES,\n package_data={'azext_powerbidedicated': ['azext_metadata.json']},\n)\n", "path": "src/powerbidedicated/setup.py"}]}
1,727
297
gh_patches_debug_10979
rasdani/github-patches
git_diff
bokeh__bokeh-10074
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [DOCS] Page wise display of documentation search **Is your feature request related to a problem?** Yes. I searched for a relatively simple query in the documentation search bar of https://docs.bokeh.org, and it took very long to load the results. In my second try, the results weren't even loading, I'm afraid. These are displayed in an unordered list which fills the entire page up. It might get frustrating to read through everything to find the answer to the input query. **Describe the solution you'd like** I would suggest displaying the fetched results in a page wise format, the way most search engines do it. Relevance weighted sorted answer, shown page wise. Fill up only the current page of about 20 to 30 odd query results, and depending on whether the user wants to see the other pages, load them. **Describe alternatives you've considered** If not a page wise result, a folder wise result would also benefit, which leaves the option to the user to navigate where he/she wants to. A custom google search may also help. **Additional context** ![issue](https://user-images.githubusercontent.com/43414361/82223644-88c83400-9940-11ea-9534-f96dc4ae3a6d.PNG) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sphinx/docserver.py` Content: ``` 1 import os 2 import sys 3 import threading 4 import time 5 import webbrowser 6 7 import flask 8 import tornado 9 from tornado.httpserver import HTTPServer 10 from tornado.ioloop import IOLoop 11 from tornado.wsgi import WSGIContainer 12 13 _basedir = os.path.join("..", os.path.dirname(__file__)) 14 15 app = flask.Flask(__name__, static_folder="/unused") 16 PORT=5009 17 http_server = HTTPServer(WSGIContainer(app)) 18 19 @app.route('/') 20 def welcome(): 21 return """ 22 <h1>Welcome to the Bokeh documentation server</h1> 23 You probably want to go to <a href="/en/latest/index.html"> Index</a> 24 """ 25 26 @app.route('/versions.json') 27 def send_versions(): 28 return flask.send_from_directory( 29 os.path.join(_basedir, "sphinx"), "test_versions.json") 30 31 @app.route('/alert.html') 32 def send_alert(): 33 return os.environ.get("BOKEH_DOCS_ALERT", "") 34 35 @app.route('/en/latest/<path:filename>') 36 def send_docs(filename): 37 return flask.send_from_directory( 38 os.path.join(_basedir, "sphinx/build/html/"), filename) 39 40 def open_browser(): 41 # Child process 42 time.sleep(0.5) 43 webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new="tab") 44 45 data = {} 46 47 def serve_http(): 48 data['ioloop'] = IOLoop() 49 http_server.listen(PORT) 50 IOLoop.current().start() 51 52 def shutdown_server(): 53 ioloop = data['ioloop'] 54 ioloop.add_callback(ioloop.stop) 55 print("Asked Server to shut down.") 56 57 def ui(): 58 try: 59 time.sleep(0.5) 60 input("Press <ENTER> to exit...\n") # lgtm [py/use-of-input] 61 except KeyboardInterrupt: 62 pass 63 64 if __name__ == "__main__": 65 66 if tornado.version_info[0] == 4: 67 print('docserver.py script requires tornado 5 or higher') 68 sys.exit(1) 69 70 print("\nStarting Bokeh plot server on port %d..." 
% PORT) 71 print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT) 72 73 t_server = threading.Thread(target=serve_http) 74 t_server.start() 75 t_browser = threading.Thread(target=open_browser) 76 t_browser.start() 77 78 ui() 79 80 shutdown_server() 81 t_server.join() 82 t_browser.join() 83 print("Server shut down.") 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sphinx/docserver.py b/sphinx/docserver.py --- a/sphinx/docserver.py +++ b/sphinx/docserver.py @@ -1,3 +1,4 @@ +import asyncio import os import sys import threading @@ -10,6 +11,11 @@ from tornado.ioloop import IOLoop from tornado.wsgi import WSGIContainer +# Needed for Windows + Python 3.8 config +if sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'): + asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) + + _basedir = os.path.join("..", os.path.dirname(__file__)) app = flask.Flask(__name__, static_folder="/unused")
{"golden_diff": "diff --git a/sphinx/docserver.py b/sphinx/docserver.py\n--- a/sphinx/docserver.py\n+++ b/sphinx/docserver.py\n@@ -1,3 +1,4 @@\n+import asyncio\n import os\n import sys\n import threading\n@@ -10,6 +11,11 @@\n from tornado.ioloop import IOLoop\n from tornado.wsgi import WSGIContainer\n \n+# Needed for Windows + Python 3.8 config\n+if sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'):\n+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n+\n+\n _basedir = os.path.join(\"..\", os.path.dirname(__file__))\n \n app = flask.Flask(__name__, static_folder=\"/unused\")\n", "issue": "[DOCS] Page wise display of documentation search \n**Is your feature request related to a problem?**\r\nYes. I searched for a relatively simple query in the documentation search bar of https://docs.bokeh.org, and it took very long to load the results. In my second try, the results weren't even loading, I'm afraid. These are displayed in an unordered list which fills the entire page up. It might get frustrating to read through everything to find the answer to the input query. \r\n\r\n**Describe the solution you'd like**\r\nI would suggest displaying the fetched results in a page wise format, the way most search engines do it. Relevance weighted sorted answer, shown page wise. Fill up only the current page of about 20 to 30 odd query results, and depending on whether the user wants to see the other pages, load them.\r\n\r\n**Describe alternatives you've considered**\r\nIf not a page wise result, a folder wise result would also benefit, which leaves the option to the user to navigate where he/she wants to. A custom google search may also help.\r\n\r\n**Additional context**\r\n![issue](https://user-images.githubusercontent.com/43414361/82223644-88c83400-9940-11ea-9534-f96dc4ae3a6d.PNG)\r\n\r\n\n", "before_files": [{"content": "import os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py 
script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}], "after_files": [{"content": "import asyncio\nimport os\nimport sys\nimport threading\nimport time\nimport webbrowser\n\nimport flask\nimport tornado\nfrom tornado.httpserver import HTTPServer\nfrom tornado.ioloop import IOLoop\nfrom tornado.wsgi import WSGIContainer\n\n# Needed for Windows + Python 3.8 config\nif sys.version_info.major==3 and sys.version_info.minor >= 8 and sys.platform.startswith('win'):\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n\n_basedir = os.path.join(\"..\", os.path.dirname(__file__))\n\napp = flask.Flask(__name__, static_folder=\"/unused\")\nPORT=5009\nhttp_server = HTTPServer(WSGIContainer(app))\n\[email protected]('/')\ndef welcome():\n return \"\"\"\n <h1>Welcome to the Bokeh documentation server</h1>\n You probably want to go to <a href=\"/en/latest/index.html\"> Index</a>\n \"\"\"\n\[email protected]('/versions.json')\ndef send_versions():\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx\"), \"test_versions.json\")\n\[email protected]('/alert.html')\ndef send_alert():\n return os.environ.get(\"BOKEH_DOCS_ALERT\", \"\")\n\[email protected]('/en/latest/<path:filename>')\ndef send_docs(filename):\n return flask.send_from_directory(\n os.path.join(_basedir, \"sphinx/build/html/\"), filename)\n\ndef open_browser():\n # Child process\n time.sleep(0.5)\n webbrowser.open(\"http://localhost:%d/en/latest/index.html\" % PORT, new=\"tab\")\n\ndata = {}\n\ndef serve_http():\n data['ioloop'] = IOLoop()\n http_server.listen(PORT)\n IOLoop.current().start()\n\ndef shutdown_server():\n ioloop = data['ioloop']\n ioloop.add_callback(ioloop.stop)\n print(\"Asked Server to shut down.\")\n\ndef ui():\n try:\n time.sleep(0.5)\n input(\"Press <ENTER> to exit...\\n\") # lgtm [py/use-of-input]\n except KeyboardInterrupt:\n pass\n\nif __name__ == \"__main__\":\n\n if tornado.version_info[0] == 4:\n print('docserver.py script requires tornado 5 or higher')\n sys.exit(1)\n\n print(\"\\nStarting Bokeh plot server on port %d...\" % PORT)\n print(\"Visit http://localhost:%d/en/latest/index.html to see plots\\n\" % PORT)\n\n t_server = threading.Thread(target=serve_http)\n t_server.start()\n t_browser = threading.Thread(target=open_browser)\n t_browser.start()\n\n ui()\n\n shutdown_server()\n t_server.join()\n t_browser.join()\n print(\"Server shut down.\")\n", "path": "sphinx/docserver.py"}]}
1,253
170
gh_patches_debug_31758
rasdani/github-patches
git_diff
docker__docker-py-384
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Do not support sslv3 (poodle) In Docker 1.3.1 (coming very soon), only TLS1.0+ will be supported. Ping @shin- --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/tls.py` Content: ``` 1 import os 2 3 from . import errors 4 from .ssladapter import ssladapter 5 6 7 class TLSConfig(object): 8 cert = None 9 verify = None 10 ssl_version = None 11 12 def __init__(self, client_cert=None, ca_cert=None, verify=None, 13 ssl_version=None, assert_hostname=None): 14 # Argument compatibility/mapping with 15 # http://docs.docker.com/examples/https/ 16 # This diverges from the Docker CLI in that users can specify 'tls' 17 # here, but also disable any public/default CA pool verification by 18 # leaving tls_verify=False 19 20 # urllib3 sets a default ssl_version if ssl_version is None 21 # http://tinyurl.com/kxga8hb 22 self.ssl_version = ssl_version 23 self.assert_hostname = assert_hostname 24 25 # "tls" and "tls_verify" must have both or neither cert/key files 26 # In either case, Alert the user when both are expected, but any are 27 # missing. 28 29 if client_cert: 30 try: 31 tls_cert, tls_key = client_cert 32 except ValueError: 33 raise errors.TLSParameterError( 34 'client_config must be a tuple of' 35 ' (client certificate, key file)' 36 ) 37 38 if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or 39 not os.path.isfile(tls_key)): 40 raise errors.TLSParameterError( 41 'Path to a certificate and key files must be provided' 42 ' through the client_config param' 43 ) 44 self.cert = (tls_cert, tls_key) 45 46 # Either set verify to True (public/default CA checks) or to the 47 # path of a CA Cert file. 48 if verify is not None: 49 if not ca_cert: 50 self.verify = verify 51 elif os.path.isfile(ca_cert): 52 if not verify: 53 raise errors.TLSParameterError( 54 'verify can not be False when a CA cert is' 55 ' provided.' 56 ) 57 self.verify = ca_cert 58 else: 59 raise errors.TLSParameterError( 60 'Invalid CA certificate provided for `tls_ca_cert`.' 
61 ) 62 63 def configure_client(self, client): 64 client.ssl_version = self.ssl_version 65 if self.verify is not None: 66 client.verify = self.verify 67 if self.cert: 68 client.cert = self.cert 69 client.mount('https://', ssladapter.SSLAdapter( 70 ssl_version=self.ssl_version, 71 assert_hostname=self.assert_hostname, 72 )) 73 ``` Path: `docker/ssladapter/ssladapter.py` Content: ``` 1 """ Resolves OpenSSL issues in some servers: 2 https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/ 3 https://github.com/kennethreitz/requests/pull/799 4 """ 5 from distutils.version import StrictVersion 6 from requests.adapters import HTTPAdapter 7 try: 8 import requests.packages.urllib3 as urllib3 9 except ImportError: 10 import urllib3 11 12 13 PoolManager = urllib3.poolmanager.PoolManager 14 15 16 class SSLAdapter(HTTPAdapter): 17 '''An HTTPS Transport Adapter that uses an arbitrary SSL version.''' 18 def __init__(self, ssl_version=None, assert_hostname=None, **kwargs): 19 self.ssl_version = ssl_version 20 self.assert_hostname = assert_hostname 21 super(SSLAdapter, self).__init__(**kwargs) 22 23 def init_poolmanager(self, connections, maxsize, block=False): 24 kwargs = { 25 'num_pools': connections, 26 'maxsize': maxsize, 27 'block': block, 28 'assert_hostname': self.assert_hostname, 29 } 30 if self.can_override_ssl_version(): 31 kwargs['ssl_version'] = self.ssl_version 32 33 self.poolmanager = PoolManager(**kwargs) 34 35 def can_override_ssl_version(self): 36 urllib_ver = urllib3.__version__.split('-')[0] 37 if urllib_ver is None: 38 return False 39 if urllib_ver == 'dev': 40 return True 41 return StrictVersion(urllib_ver) > StrictVersion('1.5') 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py --- a/docker/ssladapter/ssladapter.py +++ b/docker/ssladapter/ssladapter.py @@ -4,6 +4,7 @@ """ from distutils.version import StrictVersion from requests.adapters import HTTPAdapter +import ssl try: import requests.packages.urllib3 as urllib3 except ImportError: @@ -13,9 +14,19 @@ PoolManager = urllib3.poolmanager.PoolManager +def get_max_tls_protocol(): + protocols = ('PROTOCOL_TLSv1_2', + 'PROTOCOL_TLSv1_1', + 'PROTOCOL_TLSv1') + for proto in protocols: + if hasattr(ssl, proto): + return proto + + class SSLAdapter(HTTPAdapter): '''An HTTPS Transport Adapter that uses an arbitrary SSL version.''' def __init__(self, ssl_version=None, assert_hostname=None, **kwargs): + ssl_version = ssl_version or get_max_tls_protocol() self.ssl_version = ssl_version self.assert_hostname = assert_hostname super(SSLAdapter, self).__init__(**kwargs) diff --git a/docker/tls.py b/docker/tls.py --- a/docker/tls.py +++ b/docker/tls.py @@ -17,8 +17,11 @@ # here, but also disable any public/default CA pool verification by # leaving tls_verify=False - # urllib3 sets a default ssl_version if ssl_version is None - # http://tinyurl.com/kxga8hb + # urllib3 sets a default ssl_version if ssl_version is None, + # but that default is the vulnerable PROTOCOL_SSLv23 selection, + # so we override the default with the maximum supported in the running + # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb) + ssl_version = ssl_version or ssladapter.get_max_tls_protocol() self.ssl_version = ssl_version self.assert_hostname = assert_hostname
{"golden_diff": "diff --git a/docker/ssladapter/ssladapter.py b/docker/ssladapter/ssladapter.py\n--- a/docker/ssladapter/ssladapter.py\n+++ b/docker/ssladapter/ssladapter.py\n@@ -4,6 +4,7 @@\n \"\"\"\n from distutils.version import StrictVersion\n from requests.adapters import HTTPAdapter\n+import ssl\n try:\n import requests.packages.urllib3 as urllib3\n except ImportError:\n@@ -13,9 +14,19 @@\n PoolManager = urllib3.poolmanager.PoolManager\n \n \n+def get_max_tls_protocol():\n+ protocols = ('PROTOCOL_TLSv1_2',\n+ 'PROTOCOL_TLSv1_1',\n+ 'PROTOCOL_TLSv1')\n+ for proto in protocols:\n+ if hasattr(ssl, proto):\n+ return proto\n+\n+\n class SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n+ ssl_version = ssl_version or get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\ndiff --git a/docker/tls.py b/docker/tls.py\n--- a/docker/tls.py\n+++ b/docker/tls.py\n@@ -17,8 +17,11 @@\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n \n- # urllib3 sets a default ssl_version if ssl_version is None\n- # http://tinyurl.com/kxga8hb\n+ # urllib3 sets a default ssl_version if ssl_version is None,\n+ # but that default is the vulnerable PROTOCOL_SSLv23 selection,\n+ # so we override the default with the maximum supported in the running\n+ # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)\n+ ssl_version = ssl_version or ssladapter.get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n", "issue": "Do not support sslv3 (poodle)\nIn Docker 1.3.1 (coming very soon), only TLS1.0+ will be supported.\nPing @shin- \n\n", "before_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None\n # http://tinyurl.com/kxga8hb\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n ))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block,\n 'assert_hostname': self.assert_hostname,\n }\n if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager = PoolManager(**kwargs)\n\n def can_override_ssl_version(self):\n urllib_ver = urllib3.__version__.split('-')[0]\n if urllib_ver is None:\n return False\n if urllib_ver == 'dev':\n return True\n return StrictVersion(urllib_ver) > StrictVersion('1.5')\n", "path": "docker/ssladapter/ssladapter.py"}], "after_files": [{"content": "import os\n\nfrom . 
import errors\nfrom .ssladapter import ssladapter\n\n\nclass TLSConfig(object):\n cert = None\n verify = None\n ssl_version = None\n\n def __init__(self, client_cert=None, ca_cert=None, verify=None,\n ssl_version=None, assert_hostname=None):\n # Argument compatibility/mapping with\n # http://docs.docker.com/examples/https/\n # This diverges from the Docker CLI in that users can specify 'tls'\n # here, but also disable any public/default CA pool verification by\n # leaving tls_verify=False\n\n # urllib3 sets a default ssl_version if ssl_version is None,\n # but that default is the vulnerable PROTOCOL_SSLv23 selection,\n # so we override the default with the maximum supported in the running\n # Python interpeter up to TLS 1.2. (see: http://tinyurl.com/kxga8hb)\n ssl_version = ssl_version or ssladapter.get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n\n # \"tls\" and \"tls_verify\" must have both or neither cert/key files\n # In either case, Alert the user when both are expected, but any are\n # missing.\n\n if client_cert:\n try:\n tls_cert, tls_key = client_cert\n except ValueError:\n raise errors.TLSParameterError(\n 'client_config must be a tuple of'\n ' (client certificate, key file)'\n )\n\n if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or\n not os.path.isfile(tls_key)):\n raise errors.TLSParameterError(\n 'Path to a certificate and key files must be provided'\n ' through the client_config param'\n )\n self.cert = (tls_cert, tls_key)\n\n # Either set verify to True (public/default CA checks) or to the\n # path of a CA Cert file.\n if verify is not None:\n if not ca_cert:\n self.verify = verify\n elif os.path.isfile(ca_cert):\n if not verify:\n raise errors.TLSParameterError(\n 'verify can not be False when a CA cert is'\n ' provided.'\n )\n self.verify = ca_cert\n else:\n raise errors.TLSParameterError(\n 'Invalid CA certificate provided for `tls_ca_cert`.'\n )\n\n def configure_client(self, client):\n client.ssl_version = self.ssl_version\n if self.verify is not None:\n client.verify = self.verify\n if self.cert:\n client.cert = self.cert\n client.mount('https://', ssladapter.SSLAdapter(\n ssl_version=self.ssl_version,\n assert_hostname=self.assert_hostname,\n ))\n", "path": "docker/tls.py"}, {"content": "\"\"\" Resolves OpenSSL issues in some servers:\n https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/\n https://github.com/kennethreitz/requests/pull/799\n\"\"\"\nfrom distutils.version import StrictVersion\nfrom requests.adapters import HTTPAdapter\nimport ssl\ntry:\n import requests.packages.urllib3 as urllib3\nexcept ImportError:\n import urllib3\n\n\nPoolManager = urllib3.poolmanager.PoolManager\n\n\ndef get_max_tls_protocol():\n protocols = ('PROTOCOL_TLSv1_2',\n 'PROTOCOL_TLSv1_1',\n 'PROTOCOL_TLSv1')\n for proto in protocols:\n if hasattr(ssl, proto):\n return proto\n\n\nclass SSLAdapter(HTTPAdapter):\n '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''\n def __init__(self, ssl_version=None, assert_hostname=None, **kwargs):\n ssl_version = ssl_version or get_max_tls_protocol()\n self.ssl_version = ssl_version\n self.assert_hostname = assert_hostname\n super(SSLAdapter, self).__init__(**kwargs)\n\n def init_poolmanager(self, connections, maxsize, block=False):\n kwargs = {\n 'num_pools': connections,\n 'maxsize': maxsize,\n 'block': block,\n 'assert_hostname': self.assert_hostname,\n }\n if self.can_override_ssl_version():\n kwargs['ssl_version'] = self.ssl_version\n\n self.poolmanager 
= PoolManager(**kwargs)\n\n def can_override_ssl_version(self):\n urllib_ver = urllib3.__version__.split('-')[0]\n if urllib_ver is None:\n return False\n if urllib_ver == 'dev':\n return True\n return StrictVersion(urllib_ver) > StrictVersion('1.5')\n", "path": "docker/ssladapter/ssladapter.py"}]}
1,376
458
gh_patches_debug_36699
rasdani/github-patches
git_diff
secdev__scapy-2078
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- AttributeErrror: module 'os' has no attribute 'popen2' when using voip module Hello I have installed the last scapy version, I want to use the module voip but i has the issue AttributeErrror: module 'os' has no attribute 'popen2' when using voip_play function. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scapy/modules/voip.py` Content: ``` 1 # This file is part of Scapy 2 # See http://www.secdev.org/projects/scapy for more information 3 # Copyright (C) Philippe Biondi <[email protected]> 4 # This program is published under a GPLv2 license 5 6 """ 7 VoIP (Voice over IP) related functions 8 """ 9 10 from __future__ import absolute_import 11 import os 12 ################### 13 # Listen VoIP # 14 ################### 15 16 from scapy.sendrecv import sniff 17 from scapy.layers.inet import IP, UDP 18 from scapy.layers.rtp import RTP 19 from scapy.consts import WINDOWS 20 from scapy.config import conf 21 from scapy.modules.six.moves import range 22 23 24 sox_base = "sox -t .ul %s - -t ossdsp /dev/dsp" 25 26 if WINDOWS: 27 if conf.prog.sox is None: 28 raise OSError("Sox must be installed to play VoIP packets") 29 sox_base = "\"" + conf.prog.sox + "\" -t .ul %s - -t waveaudio" 30 31 32 def _merge_sound_bytes(x, y, sample_size=2): 33 # TODO: find a better way to merge sound bytes 34 # This will only add them one next to each other: 35 # \xff + \xff ==> \xff\xff 36 m = "" 37 ss = sample_size 38 min_ = 0 39 if len(x) >= len(y): 40 min_ = y 41 elif len(x) < len(y): 42 min_ = x 43 r_ = len(min_) 44 for i in range(r_ / ss): 45 m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)] 46 return x[r_:], y[r_:], m 47 48 49 def voip_play(s1, lst=None, **kargs): 50 """Play VoIP packets with RAW data that 51 are either sniffed either from an IP, or 52 specified as a list. 53 54 It will play only the incoming packets ! 55 56 :param s1: The IP of the src of all VoIP packets. 57 :param lst: (optional) A list of packets to load 58 :type s1: string 59 :type lst: list 60 61 :Example: 62 63 >>> voip_play("64.2.142.189") 64 while calling '[email protected]' 65 66 >>> voip_play("64.2.142.189", lst) 67 with list a list of packets with VoIP data 68 in their RAW layer 69 70 .. seealso:: voip_play2 71 to play both the outcoming and incoming packets 72 at the same time. 73 74 .. seealso:: voip_play3 75 to read RTP VoIP packets 76 """ 77 78 dsp, rd = os.popen2(sox_base % "") 79 80 def play(pkt): 81 if not pkt: 82 return 83 if not pkt.haslayer(UDP) or not pkt.haslayer(IP): 84 return 85 ip = pkt.getlayer(IP) 86 if s1 == ip.src: 87 dsp.write(pkt.getlayer(conf.raw_layer).load[12:]) 88 try: 89 if lst is None: 90 sniff(store=0, prn=play, **kargs) 91 else: 92 for p in lst: 93 play(p) 94 finally: 95 dsp.close() 96 rd.close() 97 98 99 def voip_play1(s1, lst=None, **kargs): 100 """Same than voip_play, backward compatibility 101 """ 102 return voip_play(s1, lst, **kargs) 103 104 105 def voip_play2(s1, **kargs): 106 """ 107 Same than voip_play, but will play 108 both incoming and outcoming packets. 109 The sound will surely suffer distortion. 110 111 Only supports sniffing. 112 113 .. seealso:: voip_play 114 to play only incoming packets. 
115 """ 116 dsp, rd = os.popen2(sox_base % "-c 2") 117 global x1, x2 118 x1 = "" 119 x2 = "" 120 121 def play(pkt): 122 global x1, x2 123 if not pkt: 124 return 125 if not pkt.haslayer(UDP) or not pkt.haslayer(IP): 126 return 127 ip = pkt.getlayer(IP) 128 if s1 in [ip.src, ip.dst]: 129 if ip.dst == s1: 130 x1 += pkt.getlayer(conf.raw_layer).load[12:] 131 else: 132 x2 += pkt.getlayer(conf.raw_layer).load[12:] 133 x1, x2, r = _merge_sound_bytes(x1, x2) 134 dsp.write(r) 135 136 sniff(store=0, prn=play, **kargs) 137 138 139 def voip_play3(lst=None, **kargs): 140 """Same than voip_play, but made to 141 read and play VoIP RTP packets, without 142 checking IP. 143 144 .. seealso:: voip_play 145 for basic VoIP packets 146 """ 147 dsp, rd = os.popen2(sox_base % "") 148 149 def play(pkt, dsp=dsp): 150 if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP): 151 dsp.write(pkt.getlayer(RTP).load) 152 try: 153 if lst is None: 154 sniff(store=0, prn=play, **kargs) 155 else: 156 for p in lst: 157 play(p) 158 finally: 159 try: 160 dsp.close() 161 rd.close() 162 except Exception: 163 pass 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scapy/modules/voip.py b/scapy/modules/voip.py --- a/scapy/modules/voip.py +++ b/scapy/modules/voip.py @@ -8,7 +8,7 @@ """ from __future__ import absolute_import -import os +import subprocess ################### # Listen VoIP # ################### @@ -21,12 +21,12 @@ from scapy.modules.six.moves import range -sox_base = "sox -t .ul %s - -t ossdsp /dev/dsp" +sox_base = (["sox", "-t", ".ul"], ["-", "-t", "ossdsp", "/dev/dsp"]) if WINDOWS: if conf.prog.sox is None: raise OSError("Sox must be installed to play VoIP packets") - sox_base = "\"" + conf.prog.sox + "\" -t .ul %s - -t waveaudio" + sox_base = ([conf.prog.sox, "-t", ".ul"], ["-", "-t", "waveaudio"]) def _merge_sound_bytes(x, y, sample_size=2): @@ -75,7 +75,9 @@ to read RTP VoIP packets """ - dsp, rd = os.popen2(sox_base % "") + proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dsp, rd = proc.stdin, proc.stdout def play(pkt): if not pkt: @@ -113,7 +115,9 @@ .. seealso:: voip_play to play only incoming packets. """ - dsp, rd = os.popen2(sox_base % "-c 2") + proc = subprocess.Popen(sox_base[0] + ["-c", "2"] + sox_base[1], + stdin=subprocess.PIPE, stdout=subprocess.PIPE) + dsp, rd = proc.stdin, proc.stdout global x1, x2 x1 = "" x2 = "" @@ -133,7 +137,14 @@ x1, x2, r = _merge_sound_bytes(x1, x2) dsp.write(r) - sniff(store=0, prn=play, **kargs) + try: + sniff(store=0, prn=play, **kargs) + finally: + try: + dsp.close() + rd.close() + except Exception: + pass def voip_play3(lst=None, **kargs): @@ -144,7 +155,9 @@ .. seealso:: voip_play for basic VoIP packets """ - dsp, rd = os.popen2(sox_base % "") + proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + dsp, rd = proc.stdin, proc.stdout def play(pkt, dsp=dsp): if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):
{"golden_diff": "diff --git a/scapy/modules/voip.py b/scapy/modules/voip.py\n--- a/scapy/modules/voip.py\n+++ b/scapy/modules/voip.py\n@@ -8,7 +8,7 @@\n \"\"\"\n \n from __future__ import absolute_import\n-import os\n+import subprocess\n ###################\n # Listen VoIP #\n ###################\n@@ -21,12 +21,12 @@\n from scapy.modules.six.moves import range\n \n \n-sox_base = \"sox -t .ul %s - -t ossdsp /dev/dsp\"\n+sox_base = ([\"sox\", \"-t\", \".ul\"], [\"-\", \"-t\", \"ossdsp\", \"/dev/dsp\"])\n \n if WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n- sox_base = \"\\\"\" + conf.prog.sox + \"\\\" -t .ul %s - -t waveaudio\"\n+ sox_base = ([conf.prog.sox, \"-t\", \".ul\"], [\"-\", \"-t\", \"waveaudio\"])\n \n \n def _merge_sound_bytes(x, y, sample_size=2):\n@@ -75,7 +75,9 @@\n to read RTP VoIP packets\n \"\"\"\n \n- dsp, rd = os.popen2(sox_base % \"\")\n+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n+ stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n \n def play(pkt):\n if not pkt:\n@@ -113,7 +115,9 @@\n .. seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n- dsp, rd = os.popen2(sox_base % \"-c 2\")\n+ proc = subprocess.Popen(sox_base[0] + [\"-c\", \"2\"] + sox_base[1],\n+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n@@ -133,7 +137,14 @@\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n \n- sniff(store=0, prn=play, **kargs)\n+ try:\n+ sniff(store=0, prn=play, **kargs)\n+ finally:\n+ try:\n+ dsp.close()\n+ rd.close()\n+ except Exception:\n+ pass\n \n \n def voip_play3(lst=None, **kargs):\n@@ -144,7 +155,9 @@\n .. seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n- dsp, rd = os.popen2(sox_base % \"\")\n+ proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n+ stdout=subprocess.PIPE)\n+ dsp, rd = proc.stdin, proc.stdout\n \n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n", "issue": "AttributeErrror: module 'os' has no attribute 'popen2' when using voip module\nHello I have installed the last scapy version, I want to use the module voip but i has the issue \r\nAttributeErrror: module 'os' has no attribute 'popen2' when using voip_play function. 
\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVoIP (Voice over IP) related functions\n\"\"\"\n\nfrom __future__ import absolute_import\nimport os\n###################\n# Listen VoIP #\n###################\n\nfrom scapy.sendrecv import sniff\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.rtp import RTP\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.modules.six.moves import range\n\n\nsox_base = \"sox -t .ul %s - -t ossdsp /dev/dsp\"\n\nif WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n sox_base = \"\\\"\" + conf.prog.sox + \"\\\" -t .ul %s - -t waveaudio\"\n\n\ndef _merge_sound_bytes(x, y, sample_size=2):\n # TODO: find a better way to merge sound bytes\n # This will only add them one next to each other:\n # \\xff + \\xff ==> \\xff\\xff\n m = \"\"\n ss = sample_size\n min_ = 0\n if len(x) >= len(y):\n min_ = y\n elif len(x) < len(y):\n min_ = x\n r_ = len(min_)\n for i in range(r_ / ss):\n m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]\n return x[r_:], y[r_:], m\n\n\ndef voip_play(s1, lst=None, **kargs):\n \"\"\"Play VoIP packets with RAW data that\n are either sniffed either from an IP, or\n specified as a list.\n\n It will play only the incoming packets !\n\n :param s1: The IP of the src of all VoIP packets.\n :param lst: (optional) A list of packets to load\n :type s1: string\n :type lst: list\n\n :Example:\n\n >>> voip_play(\"64.2.142.189\")\n while calling '[email protected]'\n\n >>> voip_play(\"64.2.142.189\", lst)\n with list a list of packets with VoIP data\n in their RAW layer\n\n .. seealso:: voip_play2\n to play both the outcoming and incoming packets\n at the same time.\n\n .. seealso:: voip_play3\n to read RTP VoIP packets\n \"\"\"\n\n dsp, rd = os.popen2(sox_base % \"\")\n\n def play(pkt):\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 == ip.src:\n dsp.write(pkt.getlayer(conf.raw_layer).load[12:])\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n dsp.close()\n rd.close()\n\n\ndef voip_play1(s1, lst=None, **kargs):\n \"\"\"Same than voip_play, backward compatibility\n \"\"\"\n return voip_play(s1, lst, **kargs)\n\n\ndef voip_play2(s1, **kargs):\n \"\"\"\n Same than voip_play, but will play\n both incoming and outcoming packets.\n The sound will surely suffer distortion.\n\n Only supports sniffing.\n\n .. seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n dsp, rd = os.popen2(sox_base % \"-c 2\")\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n\n def play(pkt):\n global x1, x2\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 in [ip.src, ip.dst]:\n if ip.dst == s1:\n x1 += pkt.getlayer(conf.raw_layer).load[12:]\n else:\n x2 += pkt.getlayer(conf.raw_layer).load[12:]\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n\n sniff(store=0, prn=play, **kargs)\n\n\ndef voip_play3(lst=None, **kargs):\n \"\"\"Same than voip_play, but made to\n read and play VoIP RTP packets, without\n checking IP.\n\n .. 
seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n dsp, rd = os.popen2(sox_base % \"\")\n\n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n dsp.write(pkt.getlayer(RTP).load)\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n try:\n dsp.close()\n rd.close()\n except Exception:\n pass\n", "path": "scapy/modules/voip.py"}], "after_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVoIP (Voice over IP) related functions\n\"\"\"\n\nfrom __future__ import absolute_import\nimport subprocess\n###################\n# Listen VoIP #\n###################\n\nfrom scapy.sendrecv import sniff\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.rtp import RTP\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.modules.six.moves import range\n\n\nsox_base = ([\"sox\", \"-t\", \".ul\"], [\"-\", \"-t\", \"ossdsp\", \"/dev/dsp\"])\n\nif WINDOWS:\n if conf.prog.sox is None:\n raise OSError(\"Sox must be installed to play VoIP packets\")\n sox_base = ([conf.prog.sox, \"-t\", \".ul\"], [\"-\", \"-t\", \"waveaudio\"])\n\n\ndef _merge_sound_bytes(x, y, sample_size=2):\n # TODO: find a better way to merge sound bytes\n # This will only add them one next to each other:\n # \\xff + \\xff ==> \\xff\\xff\n m = \"\"\n ss = sample_size\n min_ = 0\n if len(x) >= len(y):\n min_ = y\n elif len(x) < len(y):\n min_ = x\n r_ = len(min_)\n for i in range(r_ / ss):\n m += x[ss * i:ss * (i + 1)] + y[ss * i:ss * (i + 1)]\n return x[r_:], y[r_:], m\n\n\ndef voip_play(s1, lst=None, **kargs):\n \"\"\"Play VoIP packets with RAW data that\n are either sniffed either from an IP, or\n specified as a list.\n\n It will play only the incoming packets !\n\n :param s1: The IP of the src of all VoIP packets.\n :param lst: (optional) A list of packets to load\n :type s1: string\n :type lst: list\n\n :Example:\n\n >>> voip_play(\"64.2.142.189\")\n while calling '[email protected]'\n\n >>> voip_play(\"64.2.142.189\", lst)\n with list a list of packets with VoIP data\n in their RAW layer\n\n .. seealso:: voip_play2\n to play both the outcoming and incoming packets\n at the same time.\n\n .. seealso:: voip_play3\n to read RTP VoIP packets\n \"\"\"\n\n proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n dsp, rd = proc.stdin, proc.stdout\n\n def play(pkt):\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 == ip.src:\n dsp.write(pkt.getlayer(conf.raw_layer).load[12:])\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n dsp.close()\n rd.close()\n\n\ndef voip_play1(s1, lst=None, **kargs):\n \"\"\"Same than voip_play, backward compatibility\n \"\"\"\n return voip_play(s1, lst, **kargs)\n\n\ndef voip_play2(s1, **kargs):\n \"\"\"\n Same than voip_play, but will play\n both incoming and outcoming packets.\n The sound will surely suffer distortion.\n\n Only supports sniffing.\n\n .. 
seealso:: voip_play\n to play only incoming packets.\n \"\"\"\n proc = subprocess.Popen(sox_base[0] + [\"-c\", \"2\"] + sox_base[1],\n stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n dsp, rd = proc.stdin, proc.stdout\n global x1, x2\n x1 = \"\"\n x2 = \"\"\n\n def play(pkt):\n global x1, x2\n if not pkt:\n return\n if not pkt.haslayer(UDP) or not pkt.haslayer(IP):\n return\n ip = pkt.getlayer(IP)\n if s1 in [ip.src, ip.dst]:\n if ip.dst == s1:\n x1 += pkt.getlayer(conf.raw_layer).load[12:]\n else:\n x2 += pkt.getlayer(conf.raw_layer).load[12:]\n x1, x2, r = _merge_sound_bytes(x1, x2)\n dsp.write(r)\n\n try:\n sniff(store=0, prn=play, **kargs)\n finally:\n try:\n dsp.close()\n rd.close()\n except Exception:\n pass\n\n\ndef voip_play3(lst=None, **kargs):\n \"\"\"Same than voip_play, but made to\n read and play VoIP RTP packets, without\n checking IP.\n\n .. seealso:: voip_play\n for basic VoIP packets\n \"\"\"\n proc = subprocess.Popen(sox_base[0] + sox_base[1], stdin=subprocess.PIPE,\n stdout=subprocess.PIPE)\n dsp, rd = proc.stdin, proc.stdout\n\n def play(pkt, dsp=dsp):\n if pkt and pkt.haslayer(UDP) and pkt.haslayer(RTP):\n dsp.write(pkt.getlayer(RTP).load)\n try:\n if lst is None:\n sniff(store=0, prn=play, **kargs)\n else:\n for p in lst:\n play(p)\n finally:\n try:\n dsp.close()\n rd.close()\n except Exception:\n pass\n", "path": "scapy/modules/voip.py"}]}
1,915
692
gh_patches_debug_40329
rasdani/github-patches
git_diff
Kinto__kinto-1943
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add a clear cache command to kinto For debugging purposes mainly we want to be able to clear the cache on time to time Add a clear cache command to kinto For debugging purposes mainly we want to be able to clear the cache on time to time Add a clear cache command to kinto For debugging purposes mainly we want to be able to clear the cache on time to time --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/core/scripts.py` Content: ``` 1 """ 2 kinto.core.scripts: utilities to build admin scripts for kinto-based services 3 """ 4 import logging 5 6 from pyramid.settings import asbool 7 8 9 logger = logging.getLogger(__name__) 10 11 12 def migrate(env, dry_run=False): 13 """ 14 User-friendly frontend to run database migrations. 15 """ 16 registry = env["registry"] 17 settings = registry.settings 18 readonly_backends = ("storage", "permission") 19 readonly_mode = asbool(settings.get("readonly", False)) 20 21 for backend in ("cache", "storage", "permission"): 22 if hasattr(registry, backend): 23 if readonly_mode and backend in readonly_backends: 24 message = f"Cannot migrate the {backend} backend while in readonly mode." 25 logger.error(message) 26 else: 27 getattr(registry, backend).initialize_schema(dry_run=dry_run) 28 ``` Path: `kinto/__main__.py` Content: ``` 1 import argparse 2 import os 3 import subprocess 4 import sys 5 import logging 6 import logging.config 7 8 from kinto.core import scripts as core_scripts 9 from kinto import scripts as kinto_scripts 10 from kinto.plugins.accounts import scripts as accounts_scripts 11 from pyramid.scripts import pserve 12 from pyramid.paster import bootstrap 13 from kinto import __version__ 14 from kinto.config import init 15 16 DEFAULT_CONFIG_FILE = os.getenv("KINTO_INI", "config/kinto.ini") 17 DEFAULT_PORT = 8888 18 DEFAULT_LOG_LEVEL = logging.INFO 19 DEFAULT_LOG_FORMAT = "%(levelname)-5.5s %(message)s" 20 21 22 def main(args=None): 23 """The main routine.""" 24 if args is None: 25 args = sys.argv[1:] 26 27 parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface") 28 commands = ( 29 "init", 30 "start", 31 "migrate", 32 "delete-collection", 33 "version", 34 "rebuild-quotas", 35 "create-user", 36 ) 37 subparsers = parser.add_subparsers( 38 title="subcommands", 39 description="Main Kinto CLI commands", 40 dest="subcommand", 41 help="Choose and run with --help", 42 ) 43 subparsers.required = True 44 45 for command in commands: 46 subparser = subparsers.add_parser(command) 47 subparser.set_defaults(which=command) 48 49 subparser.add_argument( 50 "--ini", 51 help="Application configuration file", 52 dest="ini_file", 53 required=False, 54 default=DEFAULT_CONFIG_FILE, 55 ) 56 57 subparser.add_argument( 58 "-q", 59 "--quiet", 60 action="store_const", 61 const=logging.CRITICAL, 62 dest="verbosity", 63 help="Show only critical errors.", 64 ) 65 66 subparser.add_argument( 67 "-v", 68 "--debug", 69 action="store_const", 70 const=logging.DEBUG, 71 dest="verbosity", 72 help="Show all messages, including debug messages.", 73 ) 74 75 if command == "init": 76 subparser.add_argument( 77 "--backend", 78 help="{memory,redis,postgresql}", 79 dest="backend", 80 required=False, 81 default=None, 82 ) 83 subparser.add_argument( 84 "--cache-backend", 85 help="{memory,redis,postgresql,memcached}", 86 dest="cache-backend", 87 required=False, 88 
default=None, 89 ) 90 subparser.add_argument( 91 "--host", 92 help="Host to listen() on.", 93 dest="host", 94 required=False, 95 default="127.0.0.1", 96 ) 97 elif command == "migrate": 98 subparser.add_argument( 99 "--dry-run", 100 action="store_true", 101 help="Simulate the migration operations " "and show information", 102 dest="dry_run", 103 required=False, 104 default=False, 105 ) 106 elif command == "delete-collection": 107 subparser.add_argument( 108 "--bucket", help="The bucket where the collection " "belongs to.", required=True 109 ) 110 subparser.add_argument("--collection", help="The collection to remove.", required=True) 111 112 elif command == "rebuild-quotas": 113 subparser.add_argument( 114 "--dry-run", 115 action="store_true", 116 help="Simulate the rebuild operation " "and show information", 117 dest="dry_run", 118 required=False, 119 default=False, 120 ) 121 122 elif command == "start": 123 subparser.add_argument( 124 "--reload", 125 action="store_true", 126 help="Restart when code or config changes", 127 required=False, 128 default=False, 129 ) 130 subparser.add_argument( 131 "--port", 132 type=int, 133 help="Listening port number", 134 required=False, 135 default=DEFAULT_PORT, 136 ) 137 138 elif command == "create-user": 139 subparser.add_argument( 140 "-u", "--username", help="Superuser username", required=False, default=None 141 ) 142 subparser.add_argument( 143 "-p", "--password", help="Superuser password", required=False, default=None 144 ) 145 146 # Parse command-line arguments 147 parsed_args = vars(parser.parse_args(args)) 148 149 config_file = parsed_args["ini_file"] 150 which_command = parsed_args["which"] 151 152 # Initialize logging from 153 level = parsed_args.get("verbosity") or DEFAULT_LOG_LEVEL 154 logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT) 155 156 if which_command == "init": 157 if os.path.exists(config_file): 158 print(f"{config_file} already exists.", file=sys.stderr) 159 return 1 160 161 backend = parsed_args["backend"] 162 cache_backend = parsed_args["cache-backend"] 163 if not backend: 164 while True: 165 prompt = ( 166 "Select the backend you would like to use: " 167 "(1 - postgresql, 2 - redis, default - memory) " 168 ) 169 answer = input(prompt).strip() 170 try: 171 backends = {"1": "postgresql", "2": "redis", "": "memory"} 172 backend = backends[answer] 173 break 174 except KeyError: 175 pass 176 177 if not cache_backend: 178 while True: 179 prompt = ( 180 "Select the cache backend you would like to use: " 181 "(1 - postgresql, 2 - redis, 3 - memcached, default - memory) " 182 ) 183 answer = input(prompt).strip() 184 try: 185 cache_backends = { 186 "1": "postgresql", 187 "2": "redis", 188 "3": "memcached", 189 "": "memory", 190 } 191 cache_backend = cache_backends[answer] 192 break 193 except KeyError: 194 pass 195 196 init(config_file, backend, cache_backend, parsed_args["host"]) 197 198 # Install postgresql libraries if necessary 199 if backend == "postgresql" or cache_backend == "postgresql": 200 try: 201 import psycopg2 # NOQA 202 except ImportError: 203 subprocess.check_call( 204 [sys.executable, "-m", "pip", "install", "kinto[postgresql]"] 205 ) 206 elif backend == "redis" or cache_backend == "redis": 207 try: 208 import kinto_redis # NOQA 209 except ImportError: 210 subprocess.check_call([sys.executable, "-m", "pip", "install", "kinto[redis]"]) 211 elif cache_backend == "memcached": 212 try: 213 import memcache # NOQA 214 except ImportError: 215 subprocess.check_call([sys.executable, "-m", "pip", "install", 
"kinto[memcached]"]) 216 217 elif which_command == "migrate": 218 dry_run = parsed_args["dry_run"] 219 env = bootstrap(config_file, options={"command": "migrate"}) 220 core_scripts.migrate(env, dry_run=dry_run) 221 222 elif which_command == "delete-collection": 223 env = bootstrap(config_file, options={"command": "delete-collection"}) 224 return kinto_scripts.delete_collection( 225 env, parsed_args["bucket"], parsed_args["collection"] 226 ) 227 228 elif which_command == "rebuild-quotas": 229 dry_run = parsed_args["dry_run"] 230 env = bootstrap(config_file, options={"command": "rebuild-quotas"}) 231 return kinto_scripts.rebuild_quotas(env, dry_run=dry_run) 232 233 elif which_command == "create-user": 234 username = parsed_args["username"] 235 password = parsed_args["password"] 236 env = bootstrap(config_file, options={"command": "create-user"}) 237 return accounts_scripts.create_user(env, username=username, password=password) 238 239 elif which_command == "start": 240 pserve_argv = ["pserve"] 241 242 if parsed_args["reload"]: 243 pserve_argv.append("--reload") 244 245 if level == logging.DEBUG: 246 pserve_argv.append("-v") 247 248 if level == logging.CRITICAL: 249 pserve_argv.append("-q") 250 251 pserve_argv.append(config_file) 252 pserve_argv.append(f"http_port={parsed_args['port']}") 253 pserve.main(argv=pserve_argv) 254 255 else: 256 print(__version__) 257 258 return 0 259 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/__main__.py b/kinto/__main__.py --- a/kinto/__main__.py +++ b/kinto/__main__.py @@ -24,12 +24,13 @@ if args is None: args = sys.argv[1:] - parser = argparse.ArgumentParser(description="Kinto Command-Line " "Interface") + parser = argparse.ArgumentParser(description="Kinto Command-Line Interface") commands = ( "init", "start", "migrate", "delete-collection", + "flush-cache", "version", "rebuild-quotas", "create-user", @@ -94,26 +95,34 @@ required=False, default="127.0.0.1", ) + elif command == "migrate": subparser.add_argument( "--dry-run", action="store_true", - help="Simulate the migration operations " "and show information", + help="Simulate the migration operations and show information", dest="dry_run", required=False, default=False, ) + elif command == "delete-collection": subparser.add_argument( - "--bucket", help="The bucket where the collection " "belongs to.", required=True + "--bucket", + help="The bucket where the collection belongs to.", + required=True + ) + subparser.add_argument( + "--collection", + help="The collection to remove.", + required=True ) - subparser.add_argument("--collection", help="The collection to remove.", required=True) elif command == "rebuild-quotas": subparser.add_argument( "--dry-run", action="store_true", - help="Simulate the rebuild operation " "and show information", + help="Simulate the rebuild operation and show information", dest="dry_run", required=False, default=False, @@ -225,6 +234,10 @@ env, parsed_args["bucket"], parsed_args["collection"] ) + elif which_command == "flush-cache": + env = bootstrap(config_file, options={"command": "flush-cache"}) + core_scripts.flush_cache(env) + elif which_command == "rebuild-quotas": dry_run = parsed_args["dry_run"] env = bootstrap(config_file, options={"command": "rebuild-quotas"}) diff --git a/kinto/core/scripts.py b/kinto/core/scripts.py --- a/kinto/core/scripts.py +++ b/kinto/core/scripts.py @@ -25,3 +25,10 @@ logger.error(message) else: getattr(registry, backend).initialize_schema(dry_run=dry_run) + + +def flush_cache(env): + registry = env["registry"] + registry.cache.flush() + logger.info(f"Cache has been cleared.") + return 0
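The new `flush-cache` subcommand is a thin wrapper: it bootstraps the application from the ini file and calls `flush()` on the configured cache backend. A minimal sketch of the same operation done from Python, assuming the default `config/kinto.ini` path used elsewhere in `__main__.py`:

```python
# Equivalent of running `kinto flush-cache --ini config/kinto.ini`,
# following the flush_cache() helper added to kinto/core/scripts.py above.
from pyramid.paster import bootstrap

def flush_cache(env):
    registry = env["registry"]
    registry.cache.flush()  # drop every entry from the cache backend
    return 0

env = bootstrap("config/kinto.ini", options={"command": "flush-cache"})
flush_cache(env)
```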
{"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -24,12 +24,13 @@\n if args is None:\n args = sys.argv[1:]\n \n- parser = argparse.ArgumentParser(description=\"Kinto Command-Line \" \"Interface\")\n+ parser = argparse.ArgumentParser(description=\"Kinto Command-Line Interface\")\n commands = (\n \"init\",\n \"start\",\n \"migrate\",\n \"delete-collection\",\n+ \"flush-cache\",\n \"version\",\n \"rebuild-quotas\",\n \"create-user\",\n@@ -94,26 +95,34 @@\n required=False,\n default=\"127.0.0.1\",\n )\n+ \n elif command == \"migrate\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n- help=\"Simulate the migration operations \" \"and show information\",\n+ help=\"Simulate the migration operations and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n+\n elif command == \"delete-collection\":\n subparser.add_argument(\n- \"--bucket\", help=\"The bucket where the collection \" \"belongs to.\", required=True\n+ \"--bucket\",\n+ help=\"The bucket where the collection belongs to.\",\n+ required=True\n+ )\n+ subparser.add_argument(\n+ \"--collection\",\n+ help=\"The collection to remove.\",\n+ required=True\n )\n- subparser.add_argument(\"--collection\", help=\"The collection to remove.\", required=True)\n \n elif command == \"rebuild-quotas\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n- help=\"Simulate the rebuild operation \" \"and show information\",\n+ help=\"Simulate the rebuild operation and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n@@ -225,6 +234,10 @@\n env, parsed_args[\"bucket\"], parsed_args[\"collection\"]\n )\n \n+ elif which_command == \"flush-cache\":\n+ env = bootstrap(config_file, options={\"command\": \"flush-cache\"})\n+ core_scripts.flush_cache(env)\n+\n elif which_command == \"rebuild-quotas\":\n dry_run = parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"rebuild-quotas\"})\ndiff --git a/kinto/core/scripts.py b/kinto/core/scripts.py\n--- a/kinto/core/scripts.py\n+++ b/kinto/core/scripts.py\n@@ -25,3 +25,10 @@\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)\n+\n+\n+def flush_cache(env):\n+ registry = env[\"registry\"]\n+ registry.cache.flush()\n+ logger.info(f\"Cache has been cleared.\")\n+ return 0\n", "issue": "Add a clear cache command to kinto\nFor debugging purposes mainly we want to be able to clear the cache on time to time\nAdd a clear cache command to kinto\nFor debugging purposes mainly we want to be able to clear the cache on time to time\nAdd a clear cache command to kinto\nFor debugging purposes mainly we want to be able to clear the cache on time to time\n", "before_files": [{"content": "\"\"\"\nkinto.core.scripts: utilities to build admin scripts for kinto-based services\n\"\"\"\nimport logging\n\nfrom pyramid.settings import asbool\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef migrate(env, dry_run=False):\n \"\"\"\n User-friendly frontend to run database migrations.\n \"\"\"\n registry = env[\"registry\"]\n settings = registry.settings\n readonly_backends = (\"storage\", \"permission\")\n readonly_mode = asbool(settings.get(\"readonly\", False))\n\n for backend in (\"cache\", \"storage\", \"permission\"):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = f\"Cannot migrate the {backend} backend while in readonly mode.\"\n logger.error(message)\n else:\n 
getattr(registry, backend).initialize_schema(dry_run=dry_run)\n", "path": "kinto/core/scripts.py"}, {"content": "import argparse\nimport os\nimport subprocess\nimport sys\nimport logging\nimport logging.config\n\nfrom kinto.core import scripts as core_scripts\nfrom kinto import scripts as kinto_scripts\nfrom kinto.plugins.accounts import scripts as accounts_scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = os.getenv(\"KINTO_INI\", \"config/kinto.ini\")\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line \" \"Interface\")\n commands = (\n \"init\",\n \"start\",\n \"migrate\",\n \"delete-collection\",\n \"version\",\n \"rebuild-quotas\",\n \"create-user\",\n )\n subparsers = parser.add_subparsers(\n title=\"subcommands\",\n description=\"Main Kinto CLI commands\",\n dest=\"subcommand\",\n help=\"Choose and run with --help\",\n )\n subparsers.required = True\n\n for command in commands:\n subparser = subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n subparser.add_argument(\n \"--ini\",\n help=\"Application configuration file\",\n dest=\"ini_file\",\n required=False,\n default=DEFAULT_CONFIG_FILE,\n )\n\n subparser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_const\",\n const=logging.CRITICAL,\n dest=\"verbosity\",\n help=\"Show only critical errors.\",\n )\n\n subparser.add_argument(\n \"-v\",\n \"--debug\",\n action=\"store_const\",\n const=logging.DEBUG,\n dest=\"verbosity\",\n help=\"Show all messages, including debug messages.\",\n )\n\n if command == \"init\":\n subparser.add_argument(\n \"--backend\",\n help=\"{memory,redis,postgresql}\",\n dest=\"backend\",\n required=False,\n default=None,\n )\n subparser.add_argument(\n \"--cache-backend\",\n help=\"{memory,redis,postgresql,memcached}\",\n dest=\"cache-backend\",\n required=False,\n default=None,\n )\n subparser.add_argument(\n \"--host\",\n help=\"Host to listen() on.\",\n dest=\"host\",\n required=False,\n default=\"127.0.0.1\",\n )\n elif command == \"migrate\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Simulate the migration operations \" \"and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n elif command == \"delete-collection\":\n subparser.add_argument(\n \"--bucket\", help=\"The bucket where the collection \" \"belongs to.\", required=True\n )\n subparser.add_argument(\"--collection\", help=\"The collection to remove.\", required=True)\n\n elif command == \"rebuild-quotas\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Simulate the rebuild operation \" \"and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n\n elif command == \"start\":\n subparser.add_argument(\n \"--reload\",\n action=\"store_true\",\n help=\"Restart when code or config changes\",\n required=False,\n default=False,\n )\n subparser.add_argument(\n \"--port\",\n type=int,\n help=\"Listening port number\",\n required=False,\n default=DEFAULT_PORT,\n )\n\n elif command == \"create-user\":\n subparser.add_argument(\n \"-u\", \"--username\", help=\"Superuser username\", required=False, default=None\n )\n subparser.add_argument(\n \"-p\", \"--password\", 
help=\"Superuser password\", required=False, default=None\n )\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args[\"ini_file\"]\n which_command = parsed_args[\"which\"]\n\n # Initialize logging from\n level = parsed_args.get(\"verbosity\") or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == \"init\":\n if os.path.exists(config_file):\n print(f\"{config_file} already exists.\", file=sys.stderr)\n return 1\n\n backend = parsed_args[\"backend\"]\n cache_backend = parsed_args[\"cache-backend\"]\n if not backend:\n while True:\n prompt = (\n \"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \"\n )\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n if not cache_backend:\n while True:\n prompt = (\n \"Select the cache backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, 3 - memcached, default - memory) \"\n )\n answer = input(prompt).strip()\n try:\n cache_backends = {\n \"1\": \"postgresql\",\n \"2\": \"redis\",\n \"3\": \"memcached\",\n \"\": \"memory\",\n }\n cache_backend = cache_backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend, cache_backend, parsed_args[\"host\"])\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\" or cache_backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"kinto[postgresql]\"]\n )\n elif backend == \"redis\" or cache_backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"kinto[redis]\"])\n elif cache_backend == \"memcached\":\n try:\n import memcache # NOQA\n except ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"kinto[memcached]\"])\n\n elif which_command == \"migrate\":\n dry_run = parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"migrate\"})\n core_scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == \"delete-collection\":\n env = bootstrap(config_file, options={\"command\": \"delete-collection\"})\n return kinto_scripts.delete_collection(\n env, parsed_args[\"bucket\"], parsed_args[\"collection\"]\n )\n\n elif which_command == \"rebuild-quotas\":\n dry_run = parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"rebuild-quotas\"})\n return kinto_scripts.rebuild_quotas(env, dry_run=dry_run)\n\n elif which_command == \"create-user\":\n username = parsed_args[\"username\"]\n password = parsed_args[\"password\"]\n env = bootstrap(config_file, options={\"command\": \"create-user\"})\n return accounts_scripts.create_user(env, username=username, password=password)\n\n elif which_command == \"start\":\n pserve_argv = [\"pserve\"]\n\n if parsed_args[\"reload\"]:\n pserve_argv.append(\"--reload\")\n\n if level == logging.DEBUG:\n pserve_argv.append(\"-v\")\n\n if level == logging.CRITICAL:\n pserve_argv.append(\"-q\")\n\n pserve_argv.append(config_file)\n pserve_argv.append(f\"http_port={parsed_args['port']}\")\n pserve.main(argv=pserve_argv)\n\n else:\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}], "after_files": [{"content": "\"\"\"\nkinto.core.scripts: utilities to build admin scripts for 
kinto-based services\n\"\"\"\nimport logging\n\nfrom pyramid.settings import asbool\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef migrate(env, dry_run=False):\n \"\"\"\n User-friendly frontend to run database migrations.\n \"\"\"\n registry = env[\"registry\"]\n settings = registry.settings\n readonly_backends = (\"storage\", \"permission\")\n readonly_mode = asbool(settings.get(\"readonly\", False))\n\n for backend in (\"cache\", \"storage\", \"permission\"):\n if hasattr(registry, backend):\n if readonly_mode and backend in readonly_backends:\n message = f\"Cannot migrate the {backend} backend while in readonly mode.\"\n logger.error(message)\n else:\n getattr(registry, backend).initialize_schema(dry_run=dry_run)\n\n\ndef flush_cache(env):\n registry = env[\"registry\"]\n registry.cache.flush()\n logger.info(f\"Cache has been cleared.\")\n return 0\n", "path": "kinto/core/scripts.py"}, {"content": "import argparse\nimport os\nimport subprocess\nimport sys\nimport logging\nimport logging.config\n\nfrom kinto.core import scripts as core_scripts\nfrom kinto import scripts as kinto_scripts\nfrom kinto.plugins.accounts import scripts as accounts_scripts\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nDEFAULT_CONFIG_FILE = os.getenv(\"KINTO_INI\", \"config/kinto.ini\")\nDEFAULT_PORT = 8888\nDEFAULT_LOG_LEVEL = logging.INFO\nDEFAULT_LOG_FORMAT = \"%(levelname)-5.5s %(message)s\"\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto Command-Line Interface\")\n commands = (\n \"init\",\n \"start\",\n \"migrate\",\n \"delete-collection\",\n \"flush-cache\",\n \"version\",\n \"rebuild-quotas\",\n \"create-user\",\n )\n subparsers = parser.add_subparsers(\n title=\"subcommands\",\n description=\"Main Kinto CLI commands\",\n dest=\"subcommand\",\n help=\"Choose and run with --help\",\n )\n subparsers.required = True\n\n for command in commands:\n subparser = subparsers.add_parser(command)\n subparser.set_defaults(which=command)\n\n subparser.add_argument(\n \"--ini\",\n help=\"Application configuration file\",\n dest=\"ini_file\",\n required=False,\n default=DEFAULT_CONFIG_FILE,\n )\n\n subparser.add_argument(\n \"-q\",\n \"--quiet\",\n action=\"store_const\",\n const=logging.CRITICAL,\n dest=\"verbosity\",\n help=\"Show only critical errors.\",\n )\n\n subparser.add_argument(\n \"-v\",\n \"--debug\",\n action=\"store_const\",\n const=logging.DEBUG,\n dest=\"verbosity\",\n help=\"Show all messages, including debug messages.\",\n )\n\n if command == \"init\":\n subparser.add_argument(\n \"--backend\",\n help=\"{memory,redis,postgresql}\",\n dest=\"backend\",\n required=False,\n default=None,\n )\n subparser.add_argument(\n \"--cache-backend\",\n help=\"{memory,redis,postgresql,memcached}\",\n dest=\"cache-backend\",\n required=False,\n default=None,\n )\n subparser.add_argument(\n \"--host\",\n help=\"Host to listen() on.\",\n dest=\"host\",\n required=False,\n default=\"127.0.0.1\",\n )\n \n elif command == \"migrate\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Simulate the migration operations and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n\n elif command == \"delete-collection\":\n subparser.add_argument(\n \"--bucket\",\n help=\"The bucket where the collection belongs to.\",\n required=True\n )\n subparser.add_argument(\n 
\"--collection\",\n help=\"The collection to remove.\",\n required=True\n )\n\n elif command == \"rebuild-quotas\":\n subparser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"Simulate the rebuild operation and show information\",\n dest=\"dry_run\",\n required=False,\n default=False,\n )\n\n elif command == \"start\":\n subparser.add_argument(\n \"--reload\",\n action=\"store_true\",\n help=\"Restart when code or config changes\",\n required=False,\n default=False,\n )\n subparser.add_argument(\n \"--port\",\n type=int,\n help=\"Listening port number\",\n required=False,\n default=DEFAULT_PORT,\n )\n\n elif command == \"create-user\":\n subparser.add_argument(\n \"-u\", \"--username\", help=\"Superuser username\", required=False, default=None\n )\n subparser.add_argument(\n \"-p\", \"--password\", help=\"Superuser password\", required=False, default=None\n )\n\n # Parse command-line arguments\n parsed_args = vars(parser.parse_args(args))\n\n config_file = parsed_args[\"ini_file\"]\n which_command = parsed_args[\"which\"]\n\n # Initialize logging from\n level = parsed_args.get(\"verbosity\") or DEFAULT_LOG_LEVEL\n logging.basicConfig(level=level, format=DEFAULT_LOG_FORMAT)\n\n if which_command == \"init\":\n if os.path.exists(config_file):\n print(f\"{config_file} already exists.\", file=sys.stderr)\n return 1\n\n backend = parsed_args[\"backend\"]\n cache_backend = parsed_args[\"cache-backend\"]\n if not backend:\n while True:\n prompt = (\n \"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \"\n )\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n if not cache_backend:\n while True:\n prompt = (\n \"Select the cache backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, 3 - memcached, default - memory) \"\n )\n answer = input(prompt).strip()\n try:\n cache_backends = {\n \"1\": \"postgresql\",\n \"2\": \"redis\",\n \"3\": \"memcached\",\n \"\": \"memory\",\n }\n cache_backend = cache_backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend, cache_backend, parsed_args[\"host\"])\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\" or cache_backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n subprocess.check_call(\n [sys.executable, \"-m\", \"pip\", \"install\", \"kinto[postgresql]\"]\n )\n elif backend == \"redis\" or cache_backend == \"redis\":\n try:\n import kinto_redis # NOQA\n except ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"kinto[redis]\"])\n elif cache_backend == \"memcached\":\n try:\n import memcache # NOQA\n except ImportError:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"kinto[memcached]\"])\n\n elif which_command == \"migrate\":\n dry_run = parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"migrate\"})\n core_scripts.migrate(env, dry_run=dry_run)\n\n elif which_command == \"delete-collection\":\n env = bootstrap(config_file, options={\"command\": \"delete-collection\"})\n return kinto_scripts.delete_collection(\n env, parsed_args[\"bucket\"], parsed_args[\"collection\"]\n )\n\n elif which_command == \"flush-cache\":\n env = bootstrap(config_file, options={\"command\": \"flush-cache\"})\n core_scripts.flush_cache(env)\n\n elif which_command == \"rebuild-quotas\":\n dry_run = 
parsed_args[\"dry_run\"]\n env = bootstrap(config_file, options={\"command\": \"rebuild-quotas\"})\n return kinto_scripts.rebuild_quotas(env, dry_run=dry_run)\n\n elif which_command == \"create-user\":\n username = parsed_args[\"username\"]\n password = parsed_args[\"password\"]\n env = bootstrap(config_file, options={\"command\": \"create-user\"})\n return accounts_scripts.create_user(env, username=username, password=password)\n\n elif which_command == \"start\":\n pserve_argv = [\"pserve\"]\n\n if parsed_args[\"reload\"]:\n pserve_argv.append(\"--reload\")\n\n if level == logging.DEBUG:\n pserve_argv.append(\"-v\")\n\n if level == logging.CRITICAL:\n pserve_argv.append(\"-q\")\n\n pserve_argv.append(config_file)\n pserve_argv.append(f\"http_port={parsed_args['port']}\")\n pserve.main(argv=pserve_argv)\n\n else:\n print(__version__)\n\n return 0\n", "path": "kinto/__main__.py"}]}
2,949
636
gh_patches_debug_9038
rasdani/github-patches
git_diff
beeware__toga-2384
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- iOS Camera take_photo ### Describe the bug When using the documented: ```python3 if await app.camera.request_permission(): photo = await app.camera.take_photo() ``` ### Steps to reproduce 1. request permission to use camera 2. await the take_photo function. ### Expected behavior Return a toga.Image or None as documented here https://toga.readthedocs.io/en/stable/reference/api/hardware/camera.html#toga.hardware.camera.Camera.take_photo ### Screenshots Successfully get permission to use camera: <img width="378" alt="Screenshot 2024-02-08 at 22 14 03" src="https://github.com/beeware/toga/assets/25908768/5867a18e-b5c5-4141-bd72-a4b737c0fbd1"> No camera screen comes up. ### Environment - Operating System: iOS 16.4 & 17.2 (built and development from Macbook Air M1 macOS 14.3.1) - Python version: 3.10.2 - Software versions: - Briefcase: 0.3.17 - Toga: 4.0.2 ### Logs Simulator Log ``` Error in async handler: 'NoneType' object has no attribute 'result' Traceback (most recent call last): File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/handlers.py", line 38, in handler_with_cleanup result = await handler(interface, *args, **kwargs) File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app/healthapp/app.py", line 71, in analyse_gait_handler photo = await self.app.camera.take_photo() File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/hardware/camera.py", line 122, in take_photo self._impl.take_photo(photo, device=device, flash=flash) File "/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga_iOS/hardware/camera.py", line 159, in take_photo self.native.delegate.result = result AttributeError: 'NoneType' object has no attribute 'result' ``` Physical iOS device testing log: ``` 2024-02-08 22:27:03.097595+0000 Health App[2216:513956] [TraitCollection] Class CKBrowserSwitcherViewController overrides the -traitCollection getter, which is not supported. If you're trying to override traits, you must use the appropriate API. 
2024-02-08 22:27:03.179852+0000 Health App[2216:513956] Error in async handler: 'NoneType' object has no attribute 'result' 2024-02-08 22:27:03.183351+0000 Health App[2216:513956] Traceback (most recent call last): 2024-02-08 22:27:03.183705+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/handlers.py", line 38, in handler_with_cleanup 2024-02-08 22:27:03.183927+0000 Health App[2216:513956] result = await handler(interface, *args, **kwargs) 2024-02-08 22:27:03.184161+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app/healthapp/app.py", line 71, in analyse_gait_handler 2024-02-08 22:27:03.184361+0000 Health App[2216:513956] photo = await self.app.camera.take_photo() 2024-02-08 22:27:03.184604+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/hardware/camera.py", line 122, in take_photo 2024-02-08 22:27:03.184827+0000 Health App[2216:513956] self._impl.take_photo(photo, device=device, flash=flash) 2024-02-08 22:27:03.185124+0000 Health App[2216:513956] File "/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga_iOS/hardware/camera.py", line 159, in take_photo 2024-02-08 22:27:03.185537+0000 Health App[2216:513956] self.native.delegate.result = result 2024-02-08 22:27:03.185785+0000 Health App[2216:513956] AttributeError: 'NoneType' object has no attribute 'result' ``` ### Additional context Fully works on android, same problem on macOS I believe. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `iOS/src/toga_iOS/hardware/camera.py` Content: ``` 1 import warnings 2 3 from rubicon.objc import Block, NSObject, objc_method 4 5 import toga 6 from toga.constants import FlashMode 7 8 # for classes that need to be monkeypatched for testing 9 from toga_iOS import libs as iOS 10 from toga_iOS.libs import ( 11 AVAuthorizationStatus, 12 AVMediaTypeVideo, 13 NSBundle, 14 UIImagePickerControllerCameraCaptureMode, 15 UIImagePickerControllerCameraDevice, 16 UIImagePickerControllerCameraFlashMode, 17 UIImagePickerControllerSourceTypeCamera, 18 ) 19 20 21 class CameraDevice: 22 def __init__(self, id, name, native): 23 self._id = id 24 self._name = name 25 self.native = native 26 27 def id(self): 28 return self._id 29 30 def name(self): 31 return self._name 32 33 def has_flash(self): 34 return iOS.UIImagePickerController.isFlashAvailableForCameraDevice(self.native) 35 36 37 def native_flash_mode(flash): 38 return { 39 FlashMode.ON: UIImagePickerControllerCameraFlashMode.On, 40 FlashMode.OFF: UIImagePickerControllerCameraFlashMode.Off, 41 }.get(flash, UIImagePickerControllerCameraFlashMode.Auto) 42 43 44 # def native_video_quality(quality): 45 # return { 46 # VideoQuality.HIGH: UIImagePickerControllerQualityType.High, 47 # VideoQuality.LOW: UIImagePickerControllerQualityType.Low, 48 # }.get(quality, UIImagePickerControllerQualityType.Medium) 49 50 51 class TogaImagePickerDelegate(NSObject): 52 @objc_method 53 def imagePickerController_didFinishPickingMediaWithInfo_( 54 self, picker, info 55 ) -> None: 56 picker.dismissViewControllerAnimated(True, completion=None) 57 58 image = toga.Image(info["UIImagePickerControllerOriginalImage"]) 59 self.result.set_result(image) 60 61 @objc_method 62 def imagePickerControllerDidCancel_(self, picker) -> None: 63 picker.dismissViewControllerAnimated(True, completion=None) 64 self.result.set_result(None) 65 66 67 class Camera: 68 def __init__(self, interface): 69 self.interface = interface 70 71 if NSBundle.mainBundle.objectForInfoDictionaryKey("NSCameraUsageDescription"): 72 if iOS.UIImagePickerController.isSourceTypeAvailable( 73 UIImagePickerControllerSourceTypeCamera 74 ): 75 self.native = iOS.UIImagePickerController.new() 76 self.native.sourceType = UIImagePickerControllerSourceTypeCamera 77 self.native.delegate = TogaImagePickerDelegate.new() 78 else: 79 self.native = None 80 else: # pragma: no cover 81 # The app doesn't have the NSCameraUsageDescription key (e.g., via 82 # `permission.camera` in Briefcase). No-cover because we can't manufacture 83 # this condition in testing. 84 raise RuntimeError( 85 "Application metadata does not declare that the app will use the camera." 86 ) 87 88 def has_permission(self, allow_unknown=False): 89 if allow_unknown: 90 valid_values = { 91 AVAuthorizationStatus.Authorized.value, 92 AVAuthorizationStatus.NotDetermined.value, 93 } 94 else: 95 valid_values = {AVAuthorizationStatus.Authorized.value} 96 97 return ( 98 iOS.AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo) 99 in valid_values 100 ) 101 102 def request_permission(self, future): 103 # This block is invoked when the permission is granted; however, permission is 104 # granted from a different (inaccessible) thread, so it isn't picked up by 105 # coverage. 
106 def permission_complete(result) -> None: 107 future.set_result(result) 108 109 iOS.AVCaptureDevice.requestAccessForMediaType( 110 AVMediaTypeVideo, completionHandler=Block(permission_complete, None, bool) 111 ) 112 113 def get_devices(self): 114 return ( 115 [ 116 CameraDevice( 117 id="Rear", 118 name="Rear", 119 native=UIImagePickerControllerCameraDevice.Rear, 120 ) 121 ] 122 if iOS.UIImagePickerController.isCameraDeviceAvailable( 123 UIImagePickerControllerCameraDevice.Rear 124 ) 125 else [] 126 ) + ( 127 [ 128 CameraDevice( 129 id="Front", 130 name="Front", 131 native=UIImagePickerControllerCameraDevice.Front, 132 ) 133 ] 134 if iOS.UIImagePickerController.isCameraDeviceAvailable( 135 UIImagePickerControllerCameraDevice.Front 136 ) 137 else [] 138 ) 139 140 def take_photo(self, result, device, flash): 141 if self.native is None: 142 warnings.warn("No camera is available") 143 result.set_result(None) 144 elif self.has_permission(allow_unknown=True): 145 # Configure the controller to take a photo 146 self.native.cameraCaptureMode = ( 147 UIImagePickerControllerCameraCaptureMode.Photo 148 ) 149 150 self.native.showsCameraControls = True 151 self.native.cameraDevice = ( 152 device._impl.native 153 if device 154 else UIImagePickerControllerCameraDevice.Rear 155 ) 156 self.native.cameraFlashMode = native_flash_mode(flash) 157 158 # Attach the result to the delegate 159 self.native.delegate.result = result 160 161 # Show the pane 162 toga.App.app.current_window._impl.native.rootViewController.presentViewController( 163 self.native, animated=True, completion=None 164 ) 165 else: 166 raise PermissionError("App does not have permission to take photos") 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/iOS/src/toga_iOS/hardware/camera.py b/iOS/src/toga_iOS/hardware/camera.py --- a/iOS/src/toga_iOS/hardware/camera.py +++ b/iOS/src/toga_iOS/hardware/camera.py @@ -74,7 +74,8 @@ ): self.native = iOS.UIImagePickerController.new() self.native.sourceType = UIImagePickerControllerSourceTypeCamera - self.native.delegate = TogaImagePickerDelegate.new() + self.delegate_link = TogaImagePickerDelegate.new() + self.native.delegate = self.delegate_link else: self.native = None else: # pragma: no cover
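The single-line change above fixes an object-lifetime problem rather than a camera API problem: the picker's Objective-C `delegate` property does not hold a strong reference, so the `TogaImagePickerDelegate` created in `__init__` could be deallocated once the constructor returned, which is why `self.native.delegate` was `None` by the time `take_photo()` tried to set `result` on it. Keeping the delegate in `self.delegate_link` ties its lifetime to the camera implementation. A small sketch of the pattern, with illustrative class names:

```python
# Keep a Python-side strong reference to any Objective-C delegate you assign;
# otherwise the non-retaining delegate property is all that points at it.
class CameraImpl:
    def __init__(self, picker, delegate_class):
        self.native = picker
        self.delegate_link = delegate_class.new()   # strong reference lives on self
        self.native.delegate = self.delegate_link   # weak/assign on the UIKit side

    def take_photo(self, result):
        self.native.delegate.result = result        # delegate is still alive here
```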
{"golden_diff": "diff --git a/iOS/src/toga_iOS/hardware/camera.py b/iOS/src/toga_iOS/hardware/camera.py\n--- a/iOS/src/toga_iOS/hardware/camera.py\n+++ b/iOS/src/toga_iOS/hardware/camera.py\n@@ -74,7 +74,8 @@\n ):\n self.native = iOS.UIImagePickerController.new()\n self.native.sourceType = UIImagePickerControllerSourceTypeCamera\n- self.native.delegate = TogaImagePickerDelegate.new()\n+ self.delegate_link = TogaImagePickerDelegate.new()\n+ self.native.delegate = self.delegate_link\n else:\n self.native = None\n else: # pragma: no cover\n", "issue": "iOS Camera take_photo\n### Describe the bug\r\n\r\nWhen using the documented:\r\n\r\n```python3\r\nif await app.camera.request_permission():\r\n photo = await app.camera.take_photo()\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. request permission to use camera\r\n2. await the take_photo function.\r\n\r\n\r\n### Expected behavior\r\n\r\nReturn a toga.Image or None as documented here https://toga.readthedocs.io/en/stable/reference/api/hardware/camera.html#toga.hardware.camera.Camera.take_photo\r\n\r\n### Screenshots\r\n\r\nSuccessfully get permission to use camera: <img width=\"378\" alt=\"Screenshot 2024-02-08 at 22 14 03\" src=\"https://github.com/beeware/toga/assets/25908768/5867a18e-b5c5-4141-bd72-a4b737c0fbd1\">\r\n\r\nNo camera screen comes up.\r\n\r\n\r\n### Environment\r\n\r\n- Operating System: iOS 16.4 & 17.2 (built and development from Macbook Air M1 macOS 14.3.1)\r\n- Python version: 3.10.2\r\n- Software versions:\r\n - Briefcase: 0.3.17\r\n - Toga: 4.0.2\r\n\r\n\r\n### Logs\r\n\r\nSimulator Log\r\n```\r\nError in async handler: 'NoneType' object has no attribute 'result'\r\nTraceback (most recent call last):\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/handlers.py\", line 38, in handler_with_cleanup\r\n result = await handler(interface, *args, **kwargs)\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app/healthapp/app.py\", line 71, in analyse_gait_handler\r\n photo = await self.app.camera.take_photo()\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga/hardware/camera.py\", line 122, in take_photo\r\n self._impl.take_photo(photo, device=device, flash=flash)\r\n File \"/Users/j/Library/Developer/CoreSimulator/Devices/6EAED83E-F2F7-4497-8F74-52F3DC368DA2/data/Containers/Bundle/Application/E864BC86-AFF3-410A-A4CF-D594ADB4AFCA/Health App.app/app_packages/toga_iOS/hardware/camera.py\", line 159, in take_photo\r\n self.native.delegate.result = result\r\nAttributeError: 'NoneType' object has no attribute 'result'\r\n```\r\n\r\n\r\nPhysical iOS device testing log:\r\n```\r\n2024-02-08 22:27:03.097595+0000 Health App[2216:513956] [TraitCollection] Class CKBrowserSwitcherViewController overrides the -traitCollection getter, which is not supported. 
If you're trying to override traits, you must use the appropriate API.\r\n2024-02-08 22:27:03.179852+0000 Health App[2216:513956] Error in async handler: 'NoneType' object has no attribute 'result'\r\n\r\n2024-02-08 22:27:03.183351+0000 Health App[2216:513956] Traceback (most recent call last):\r\n\r\n2024-02-08 22:27:03.183705+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/handlers.py\", line 38, in handler_with_cleanup\r\n\r\n2024-02-08 22:27:03.183927+0000 Health App[2216:513956] result = await handler(interface, *args, **kwargs)\r\n\r\n2024-02-08 22:27:03.184161+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app/healthapp/app.py\", line 71, in analyse_gait_handler\r\n\r\n2024-02-08 22:27:03.184361+0000 Health App[2216:513956] photo = await self.app.camera.take_photo()\r\n\r\n2024-02-08 22:27:03.184604+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga/hardware/camera.py\", line 122, in take_photo\r\n\r\n2024-02-08 22:27:03.184827+0000 Health App[2216:513956] self._impl.take_photo(photo, device=device, flash=flash)\r\n\r\n2024-02-08 22:27:03.185124+0000 Health App[2216:513956] File \"/private/var/containers/Bundle/Application/22F0E7F3-012A-46ED-93DE-C8EC8125EE19/Health App.app/app_packages/toga_iOS/hardware/camera.py\", line 159, in take_photo\r\n\r\n2024-02-08 22:27:03.185537+0000 Health App[2216:513956] self.native.delegate.result = result\r\n\r\n2024-02-08 22:27:03.185785+0000 Health App[2216:513956] AttributeError: 'NoneType' object has no attribute 'result'\r\n```\r\n\r\n### Additional context\r\n\r\nFully works on android, same problem on macOS I believe.\n", "before_files": [{"content": "import warnings\n\nfrom rubicon.objc import Block, NSObject, objc_method\n\nimport toga\nfrom toga.constants import FlashMode\n\n# for classes that need to be monkeypatched for testing\nfrom toga_iOS import libs as iOS\nfrom toga_iOS.libs import (\n AVAuthorizationStatus,\n AVMediaTypeVideo,\n NSBundle,\n UIImagePickerControllerCameraCaptureMode,\n UIImagePickerControllerCameraDevice,\n UIImagePickerControllerCameraFlashMode,\n UIImagePickerControllerSourceTypeCamera,\n)\n\n\nclass CameraDevice:\n def __init__(self, id, name, native):\n self._id = id\n self._name = name\n self.native = native\n\n def id(self):\n return self._id\n\n def name(self):\n return self._name\n\n def has_flash(self):\n return iOS.UIImagePickerController.isFlashAvailableForCameraDevice(self.native)\n\n\ndef native_flash_mode(flash):\n return {\n FlashMode.ON: UIImagePickerControllerCameraFlashMode.On,\n FlashMode.OFF: UIImagePickerControllerCameraFlashMode.Off,\n }.get(flash, UIImagePickerControllerCameraFlashMode.Auto)\n\n\n# def native_video_quality(quality):\n# return {\n# VideoQuality.HIGH: UIImagePickerControllerQualityType.High,\n# VideoQuality.LOW: UIImagePickerControllerQualityType.Low,\n# }.get(quality, UIImagePickerControllerQualityType.Medium)\n\n\nclass TogaImagePickerDelegate(NSObject):\n @objc_method\n def imagePickerController_didFinishPickingMediaWithInfo_(\n self, picker, info\n ) -> None:\n picker.dismissViewControllerAnimated(True, completion=None)\n\n image = toga.Image(info[\"UIImagePickerControllerOriginalImage\"])\n self.result.set_result(image)\n\n @objc_method\n def imagePickerControllerDidCancel_(self, picker) -> 
None:\n picker.dismissViewControllerAnimated(True, completion=None)\n self.result.set_result(None)\n\n\nclass Camera:\n def __init__(self, interface):\n self.interface = interface\n\n if NSBundle.mainBundle.objectForInfoDictionaryKey(\"NSCameraUsageDescription\"):\n if iOS.UIImagePickerController.isSourceTypeAvailable(\n UIImagePickerControllerSourceTypeCamera\n ):\n self.native = iOS.UIImagePickerController.new()\n self.native.sourceType = UIImagePickerControllerSourceTypeCamera\n self.native.delegate = TogaImagePickerDelegate.new()\n else:\n self.native = None\n else: # pragma: no cover\n # The app doesn't have the NSCameraUsageDescription key (e.g., via\n # `permission.camera` in Briefcase). No-cover because we can't manufacture\n # this condition in testing.\n raise RuntimeError(\n \"Application metadata does not declare that the app will use the camera.\"\n )\n\n def has_permission(self, allow_unknown=False):\n if allow_unknown:\n valid_values = {\n AVAuthorizationStatus.Authorized.value,\n AVAuthorizationStatus.NotDetermined.value,\n }\n else:\n valid_values = {AVAuthorizationStatus.Authorized.value}\n\n return (\n iOS.AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo)\n in valid_values\n )\n\n def request_permission(self, future):\n # This block is invoked when the permission is granted; however, permission is\n # granted from a different (inaccessible) thread, so it isn't picked up by\n # coverage.\n def permission_complete(result) -> None:\n future.set_result(result)\n\n iOS.AVCaptureDevice.requestAccessForMediaType(\n AVMediaTypeVideo, completionHandler=Block(permission_complete, None, bool)\n )\n\n def get_devices(self):\n return (\n [\n CameraDevice(\n id=\"Rear\",\n name=\"Rear\",\n native=UIImagePickerControllerCameraDevice.Rear,\n )\n ]\n if iOS.UIImagePickerController.isCameraDeviceAvailable(\n UIImagePickerControllerCameraDevice.Rear\n )\n else []\n ) + (\n [\n CameraDevice(\n id=\"Front\",\n name=\"Front\",\n native=UIImagePickerControllerCameraDevice.Front,\n )\n ]\n if iOS.UIImagePickerController.isCameraDeviceAvailable(\n UIImagePickerControllerCameraDevice.Front\n )\n else []\n )\n\n def take_photo(self, result, device, flash):\n if self.native is None:\n warnings.warn(\"No camera is available\")\n result.set_result(None)\n elif self.has_permission(allow_unknown=True):\n # Configure the controller to take a photo\n self.native.cameraCaptureMode = (\n UIImagePickerControllerCameraCaptureMode.Photo\n )\n\n self.native.showsCameraControls = True\n self.native.cameraDevice = (\n device._impl.native\n if device\n else UIImagePickerControllerCameraDevice.Rear\n )\n self.native.cameraFlashMode = native_flash_mode(flash)\n\n # Attach the result to the delegate\n self.native.delegate.result = result\n\n # Show the pane\n toga.App.app.current_window._impl.native.rootViewController.presentViewController(\n self.native, animated=True, completion=None\n )\n else:\n raise PermissionError(\"App does not have permission to take photos\")\n", "path": "iOS/src/toga_iOS/hardware/camera.py"}], "after_files": [{"content": "import warnings\n\nfrom rubicon.objc import Block, NSObject, objc_method\n\nimport toga\nfrom toga.constants import FlashMode\n\n# for classes that need to be monkeypatched for testing\nfrom toga_iOS import libs as iOS\nfrom toga_iOS.libs import (\n AVAuthorizationStatus,\n AVMediaTypeVideo,\n NSBundle,\n UIImagePickerControllerCameraCaptureMode,\n UIImagePickerControllerCameraDevice,\n UIImagePickerControllerCameraFlashMode,\n 
UIImagePickerControllerSourceTypeCamera,\n)\n\n\nclass CameraDevice:\n def __init__(self, id, name, native):\n self._id = id\n self._name = name\n self.native = native\n\n def id(self):\n return self._id\n\n def name(self):\n return self._name\n\n def has_flash(self):\n return iOS.UIImagePickerController.isFlashAvailableForCameraDevice(self.native)\n\n\ndef native_flash_mode(flash):\n return {\n FlashMode.ON: UIImagePickerControllerCameraFlashMode.On,\n FlashMode.OFF: UIImagePickerControllerCameraFlashMode.Off,\n }.get(flash, UIImagePickerControllerCameraFlashMode.Auto)\n\n\n# def native_video_quality(quality):\n# return {\n# VideoQuality.HIGH: UIImagePickerControllerQualityType.High,\n# VideoQuality.LOW: UIImagePickerControllerQualityType.Low,\n# }.get(quality, UIImagePickerControllerQualityType.Medium)\n\n\nclass TogaImagePickerDelegate(NSObject):\n @objc_method\n def imagePickerController_didFinishPickingMediaWithInfo_(\n self, picker, info\n ) -> None:\n picker.dismissViewControllerAnimated(True, completion=None)\n\n image = toga.Image(info[\"UIImagePickerControllerOriginalImage\"])\n self.result.set_result(image)\n\n @objc_method\n def imagePickerControllerDidCancel_(self, picker) -> None:\n picker.dismissViewControllerAnimated(True, completion=None)\n self.result.set_result(None)\n\n\nclass Camera:\n def __init__(self, interface):\n self.interface = interface\n\n if NSBundle.mainBundle.objectForInfoDictionaryKey(\"NSCameraUsageDescription\"):\n if iOS.UIImagePickerController.isSourceTypeAvailable(\n UIImagePickerControllerSourceTypeCamera\n ):\n self.native = iOS.UIImagePickerController.new()\n self.native.sourceType = UIImagePickerControllerSourceTypeCamera\n self.delegate_link = TogaImagePickerDelegate.new()\n self.native.delegate = self.delegate_link\n else:\n self.native = None\n else: # pragma: no cover\n # The app doesn't have the NSCameraUsageDescription key (e.g., via\n # `permission.camera` in Briefcase). 
No-cover because we can't manufacture\n # this condition in testing.\n raise RuntimeError(\n \"Application metadata does not declare that the app will use the camera.\"\n )\n\n def has_permission(self, allow_unknown=False):\n if allow_unknown:\n valid_values = {\n AVAuthorizationStatus.Authorized.value,\n AVAuthorizationStatus.NotDetermined.value,\n }\n else:\n valid_values = {AVAuthorizationStatus.Authorized.value}\n\n return (\n iOS.AVCaptureDevice.authorizationStatusForMediaType(AVMediaTypeVideo)\n in valid_values\n )\n\n def request_permission(self, future):\n # This block is invoked when the permission is granted; however, permission is\n # granted from a different (inaccessible) thread, so it isn't picked up by\n # coverage.\n def permission_complete(result) -> None:\n future.set_result(result)\n\n iOS.AVCaptureDevice.requestAccessForMediaType(\n AVMediaTypeVideo, completionHandler=Block(permission_complete, None, bool)\n )\n\n def get_devices(self):\n return (\n [\n CameraDevice(\n id=\"Rear\",\n name=\"Rear\",\n native=UIImagePickerControllerCameraDevice.Rear,\n )\n ]\n if iOS.UIImagePickerController.isCameraDeviceAvailable(\n UIImagePickerControllerCameraDevice.Rear\n )\n else []\n ) + (\n [\n CameraDevice(\n id=\"Front\",\n name=\"Front\",\n native=UIImagePickerControllerCameraDevice.Front,\n )\n ]\n if iOS.UIImagePickerController.isCameraDeviceAvailable(\n UIImagePickerControllerCameraDevice.Front\n )\n else []\n )\n\n def take_photo(self, result, device, flash):\n if self.native is None:\n warnings.warn(\"No camera is available\")\n result.set_result(None)\n elif self.has_permission(allow_unknown=True):\n # Configure the controller to take a photo\n self.native.cameraCaptureMode = (\n UIImagePickerControllerCameraCaptureMode.Photo\n )\n\n self.native.showsCameraControls = True\n self.native.cameraDevice = (\n device._impl.native\n if device\n else UIImagePickerControllerCameraDevice.Rear\n )\n self.native.cameraFlashMode = native_flash_mode(flash)\n\n # Attach the result to the delegate\n self.native.delegate.result = result\n\n # Show the pane\n toga.App.app.current_window._impl.native.rootViewController.presentViewController(\n self.native, animated=True, completion=None\n )\n else:\n raise PermissionError(\"App does not have permission to take photos\")\n", "path": "iOS/src/toga_iOS/hardware/camera.py"}]}
3455
149
gh_patches_debug_6431
rasdani/github-patches
git_diff
pyca__cryptography-2285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Installing cryptography with old pypy builds (and caches) bad wheels If I install cryptography with pypy 2.5.1, the new cffi build mechanism isn't invoked and I get an installed package without any of the compiled modules. However, pip builds and caches `cryptography-1.0-py2-none-any.whl` which is platform-independent (because it has no compiled modules). This bad wheel then gets used for any further installs with any Python 2 interpreter, and happily installs a cryptography without any of the interesting bits. To illustrate with an example: ``` console root@e1ffcbbe8f9d:/foo$ # Before cryptograpy is installed: root@e1ffcbbe8f9d:/foo$ find ~/.cache/pip/wheels -name 'cryptography*' root@e1ffcbbe8f9d:/foo$ /env-pypy-2.6.0/bin/pypy -c 'import cryptography.hazmat.bindings._openssl' 2>&1 | grep ImportError ImportError: No module named cryptography root@e1ffcbbe8f9d:/foo$ # Install cryptography using old pypy: root@e1ffcbbe8f9d:/foo$ /env-pypy-2.5.1/bin/pip install cryptography -q root@e1ffcbbe8f9d:/foo$ find ~/.cache/pip/wheels -name 'cryptography*' /root/.cache/pip/wheels/6d/db/24/a5bbe660e3b632abb007ba6d8c91b5fa51a83bb53544c16709/cryptography-1.0-py2-none-any.whl root@e1ffcbbe8f9d:/foo$ # After that, install cryptography using new pypy: root@e1ffcbbe8f9d:/foo$ /env-pypy-2.6.0/bin/pip install cryptography -q root@e1ffcbbe8f9d:/foo$ find ~/.cache/pip/wheels -name 'cryptography*' /root/.cache/pip/wheels/6d/db/24/a5bbe660e3b632abb007ba6d8c91b5fa51a83bb53544c16709/cryptography-1.0-py2-none-any.whl root@e1ffcbbe8f9d:/foo$ # The installed cryptography is now broken: root@e1ffcbbe8f9d:/foo$ /env-pypy-2.6.0/bin/pypy -c 'import cryptography.hazmat.bindings._openssl' 2>&1 | grep ImportError ImportError: No module named cryptography.hazmat.bindings._openssl ``` I think a reasonable solution is to explicitly fail to build or install on older versions of pypy. This would prevent the broken wheel from being being built, and would also give a useful error at install time instead of silently installing a nonfunctional cryptography. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 import os 8 import platform 9 import subprocess 10 import sys 11 from distutils.command.build import build 12 13 import pkg_resources 14 15 from setuptools import find_packages, setup 16 from setuptools.command.install import install 17 from setuptools.command.test import test 18 19 20 base_dir = os.path.dirname(__file__) 21 src_dir = os.path.join(base_dir, "src") 22 23 # When executing the setup.py, we need to be able to import ourselves, this 24 # means that we need to add the src/ directory to the sys.path. 
25 sys.path.insert(0, src_dir) 26 27 about = {} 28 with open(os.path.join(src_dir, "cryptography", "__about__.py")) as f: 29 exec(f.read(), about) 30 31 32 VECTORS_DEPENDENCY = "cryptography_vectors=={0}".format(about['__version__']) 33 34 requirements = [ 35 "idna>=2.0", 36 "pyasn1>=0.1.8", 37 "six>=1.4.1", 38 "setuptools", 39 ] 40 setup_requirements = [] 41 42 if sys.version_info < (3, 4): 43 requirements.append("enum34") 44 45 if sys.version_info < (3, 3): 46 requirements.append("ipaddress") 47 48 if platform.python_implementation() != "PyPy": 49 requirements.append("cffi>=1.1.0") 50 setup_requirements.append("cffi>=1.1.0") 51 52 # If you add a new dep here you probably need to add it in the tox.ini as well 53 test_requirements = [ 54 "pytest", 55 "pretend", 56 "iso8601", 57 ] 58 59 # If there's no vectors locally that probably means we are in a tarball and 60 # need to go and get the matching vectors package from PyPi 61 if not os.path.exists(os.path.join(base_dir, "vectors/setup.py")): 62 test_requirements.append(VECTORS_DEPENDENCY) 63 64 65 def cc_is_available(): 66 return sys.platform == "darwin" and list(map( 67 int, platform.mac_ver()[0].split("."))) >= [10, 8, 0] 68 69 70 backends = [ 71 "openssl = cryptography.hazmat.backends.openssl:backend" 72 ] 73 74 if cc_is_available(): 75 backends.append( 76 "commoncrypto = cryptography.hazmat.backends.commoncrypto:backend", 77 ) 78 79 80 class PyTest(test): 81 def finalize_options(self): 82 test.finalize_options(self) 83 self.test_args = [] 84 self.test_suite = True 85 86 # This means there's a vectors/ folder with the package in here. 87 # cd into it, install the vectors package and then refresh sys.path 88 if VECTORS_DEPENDENCY not in test_requirements: 89 subprocess.check_call( 90 [sys.executable, "setup.py", "install"], cwd="vectors" 91 ) 92 pkg_resources.get_distribution("cryptography_vectors").activate() 93 94 def run_tests(self): 95 # Import here because in module scope the eggs are not loaded. 96 import pytest 97 test_args = [os.path.join(base_dir, "tests")] 98 errno = pytest.main(test_args) 99 sys.exit(errno) 100 101 102 def keywords_with_side_effects(argv): 103 """ 104 Get a dictionary with setup keywords that (can) have side effects. 105 106 :param argv: A list of strings with command line arguments. 107 :returns: A dictionary with keyword arguments for the ``setup()`` function. 108 109 This setup.py script uses the setuptools 'setup_requires' feature because 110 this is required by the cffi package to compile extension modules. The 111 purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi 112 build process as a result of setup.py invocations that don't need the cffi 113 module to be built (setup.py serves the dual purpose of exposing package 114 metadata). 115 116 All of the options listed by ``python setup.py --help`` that print 117 information should be recognized here. The commands ``clean``, 118 ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized. 119 Any combination of these options and commands is also supported. 120 121 This function was originally based on the `setup.py script`_ of SciPy (see 122 also the discussion in `pip issue #25`_). 123 124 .. _pip issue #25: https://github.com/pypa/pip/issues/25 125 .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py 126 """ 127 no_setup_requires_arguments = ( 128 '-h', '--help', 129 '-n', '--dry-run', 130 '-q', '--quiet', 131 '-v', '--verbose', 132 '-V', '--version', 133 '--author', 134 '--author-email', 135 '--classifiers', 136 '--contact', 137 '--contact-email', 138 '--description', 139 '--egg-base', 140 '--fullname', 141 '--help-commands', 142 '--keywords', 143 '--licence', 144 '--license', 145 '--long-description', 146 '--maintainer', 147 '--maintainer-email', 148 '--name', 149 '--no-user-cfg', 150 '--obsoletes', 151 '--platforms', 152 '--provides', 153 '--requires', 154 '--url', 155 'clean', 156 'egg_info', 157 'register', 158 'sdist', 159 'upload', 160 ) 161 162 def is_short_option(argument): 163 """Check whether a command line argument is a short option.""" 164 return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-' 165 166 def expand_short_options(argument): 167 """Expand combined short options into canonical short options.""" 168 return ('-' + char for char in argument[1:]) 169 170 def argument_without_setup_requirements(argv, i): 171 """Check whether a command line argument needs setup requirements.""" 172 if argv[i] in no_setup_requires_arguments: 173 # Simple case: An argument which is either an option or a command 174 # which doesn't need setup requirements. 175 return True 176 elif (is_short_option(argv[i]) and 177 all(option in no_setup_requires_arguments 178 for option in expand_short_options(argv[i]))): 179 # Not so simple case: Combined short options none of which need 180 # setup requirements. 181 return True 182 elif argv[i - 1:i] == ['--egg-base']: 183 # Tricky case: --egg-info takes an argument which should not make 184 # us use setup_requires (defeating the purpose of this code). 185 return True 186 else: 187 return False 188 189 if all(argument_without_setup_requirements(argv, i) 190 for i in range(1, len(argv))): 191 return { 192 "cmdclass": { 193 "build": DummyBuild, 194 "install": DummyInstall, 195 "test": DummyPyTest, 196 } 197 } 198 else: 199 cffi_modules = [ 200 "src/_cffi_src/build_openssl.py:ffi", 201 "src/_cffi_src/build_constant_time.py:ffi", 202 "src/_cffi_src/build_padding.py:ffi", 203 ] 204 if cc_is_available(): 205 cffi_modules.append("src/_cffi_src/build_commoncrypto.py:ffi") 206 207 return { 208 "setup_requires": setup_requirements, 209 "cmdclass": { 210 "test": PyTest, 211 }, 212 "cffi_modules": cffi_modules 213 } 214 215 216 setup_requires_error = ("Requested setup command that needs 'setup_requires' " 217 "while command line arguments implied a side effect " 218 "free command or option.") 219 220 221 class DummyBuild(build): 222 """ 223 This class makes it very obvious when ``keywords_with_side_effects()`` has 224 incorrectly interpreted the command line arguments to ``setup.py build`` as 225 one of the 'side effect free' commands or options. 226 """ 227 228 def run(self): 229 raise RuntimeError(setup_requires_error) 230 231 232 class DummyInstall(install): 233 """ 234 This class makes it very obvious when ``keywords_with_side_effects()`` has 235 incorrectly interpreted the command line arguments to ``setup.py install`` 236 as one of the 'side effect free' commands or options. 
237 """ 238 239 def run(self): 240 raise RuntimeError(setup_requires_error) 241 242 243 class DummyPyTest(test): 244 """ 245 This class makes it very obvious when ``keywords_with_side_effects()`` has 246 incorrectly interpreted the command line arguments to ``setup.py test`` as 247 one of the 'side effect free' commands or options. 248 """ 249 250 def run_tests(self): 251 raise RuntimeError(setup_requires_error) 252 253 254 with open(os.path.join(base_dir, "README.rst")) as f: 255 long_description = f.read() 256 257 258 setup( 259 name=about["__title__"], 260 version=about["__version__"], 261 262 description=about["__summary__"], 263 long_description=long_description, 264 license=about["__license__"], 265 url=about["__uri__"], 266 267 author=about["__author__"], 268 author_email=about["__email__"], 269 270 classifiers=[ 271 "Intended Audience :: Developers", 272 "License :: OSI Approved :: Apache Software License", 273 "License :: OSI Approved :: BSD License", 274 "Natural Language :: English", 275 "Operating System :: MacOS :: MacOS X", 276 "Operating System :: POSIX", 277 "Operating System :: POSIX :: BSD", 278 "Operating System :: POSIX :: Linux", 279 "Operating System :: Microsoft :: Windows", 280 "Programming Language :: Python", 281 "Programming Language :: Python :: 2", 282 "Programming Language :: Python :: 2.6", 283 "Programming Language :: Python :: 2.7", 284 "Programming Language :: Python :: 3", 285 "Programming Language :: Python :: 3.3", 286 "Programming Language :: Python :: 3.4", 287 "Programming Language :: Python :: Implementation :: CPython", 288 "Programming Language :: Python :: Implementation :: PyPy", 289 "Topic :: Security :: Cryptography", 290 ], 291 292 package_dir={"": "src"}, 293 packages=find_packages( 294 where="src", exclude=["_cffi_src", "_cffi_src.*", "tests", "tests.*"] 295 ), 296 include_package_data=True, 297 298 install_requires=requirements, 299 tests_require=test_requirements, 300 301 # for cffi 302 zip_safe=False, 303 ext_package="cryptography.hazmat.bindings", 304 entry_points={ 305 "cryptography.backends": backends, 306 }, 307 **keywords_with_side_effects(sys.argv) 308 ) 309 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -45,7 +45,13 @@ if sys.version_info < (3, 3): requirements.append("ipaddress") -if platform.python_implementation() != "PyPy": +if platform.python_implementation() == "PyPy": + if sys.pypy_version_info < (2, 6): + raise RuntimeError( + "cryptography 1.0 is not compatible with PyPy < 2.6. Please " + "upgrade PyPy to use this library." + ) +else: requirements.append("cffi>=1.1.0") setup_requirements.append("cffi>=1.1.0")
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -45,7 +45,13 @@\n if sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n \n-if platform.python_implementation() != \"PyPy\":\n+if platform.python_implementation() == \"PyPy\":\n+ if sys.pypy_version_info < (2, 6):\n+ raise RuntimeError(\n+ \"cryptography 1.0 is not compatible with PyPy < 2.6. Please \"\n+ \"upgrade PyPy to use this library.\"\n+ )\n+else:\n requirements.append(\"cffi>=1.1.0\")\n setup_requirements.append(\"cffi>=1.1.0\")\n", "issue": "Installing cryptography with old pypy builds (and caches) bad wheels\nIf I install cryptography with pypy 2.5.1, the new cffi build mechanism isn't invoked and I get an installed package without any of the compiled modules. However, pip builds and caches `cryptography-1.0-py2-none-any.whl` which is platform-independent (because it has no compiled modules). This bad wheel then gets used for any further installs with any Python 2 interpreter, and happily installs a cryptography without any of the interesting bits.\n\nTo illustrate with an example:\n\n``` console\nroot@e1ffcbbe8f9d:/foo$ # Before cryptograpy is installed:\nroot@e1ffcbbe8f9d:/foo$ find ~/.cache/pip/wheels -name 'cryptography*'\nroot@e1ffcbbe8f9d:/foo$ /env-pypy-2.6.0/bin/pypy -c 'import cryptography.hazmat.bindings._openssl' 2>&1 | grep ImportError\nImportError: No module named cryptography\n\nroot@e1ffcbbe8f9d:/foo$ # Install cryptography using old pypy:\nroot@e1ffcbbe8f9d:/foo$ /env-pypy-2.5.1/bin/pip install cryptography -q\nroot@e1ffcbbe8f9d:/foo$ find ~/.cache/pip/wheels -name 'cryptography*'\n/root/.cache/pip/wheels/6d/db/24/a5bbe660e3b632abb007ba6d8c91b5fa51a83bb53544c16709/cryptography-1.0-py2-none-any.whl\n\nroot@e1ffcbbe8f9d:/foo$ # After that, install cryptography using new pypy:\nroot@e1ffcbbe8f9d:/foo$ /env-pypy-2.6.0/bin/pip install cryptography -q\nroot@e1ffcbbe8f9d:/foo$ find ~/.cache/pip/wheels -name 'cryptography*'\n/root/.cache/pip/wheels/6d/db/24/a5bbe660e3b632abb007ba6d8c91b5fa51a83bb53544c16709/cryptography-1.0-py2-none-any.whl\n\nroot@e1ffcbbe8f9d:/foo$ # The installed cryptography is now broken:\nroot@e1ffcbbe8f9d:/foo$ /env-pypy-2.6.0/bin/pypy -c 'import cryptography.hazmat.bindings._openssl' 2>&1 | grep ImportError\nImportError: No module named cryptography.hazmat.bindings._openssl\n```\n\nI think a reasonable solution is to explicitly fail to build or install on older versions of pypy. This would prevent the broken wheel from being being built, and would also give a useful error at install time instead of silently installing a nonfunctional cryptography.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() != \"PyPy\":\n requirements.append(\"cffi>=1.1.0\")\n setup_requirements.append(\"cffi>=1.1.0\")\n\n# If you add a new dep here you probably need to add it in the tox.ini as well\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n]\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. 
The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. _setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n 
This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\", \"tests\", \"tests.*\"]\n ),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport platform\nimport subprocess\nimport sys\nfrom distutils.command.build import build\n\nimport pkg_resources\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.install import install\nfrom setuptools.command.test import test\n\n\nbase_dir = os.path.dirname(__file__)\nsrc_dir = os.path.join(base_dir, \"src\")\n\n# When executing the setup.py, we need to be able to import ourselves, this\n# means that we need to add the src/ directory to the sys.path.\nsys.path.insert(0, src_dir)\n\nabout = {}\nwith open(os.path.join(src_dir, \"cryptography\", \"__about__.py\")) as f:\n exec(f.read(), about)\n\n\nVECTORS_DEPENDENCY = \"cryptography_vectors=={0}\".format(about['__version__'])\n\nrequirements = [\n \"idna>=2.0\",\n \"pyasn1>=0.1.8\",\n \"six>=1.4.1\",\n \"setuptools\",\n]\nsetup_requirements = []\n\nif sys.version_info < (3, 4):\n requirements.append(\"enum34\")\n\nif sys.version_info < (3, 3):\n requirements.append(\"ipaddress\")\n\nif platform.python_implementation() == \"PyPy\":\n if sys.pypy_version_info < (2, 6):\n raise RuntimeError(\n \"cryptography 1.0 is not compatible with PyPy < 2.6. 
Please \"\n \"upgrade PyPy to use this library.\"\n )\nelse:\n requirements.append(\"cffi>=1.1.0\")\n setup_requirements.append(\"cffi>=1.1.0\")\n\n# If you add a new dep here you probably need to add it in the tox.ini as well\ntest_requirements = [\n \"pytest\",\n \"pretend\",\n \"iso8601\",\n]\n\n# If there's no vectors locally that probably means we are in a tarball and\n# need to go and get the matching vectors package from PyPi\nif not os.path.exists(os.path.join(base_dir, \"vectors/setup.py\")):\n test_requirements.append(VECTORS_DEPENDENCY)\n\n\ndef cc_is_available():\n return sys.platform == \"darwin\" and list(map(\n int, platform.mac_ver()[0].split(\".\"))) >= [10, 8, 0]\n\n\nbackends = [\n \"openssl = cryptography.hazmat.backends.openssl:backend\"\n]\n\nif cc_is_available():\n backends.append(\n \"commoncrypto = cryptography.hazmat.backends.commoncrypto:backend\",\n )\n\n\nclass PyTest(test):\n def finalize_options(self):\n test.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n # This means there's a vectors/ folder with the package in here.\n # cd into it, install the vectors package and then refresh sys.path\n if VECTORS_DEPENDENCY not in test_requirements:\n subprocess.check_call(\n [sys.executable, \"setup.py\", \"install\"], cwd=\"vectors\"\n )\n pkg_resources.get_distribution(\"cryptography_vectors\").activate()\n\n def run_tests(self):\n # Import here because in module scope the eggs are not loaded.\n import pytest\n test_args = [os.path.join(base_dir, \"tests\")]\n errno = pytest.main(test_args)\n sys.exit(errno)\n\n\ndef keywords_with_side_effects(argv):\n \"\"\"\n Get a dictionary with setup keywords that (can) have side effects.\n\n :param argv: A list of strings with command line arguments.\n :returns: A dictionary with keyword arguments for the ``setup()`` function.\n\n This setup.py script uses the setuptools 'setup_requires' feature because\n this is required by the cffi package to compile extension modules. The\n purpose of ``keywords_with_side_effects()`` is to avoid triggering the cffi\n build process as a result of setup.py invocations that don't need the cffi\n module to be built (setup.py serves the dual purpose of exposing package\n metadata).\n\n All of the options listed by ``python setup.py --help`` that print\n information should be recognized here. The commands ``clean``,\n ``egg_info``, ``register``, ``sdist`` and ``upload`` are also recognized.\n Any combination of these options and commands is also supported.\n\n This function was originally based on the `setup.py script`_ of SciPy (see\n also the discussion in `pip issue #25`_).\n\n .. _pip issue #25: https://github.com/pypa/pip/issues/25\n .. 
_setup.py script: https://github.com/scipy/scipy/blob/master/setup.py\n \"\"\"\n no_setup_requires_arguments = (\n '-h', '--help',\n '-n', '--dry-run',\n '-q', '--quiet',\n '-v', '--verbose',\n '-V', '--version',\n '--author',\n '--author-email',\n '--classifiers',\n '--contact',\n '--contact-email',\n '--description',\n '--egg-base',\n '--fullname',\n '--help-commands',\n '--keywords',\n '--licence',\n '--license',\n '--long-description',\n '--maintainer',\n '--maintainer-email',\n '--name',\n '--no-user-cfg',\n '--obsoletes',\n '--platforms',\n '--provides',\n '--requires',\n '--url',\n 'clean',\n 'egg_info',\n 'register',\n 'sdist',\n 'upload',\n )\n\n def is_short_option(argument):\n \"\"\"Check whether a command line argument is a short option.\"\"\"\n return len(argument) >= 2 and argument[0] == '-' and argument[1] != '-'\n\n def expand_short_options(argument):\n \"\"\"Expand combined short options into canonical short options.\"\"\"\n return ('-' + char for char in argument[1:])\n\n def argument_without_setup_requirements(argv, i):\n \"\"\"Check whether a command line argument needs setup requirements.\"\"\"\n if argv[i] in no_setup_requires_arguments:\n # Simple case: An argument which is either an option or a command\n # which doesn't need setup requirements.\n return True\n elif (is_short_option(argv[i]) and\n all(option in no_setup_requires_arguments\n for option in expand_short_options(argv[i]))):\n # Not so simple case: Combined short options none of which need\n # setup requirements.\n return True\n elif argv[i - 1:i] == ['--egg-base']:\n # Tricky case: --egg-info takes an argument which should not make\n # us use setup_requires (defeating the purpose of this code).\n return True\n else:\n return False\n\n if all(argument_without_setup_requirements(argv, i)\n for i in range(1, len(argv))):\n return {\n \"cmdclass\": {\n \"build\": DummyBuild,\n \"install\": DummyInstall,\n \"test\": DummyPyTest,\n }\n }\n else:\n cffi_modules = [\n \"src/_cffi_src/build_openssl.py:ffi\",\n \"src/_cffi_src/build_constant_time.py:ffi\",\n \"src/_cffi_src/build_padding.py:ffi\",\n ]\n if cc_is_available():\n cffi_modules.append(\"src/_cffi_src/build_commoncrypto.py:ffi\")\n\n return {\n \"setup_requires\": setup_requirements,\n \"cmdclass\": {\n \"test\": PyTest,\n },\n \"cffi_modules\": cffi_modules\n }\n\n\nsetup_requires_error = (\"Requested setup command that needs 'setup_requires' \"\n \"while command line arguments implied a side effect \"\n \"free command or option.\")\n\n\nclass DummyBuild(build):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py build`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyInstall(install):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py install``\n as one of the 'side effect free' commands or options.\n \"\"\"\n\n def run(self):\n raise RuntimeError(setup_requires_error)\n\n\nclass DummyPyTest(test):\n \"\"\"\n This class makes it very obvious when ``keywords_with_side_effects()`` has\n incorrectly interpreted the command line arguments to ``setup.py test`` as\n one of the 'side effect free' commands or options.\n \"\"\"\n\n def run_tests(self):\n raise RuntimeError(setup_requires_error)\n\n\nwith open(os.path.join(base_dir, \"README.rst\")) as f:\n 
long_description = f.read()\n\n\nsetup(\n name=about[\"__title__\"],\n version=about[\"__version__\"],\n\n description=about[\"__summary__\"],\n long_description=long_description,\n license=about[\"__license__\"],\n url=about[\"__uri__\"],\n\n author=about[\"__author__\"],\n author_email=about[\"__email__\"],\n\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"License :: OSI Approved :: BSD License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Security :: Cryptography\",\n ],\n\n package_dir={\"\": \"src\"},\n packages=find_packages(\n where=\"src\", exclude=[\"_cffi_src\", \"_cffi_src.*\", \"tests\", \"tests.*\"]\n ),\n include_package_data=True,\n\n install_requires=requirements,\n tests_require=test_requirements,\n\n # for cffi\n zip_safe=False,\n ext_package=\"cryptography.hazmat.bindings\",\n entry_points={\n \"cryptography.backends\": backends,\n },\n **keywords_with_side_effects(sys.argv)\n)\n", "path": "setup.py"}]}
4002
162
gh_patches_debug_26215
rasdani/github-patches
git_diff
hedyorg__hedy-1769
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [UI idea] Improve pre-join class UI **Idea incl level** Currently the pre-join page of a class is nothing more than plain-text with a link. It would be nice to improve the UI a bit to better align with the rest of the Hedy website. See the screenshot below for the current situation: ![Schermafbeelding 2022-01-24 om 13 22 28](https://user-images.githubusercontent.com/38155424/150782347-2c230948-6284-4396-bf97-a9d9b5ebc32b.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/teacher.py` Content: ``` 1 import json 2 3 from website.auth import requires_login, is_teacher, current_user 4 import utils 5 import uuid 6 from flask import g, request, jsonify, redirect 7 from flask_helpers import render_template 8 import os 9 import hedyweb 10 import hedy_content 11 TRANSLATIONS = hedyweb.Translations () 12 from config import config 13 cookie_name = config ['session'] ['cookie_name'] 14 15 16 def routes (app, database, achievements): 17 global DATABASE 18 global ACHIEVEMENTS 19 DATABASE = database 20 ACHIEVEMENTS = achievements 21 22 @app.route('/classes', methods=['GET']) 23 @requires_login 24 def get_classes (user): 25 if not is_teacher(user): 26 return utils.error_page_403(error=403, ui_message='retrieve_class') 27 return jsonify (DATABASE.get_teacher_classes (user ['username'], True)) 28 29 @app.route('/for-teachers/class/<class_id>', methods=['GET']) 30 @requires_login 31 def get_class (user, class_id): 32 app.logger.info('This is info output') 33 if not is_teacher(user): 34 return utils.error_page_403(error=403, ui_message='retrieve_class') 35 Class = DATABASE.get_class (class_id) 36 if not Class or Class ['teacher'] != user ['username']: 37 return utils.error_page(error=404, ui_message='no_such_class') 38 students = [] 39 for student_username in Class.get ('students', []): 40 student = DATABASE.user_by_username (student_username) 41 programs = DATABASE.programs_for_user(student_username) 42 highest_level = max(program['level'] for program in programs) if len(programs) else 0 43 sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date'])) 44 if sorted_public_programs: 45 latest_shared = sorted_public_programs[-1] 46 latest_shared['link'] = f"/hedy/{latest_shared['id']}/view" 47 else: 48 latest_shared = None 49 students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared}) 50 51 if utils.is_testing_request (request): 52 return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']}) 53 54 achievement = None 55 if len(students) > 20: 56 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "full_house") 57 if achievement: 58 achievement = json.dumps(achievement) 59 60 teachers = os.getenv('BETA_TEACHERS', '').split(',') 61 is_beta_teacher = user['username'] in teachers 62 63 return render_template ('class-overview.html', current_page='my-profile', 64 page_title=hedyweb.get_page_title('class overview'), 65 achievement=achievement, 66 is_beta_teacher=is_beta_teacher, 67 class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'], 68 'name': Class ['name'], 'id': Class 
['id']}) 69 70 @app.route('/class', methods=['POST']) 71 @requires_login 72 def create_class (user): 73 if not is_teacher(user): 74 return 'Only teachers can create classes', 403 75 76 body = request.json 77 # Validations 78 if not isinstance(body, dict): 79 return 'body must be an object', 400 80 if not isinstance(body.get('name'), str): 81 return 'name must be a string', 400 82 83 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate 84 Classes = DATABASE.get_teacher_classes(user['username'], True) 85 for Class in Classes: 86 if Class['name'] == body['name']: 87 return "duplicate", 200 88 89 Class = { 90 'id': uuid.uuid4().hex, 91 'date': utils.timems (), 92 'teacher': user ['username'], 93 'link': utils.random_id_generator (7), 94 'name': body ['name'] 95 } 96 97 DATABASE.store_class (Class) 98 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "ready_set_education") 99 if achievement: 100 return {'id': Class['id'], 'achievement': achievement}, 200 101 return {'id': Class['id']}, 200 102 103 @app.route('/class/<class_id>', methods=['PUT']) 104 @requires_login 105 def update_class (user, class_id): 106 if not is_teacher(user): 107 return 'Only teachers can update classes', 403 108 109 body = request.json 110 # Validations 111 if not isinstance(body, dict): 112 return 'body must be an object', 400 113 if not isinstance(body.get('name'), str): 114 return 'name must be a string', 400 115 116 Class = DATABASE.get_class (class_id) 117 if not Class or Class ['teacher'] != user ['username']: 118 return 'No such class', 404 119 120 # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate 121 Classes = DATABASE.get_teacher_classes(user ['username'], True) 122 for Class in Classes: 123 if Class['name'] == body['name']: 124 return "duplicate", 200 125 126 Class = DATABASE.update_class (class_id, body ['name']) 127 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "on_second_thoughts") 128 if achievement: 129 return {'achievement': achievement}, 200 130 return {}, 200 131 132 @app.route('/class/<class_id>', methods=['DELETE']) 133 @requires_login 134 def delete_class (user, class_id): 135 Class = DATABASE.get_class (class_id) 136 if not Class or Class ['teacher'] != user ['username']: 137 return 'No such class', 404 138 139 DATABASE.delete_class (Class) 140 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "end_of_semester") 141 if achievement: 142 return {'achievement': achievement}, 200 143 return {}, 200 144 145 @app.route('/class/<class_id>/prejoin/<link>', methods=['GET']) 146 def prejoin_class (class_id, link): 147 Class = DATABASE.get_class (class_id) 148 if not Class or Class ['link'] != link: 149 return utils.error_page(error=404, ui_message='invalid_class_link') 150 user = {} 151 if request.cookies.get (cookie_name): 152 token = DATABASE.get_token(request.cookies.get (cookie_name)) 153 if token: 154 if token ['username'] in Class.get ('students', []): 155 return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'), 156 current_page='my-profile', class_info={'name': Class ['name']}) 157 user = DATABASE.user_by_username(token ['username']) 158 159 return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'), 160 current_page='my-profile', 161 class_info={ 162 'id': Class ['id'], 163 'name': Class ['name'], 164 }) 165 166 @app.route('/class/join', methods=['POST']) 167 
@requires_login 168 def join_class(user): 169 body = request.json 170 if 'id' in body: 171 Class = DATABASE.get_class(body['id']) 172 if not Class or Class ['id'] != body['id']: 173 return utils.error_page(error=404, ui_message='invalid_class_link') 174 175 DATABASE.add_student_to_class(Class['id'], user['username']) 176 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "epic_education") 177 if achievement: 178 return {'achievement': achievement}, 200 179 return {}, 200 180 181 @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE']) 182 @requires_login 183 def leave_class (user, class_id, student_id): 184 Class = DATABASE.get_class (class_id) 185 if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']: 186 return 'No such class', 404 187 188 DATABASE.remove_student_from_class (Class ['id'], student_id) 189 if Class['teacher'] == user['username']: 190 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "detention") 191 if achievement: 192 return {'achievement': achievement}, 200 193 return {}, 200 194 195 @app.route('/for-teachers/customize-class/<class_id>', methods=['GET']) 196 @requires_login 197 def get_class_info(user, class_id): 198 if not is_teacher(user): 199 return utils.error_page_403(error=403, ui_message='retrieve_class') 200 Class = DATABASE.get_class(class_id) 201 if not Class or Class['teacher'] != user['username']: 202 return utils.error_page(error=404, ui_message='no_such_class') 203 204 if hedy_content.Adventures(g.lang).has_adventures(): 205 adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels() 206 else: 207 adventures = hedy_content.Adventures("en").get_adventure_keyname_name_levels() 208 levels = hedy_content.LevelDefaults(g.lang).levels 209 preferences = DATABASE.get_customizations_class(class_id) 210 211 return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'), 212 class_info={'name': Class['name'], 'id': Class['id']}, levels=levels, 213 adventures=adventures, preferences=preferences, current_page='my-profile') 214 215 @app.route('/customize-class/<class_id>', methods=['PUT']) 216 @requires_login 217 def update_level_preferences(user, class_id): 218 if not is_teacher(user): 219 return 'Only teachers can update class preferences', 403 220 221 body = request.json 222 print(body) 223 # Validations 224 if not isinstance(body, dict): 225 return 'body must be an object', 400 226 if not isinstance(body.get('example_programs'), bool): 227 return 'amount of example programs must be an integer', 400 228 if not isinstance(body.get('hide_level'), bool): 229 return 'level switch must be a boolean', 400 230 if not isinstance(body.get('hide_prev_level'), bool): 231 return 'level switch must be a boolean', 400 232 if not isinstance(body.get('hide_next_level'), bool): 233 return 'level switch must be a boolean', 400 234 if not isinstance(int(body.get('level')), int): 235 return 'level must ben an integer', 400 236 237 Class = DATABASE.get_class(class_id) 238 if not Class or Class['teacher'] != user['username']: 239 return 'No such class', 404 240 241 customizations = {} 242 customizations['id'] = class_id 243 customizations['level'] = int(body.get('level')) 244 customizations['adventures'] = body.get('adventures') 245 customizations['example_programs'] = body.get('example_programs') 246 customizations['hide'] = body.get('hide_level') 247 customizations['hide_prev_level'] = body.get('hide_prev_level') 248 
customizations['hide_next_level'] = body.get('hide_next_level') 249 250 DATABASE.update_customizations_class(customizations) 251 achievement = ACHIEVEMENTS.add_single_achievement(user['username'], "my_class_my_rules") 252 if achievement: 253 return {'achievement': achievement}, 200 254 return {}, 200 255 256 @app.route('/hedy/l/<link_id>', methods=['GET']) 257 def resolve_class_link (link_id): 258 Class = DATABASE.resolve_class_link (link_id) 259 if not Class: 260 return utils.error_page(error=404, ui_message='invalid_class_link') 261 return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302) 262 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/teacher.py b/website/teacher.py --- a/website/teacher.py +++ b/website/teacher.py @@ -152,16 +152,18 @@ token = DATABASE.get_token(request.cookies.get (cookie_name)) if token: if token ['username'] in Class.get ('students', []): - return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'), + return render_template ('class-prejoin.html', joined=True, + page_title=hedyweb.get_page_title('join class'), current_page='my-profile', class_info={'name': Class ['name']}) user = DATABASE.user_by_username(token ['username']) - return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'), - current_page='my-profile', - class_info={ - 'id': Class ['id'], - 'name': Class ['name'], - }) + return render_template ('class-prejoin.html', joined=False, + page_title=hedyweb.get_page_title('join class'), + current_page='my-profile', + class_info={ + 'id': Class ['id'], + 'name': Class ['name'], + }) @app.route('/class/join', methods=['POST']) @requires_login
{"golden_diff": "diff --git a/website/teacher.py b/website/teacher.py\n--- a/website/teacher.py\n+++ b/website/teacher.py\n@@ -152,16 +152,18 @@\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n- return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),\n+ return render_template ('class-prejoin.html', joined=True,\n+ page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n \n- return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),\n- current_page='my-profile',\n- class_info={\n- 'id': Class ['id'],\n- 'name': Class ['name'],\n- })\n+ return render_template ('class-prejoin.html', joined=False,\n+ page_title=hedyweb.get_page_title('join class'),\n+ current_page='my-profile',\n+ class_info={\n+ 'id': Class ['id'],\n+ 'name': Class ['name'],\n+ })\n \n @app.route('/class/join', methods=['POST'])\n @requires_login\n", "issue": "[UI idea] Improve pre-join class UI\n**Idea incl level**\r\nCurrently the pre-join page of a class is nothing more than plain-text with a link. It would be nice to improve the UI a bit to better align with the rest of the Hedy website. See the screenshot below for the current situation:\r\n\r\n![Schermafbeelding 2022-01-24 om 13 22 28](https://user-images.githubusercontent.com/38155424/150782347-2c230948-6284-4396-bf97-a9d9b5ebc32b.png)\n", "before_files": [{"content": "import json\n\nfrom website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nimport hedy_content\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\n\ndef routes (app, database, achievements):\n global DATABASE\n global ACHIEVEMENTS\n DATABASE = database\n ACHIEVEMENTS = achievements\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/for-teachers/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': 
latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n achievement = None\n if len(students) > 20:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"full_house\")\n if achievement:\n achievement = json.dumps(achievement)\n\n teachers = os.getenv('BETA_TEACHERS', '').split(',')\n is_beta_teacher = user['username'] in teachers\n\n return render_template ('class-overview.html', current_page='my-profile',\n page_title=hedyweb.get_page_title('class overview'),\n achievement=achievement,\n is_beta_teacher=is_beta_teacher,\n class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'],\n 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"ready_set_education\")\n if achievement:\n return {'id': Class['id'], 'achievement': achievement}, 200\n return {'id': Class['id']}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"on_second_thoughts\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"end_of_semester\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n user = {}\n if 
request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-already-joined.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html', page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile',\n class_info={\n 'id': Class ['id'],\n 'name': Class ['name'],\n })\n\n @app.route('/class/join', methods=['POST'])\n @requires_login\n def join_class(user):\n body = request.json\n if 'id' in body:\n Class = DATABASE.get_class(body['id'])\n if not Class or Class ['id'] != body['id']:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n\n DATABASE.add_student_to_class(Class['id'], user['username'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"epic_education\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n if Class['teacher'] == user['username']:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"detention\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n\n if hedy_content.Adventures(g.lang).has_adventures():\n adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()\n else:\n adventures = hedy_content.Adventures(\"en\").get_adventure_keyname_name_levels()\n levels = hedy_content.LevelDefaults(g.lang).levels\n preferences = DATABASE.get_customizations_class(class_id)\n\n return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'),\n class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,\n adventures=adventures, preferences=preferences, current_page='my-profile')\n\n @app.route('/customize-class/<class_id>', methods=['PUT'])\n @requires_login\n def update_level_preferences(user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update class preferences', 403\n\n body = request.json\n print(body)\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('example_programs'), bool):\n return 'amount of example programs must be an integer', 400\n if not isinstance(body.get('hide_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_prev_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_next_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(int(body.get('level')), int):\n return 'level must ben an integer', 400\n\n Class = 
DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return 'No such class', 404\n\n customizations = {}\n customizations['id'] = class_id\n customizations['level'] = int(body.get('level'))\n customizations['adventures'] = body.get('adventures')\n customizations['example_programs'] = body.get('example_programs')\n customizations['hide'] = body.get('hide_level')\n customizations['hide_prev_level'] = body.get('hide_prev_level')\n customizations['hide_next_level'] = body.get('hide_next_level')\n\n DATABASE.update_customizations_class(customizations)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"my_class_my_rules\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}], "after_files": [{"content": "import json\n\nfrom website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import g, request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nimport hedy_content\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\n\ndef routes (app, database, achievements):\n global DATABASE\n global ACHIEVEMENTS\n DATABASE = database\n ACHIEVEMENTS = achievements\n\n @app.route('/classes', methods=['GET'])\n @requires_login\n def get_classes (user):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n return jsonify (DATABASE.get_teacher_classes (user ['username'], True))\n\n @app.route('/for-teachers/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n app.logger.info('This is info output')\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = f\"/hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.datetotimeordate (utils.mstoisostring (student ['last_login'])), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n achievement = None\n if len(students) > 20:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"full_house\")\n if achievement:\n achievement = json.dumps(achievement)\n\n teachers = os.getenv('BETA_TEACHERS', '').split(',')\n 
is_beta_teacher = user['username'] in teachers\n\n return render_template ('class-overview.html', current_page='my-profile',\n page_title=hedyweb.get_page_title('class overview'),\n achievement=achievement,\n is_beta_teacher=is_beta_teacher,\n class_info={'students': students, 'link': os.getenv('BASE_URL') + '/hedy/l/' + Class ['link'],\n 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher(user):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"ready_set_education\")\n if achievement:\n return {'id': Class['id'], 'achievement': achievement}, 200\n return {'id': Class['id']}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n # We use this extra call to verify if the class name doesn't already exist, if so it's a duplicate\n Classes = DATABASE.get_teacher_classes(user ['username'], True)\n for Class in Classes:\n if Class['name'] == body['name']:\n return \"duplicate\", 200\n\n Class = DATABASE.update_class (class_id, body ['name'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"on_second_thoughts\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"end_of_semester\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n if token ['username'] in Class.get ('students', []):\n return render_template ('class-prejoin.html', joined=True,\n page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile', class_info={'name': Class ['name']})\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template 
('class-prejoin.html', joined=False,\n page_title=hedyweb.get_page_title('join class'),\n current_page='my-profile',\n class_info={\n 'id': Class ['id'],\n 'name': Class ['name'],\n })\n\n @app.route('/class/join', methods=['POST'])\n @requires_login\n def join_class(user):\n body = request.json\n if 'id' in body:\n Class = DATABASE.get_class(body['id'])\n if not Class or Class ['id'] != body['id']:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n\n DATABASE.add_student_to_class(Class['id'], user['username'])\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"epic_education\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username'] or student_id != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n if Class['teacher'] == user['username']:\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"detention\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/for-teachers/customize-class/<class_id>', methods=['GET'])\n @requires_login\n def get_class_info(user, class_id):\n if not is_teacher(user):\n return utils.error_page_403(error=403, ui_message='retrieve_class')\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return utils.error_page(error=404, ui_message='no_such_class')\n\n if hedy_content.Adventures(g.lang).has_adventures():\n adventures = hedy_content.Adventures(g.lang).get_adventure_keyname_name_levels()\n else:\n adventures = hedy_content.Adventures(\"en\").get_adventure_keyname_name_levels()\n levels = hedy_content.LevelDefaults(g.lang).levels\n preferences = DATABASE.get_customizations_class(class_id)\n\n return render_template('customize-class.html', page_title=hedyweb.get_page_title('customize class'),\n class_info={'name': Class['name'], 'id': Class['id']}, levels=levels,\n adventures=adventures, preferences=preferences, current_page='my-profile')\n\n @app.route('/customize-class/<class_id>', methods=['PUT'])\n @requires_login\n def update_level_preferences(user, class_id):\n if not is_teacher(user):\n return 'Only teachers can update class preferences', 403\n\n body = request.json\n print(body)\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('example_programs'), bool):\n return 'amount of example programs must be an integer', 400\n if not isinstance(body.get('hide_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_prev_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(body.get('hide_next_level'), bool):\n return 'level switch must be a boolean', 400\n if not isinstance(int(body.get('level')), int):\n return 'level must ben an integer', 400\n\n Class = DATABASE.get_class(class_id)\n if not Class or Class['teacher'] != user['username']:\n return 'No such class', 404\n\n customizations = {}\n customizations['id'] = class_id\n customizations['level'] = int(body.get('level'))\n customizations['adventures'] = body.get('adventures')\n customizations['example_programs'] = body.get('example_programs')\n customizations['hide'] = body.get('hide_level')\n 
customizations['hide_prev_level'] = body.get('hide_prev_level')\n customizations['hide_next_level'] = body.get('hide_next_level')\n\n DATABASE.update_customizations_class(customizations)\n achievement = ACHIEVEMENTS.add_single_achievement(user['username'], \"my_class_my_rules\")\n if achievement:\n return {'achievement': achievement}, 200\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return utils.error_page(error=404, ui_message='invalid_class_link')\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}]}
3,775
299
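The row above patches Hedy's prejoin route so that a student who already belongs to the class is no longer sent to a separate `class-already-joined.html` template; both states now render `class-prejoin.html` with a `joined` flag. Below is a minimal, self-contained sketch of that pattern — `prejoin_context` and its return shape are illustrative stand-ins, not Hedy's actual `render_template` call.

```python
# Sketch of the single-template-with-flag pattern adopted by the patch above.
# prejoin_context is a hypothetical helper; Hedy itself calls
# render_template('class-prejoin.html', joined=..., class_info=...) directly.
def prejoin_context(Class, username=None):
    joined = username is not None and username in Class.get('students', [])
    return {
        'template': 'class-prejoin.html',
        'joined': joined,
        'class_info': {'id': Class['id'], 'name': Class['name']},
    }

demo_class = {'id': 'c1', 'name': 'Math', 'link': 'abc1234', 'students': ['ada']}
print(prejoin_context(demo_class, 'ada'))    # joined=True: already-enrolled student
print(prejoin_context(demo_class, 'grace'))  # joined=False: prospective student
```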
gh_patches_debug_5862
rasdani/github-patches
git_diff
pydantic__pydantic-3177
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "extra" config not respected for TypedDicts in functions ### Checks * [x] I added a descriptive title to this issue * [x] I have searched (google, github) for similar issues and couldn't find anything * [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug <!-- Sorry to sound so draconian, but every second saved replying to issues is time spend improving pydantic :-) --> # Bug Output of `python -c "import pydantic.utils; print(pydantic.utils.version_info())"`: ``` pydantic version: 1.8.2 pydantic compiled: False install path: /home/msharma216/.local/lib/python3.8/site-packages/pydantic python version: 3.8.0 (default, Feb 25 2021, 22:10:10) [GCC 8.4.0] platform: Linux-4.4.0-19041-Microsoft-x86_64-with-glibc2.27 optional deps. installed: ['typing-extensions'] ``` <!-- or if you're using pydantic prior to v1.3, manually include: OS, python version and pydantic version --> <!-- Please read the [docs](https://pydantic-docs.helpmanual.io/) and search through issues to confirm your bug hasn't already been reported. --> <!-- Where possible please include a self-contained code snippet describing your bug: --> When trying to use the `validate_arguments` decorator, I face the issue of getting validation errors for an extra attribute for a TypedDict as below: ```py from typing_extensions import TypedDict from pydantic import validate_arguments, Extra class TypedTest(TypedDict): y: str @validate_arguments(config={'extra': Extra.allow}) def test(other: TypedTest): pass test(other={'y': 'b', 'z': 'a'}) ``` Output: ``` pydantic.error_wrappers.ValidationError: 1 validation error for Test other -> z extra fields not permitted (type=value_error.extra) ``` Expected: No errors Anything I have missed that would let this validation pass for extra attributes in the TypedDict? Thanks very much! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pydantic/decorator.py` Content: ``` 1 from functools import wraps 2 from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload 3 4 from . import validator 5 from .config import Extra 6 from .errors import ConfigError 7 from .main import BaseModel, create_model 8 from .typing import get_all_type_hints 9 from .utils import to_camel 10 11 __all__ = ('validate_arguments',) 12 13 if TYPE_CHECKING: 14 from .typing import AnyCallable 15 16 AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable) 17 ConfigType = Union[None, Type[Any], Dict[str, Any]] 18 19 20 @overload 21 def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']: 22 ... 23 24 25 @overload 26 def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT': 27 ... 28 29 30 def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any: 31 """ 32 Decorator to validate the arguments passed to a function. 
33 """ 34 35 def validate(_func: 'AnyCallable') -> 'AnyCallable': 36 vd = ValidatedFunction(_func, config) 37 38 @wraps(_func) 39 def wrapper_function(*args: Any, **kwargs: Any) -> Any: 40 return vd.call(*args, **kwargs) 41 42 wrapper_function.vd = vd # type: ignore 43 wrapper_function.validate = vd.init_model_instance # type: ignore 44 wrapper_function.raw_function = vd.raw_function # type: ignore 45 wrapper_function.model = vd.model # type: ignore 46 return wrapper_function 47 48 if func: 49 return validate(func) 50 else: 51 return validate 52 53 54 ALT_V_ARGS = 'v__args' 55 ALT_V_KWARGS = 'v__kwargs' 56 V_POSITIONAL_ONLY_NAME = 'v__positional_only' 57 V_DUPLICATE_KWARGS = 'v__duplicate_kwargs' 58 59 60 class ValidatedFunction: 61 def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901 62 from inspect import Parameter, signature 63 64 parameters: Mapping[str, Parameter] = signature(function).parameters 65 66 if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}: 67 raise ConfigError( 68 f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" ' 69 f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator' 70 ) 71 72 self.raw_function = function 73 self.arg_mapping: Dict[int, str] = {} 74 self.positional_only_args = set() 75 self.v_args_name = 'args' 76 self.v_kwargs_name = 'kwargs' 77 78 type_hints = get_all_type_hints(function) 79 takes_args = False 80 takes_kwargs = False 81 fields: Dict[str, Tuple[Any, Any]] = {} 82 for i, (name, p) in enumerate(parameters.items()): 83 if p.annotation is p.empty: 84 annotation = Any 85 else: 86 annotation = type_hints[name] 87 88 default = ... if p.default is p.empty else p.default 89 if p.kind == Parameter.POSITIONAL_ONLY: 90 self.arg_mapping[i] = name 91 fields[name] = annotation, default 92 fields[V_POSITIONAL_ONLY_NAME] = List[str], None 93 self.positional_only_args.add(name) 94 elif p.kind == Parameter.POSITIONAL_OR_KEYWORD: 95 self.arg_mapping[i] = name 96 fields[name] = annotation, default 97 fields[V_DUPLICATE_KWARGS] = List[str], None 98 elif p.kind == Parameter.KEYWORD_ONLY: 99 fields[name] = annotation, default 100 elif p.kind == Parameter.VAR_POSITIONAL: 101 self.v_args_name = name 102 fields[name] = Tuple[annotation, ...], None 103 takes_args = True 104 else: 105 assert p.kind == Parameter.VAR_KEYWORD, p.kind 106 self.v_kwargs_name = name 107 fields[name] = Dict[str, annotation], None # type: ignore 108 takes_kwargs = True 109 110 # these checks avoid a clash between "args" and a field with that name 111 if not takes_args and self.v_args_name in fields: 112 self.v_args_name = ALT_V_ARGS 113 114 # same with "kwargs" 115 if not takes_kwargs and self.v_kwargs_name in fields: 116 self.v_kwargs_name = ALT_V_KWARGS 117 118 if not takes_args: 119 # we add the field so validation below can raise the correct exception 120 fields[self.v_args_name] = List[Any], None 121 122 if not takes_kwargs: 123 # same with kwargs 124 fields[self.v_kwargs_name] = Dict[Any, Any], None 125 126 self.create_model(fields, takes_args, takes_kwargs, config) 127 128 def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel: 129 values = self.build_values(args, kwargs) 130 return self.model(**values) 131 132 def call(self, *args: Any, **kwargs: Any) -> Any: 133 m = self.init_model_instance(*args, **kwargs) 134 return self.execute(m) 135 136 def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]: 
137 values: Dict[str, Any] = {} 138 if args: 139 arg_iter = enumerate(args) 140 while True: 141 try: 142 i, a = next(arg_iter) 143 except StopIteration: 144 break 145 arg_name = self.arg_mapping.get(i) 146 if arg_name is not None: 147 values[arg_name] = a 148 else: 149 values[self.v_args_name] = [a] + [a for _, a in arg_iter] 150 break 151 152 var_kwargs = {} 153 wrong_positional_args = [] 154 duplicate_kwargs = [] 155 non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name} 156 for k, v in kwargs.items(): 157 if k in non_var_fields: 158 if k in self.positional_only_args: 159 wrong_positional_args.append(k) 160 if k in values: 161 duplicate_kwargs.append(k) 162 values[k] = v 163 else: 164 var_kwargs[k] = v 165 166 if var_kwargs: 167 values[self.v_kwargs_name] = var_kwargs 168 if wrong_positional_args: 169 values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args 170 if duplicate_kwargs: 171 values[V_DUPLICATE_KWARGS] = duplicate_kwargs 172 return values 173 174 def execute(self, m: BaseModel) -> Any: 175 d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory} 176 var_kwargs = d.pop(self.v_kwargs_name, {}) 177 178 if self.v_args_name in d: 179 args_: List[Any] = [] 180 in_kwargs = False 181 kwargs = {} 182 for name, value in d.items(): 183 if in_kwargs: 184 kwargs[name] = value 185 elif name == self.v_args_name: 186 args_ += value 187 in_kwargs = True 188 else: 189 args_.append(value) 190 return self.raw_function(*args_, **kwargs, **var_kwargs) 191 elif self.positional_only_args: 192 args_ = [] 193 kwargs = {} 194 for name, value in d.items(): 195 if name in self.positional_only_args: 196 args_.append(value) 197 else: 198 kwargs[name] = value 199 return self.raw_function(*args_, **kwargs, **var_kwargs) 200 else: 201 return self.raw_function(**d, **var_kwargs) 202 203 def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None: 204 pos_args = len(self.arg_mapping) 205 206 class CustomConfig: 207 pass 208 209 if not TYPE_CHECKING: # pragma: no branch 210 if isinstance(config, dict): 211 CustomConfig = type('Config', (), config) # noqa: F811 212 elif config is not None: 213 CustomConfig = config # noqa: F811 214 215 if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'): 216 raise ConfigError( 217 'Setting the "fields" and "alias_generator" property on custom Config for ' 218 '@validate_arguments is not yet supported, please remove.' 
219 ) 220 221 class DecoratorBaseModel(BaseModel): 222 @validator(self.v_args_name, check_fields=False, allow_reuse=True) 223 def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]: 224 if takes_args or v is None: 225 return v 226 227 raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given') 228 229 @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True) 230 def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: 231 if takes_kwargs or v is None: 232 return v 233 234 plural = '' if len(v) == 1 else 's' 235 keys = ', '.join(map(repr, v.keys())) 236 raise TypeError(f'unexpected keyword argument{plural}: {keys}') 237 238 @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True) 239 def check_positional_only(cls, v: Optional[List[str]]) -> None: 240 if v is None: 241 return 242 243 plural = '' if len(v) == 1 else 's' 244 keys = ', '.join(map(repr, v)) 245 raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}') 246 247 @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True) 248 def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None: 249 if v is None: 250 return 251 252 plural = '' if len(v) == 1 else 's' 253 keys = ', '.join(map(repr, v)) 254 raise TypeError(f'multiple values for argument{plural}: {keys}') 255 256 class Config(CustomConfig): 257 extra = Extra.forbid 258 259 self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields) 260 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pydantic/decorator.py b/pydantic/decorator.py --- a/pydantic/decorator.py +++ b/pydantic/decorator.py @@ -254,6 +254,6 @@ raise TypeError(f'multiple values for argument{plural}: {keys}') class Config(CustomConfig): - extra = Extra.forbid + extra = getattr(CustomConfig, 'extra', Extra.forbid) self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)
{"golden_diff": "diff --git a/pydantic/decorator.py b/pydantic/decorator.py\n--- a/pydantic/decorator.py\n+++ b/pydantic/decorator.py\n@@ -254,6 +254,6 @@\n raise TypeError(f'multiple values for argument{plural}: {keys}')\n \n class Config(CustomConfig):\n- extra = Extra.forbid\n+ extra = getattr(CustomConfig, 'extra', Extra.forbid)\n \n self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)\n", "issue": "\"extra\" config not respected for TypedDicts in functions\n### Checks\r\n\r\n* [x] I added a descriptive title to this issue\r\n* [x] I have searched (google, github) for similar issues and couldn't find anything\r\n* [x] I have read and followed [the docs](https://pydantic-docs.helpmanual.io/) and still think this is a bug\r\n\r\n<!-- Sorry to sound so draconian, but every second saved replying to issues is time spend improving pydantic :-) -->\r\n\r\n# Bug\r\n\r\nOutput of `python -c \"import pydantic.utils; print(pydantic.utils.version_info())\"`:\r\n```\r\n pydantic version: 1.8.2\r\n pydantic compiled: False\r\n install path: /home/msharma216/.local/lib/python3.8/site-packages/pydantic\r\n python version: 3.8.0 (default, Feb 25 2021, 22:10:10) [GCC 8.4.0]\r\n platform: Linux-4.4.0-19041-Microsoft-x86_64-with-glibc2.27\r\n optional deps. installed: ['typing-extensions']\r\n\r\n```\r\n<!-- or if you're using pydantic prior to v1.3, manually include: OS, python version and pydantic version -->\r\n\r\n<!-- Please read the [docs](https://pydantic-docs.helpmanual.io/) and search through issues to\r\nconfirm your bug hasn't already been reported. -->\r\n\r\n<!-- Where possible please include a self-contained code snippet describing your bug: -->\r\n\r\nWhen trying to use the `validate_arguments` decorator, I face the issue of getting validation errors for an extra attribute for a TypedDict as below:\r\n\r\n```py\r\nfrom typing_extensions import TypedDict\r\n\r\nfrom pydantic import validate_arguments, Extra\r\n\r\n\r\nclass TypedTest(TypedDict):\r\n y: str\r\n\r\n\r\n@validate_arguments(config={'extra': Extra.allow})\r\ndef test(other: TypedTest):\r\n pass\r\n\r\n\r\ntest(other={'y': 'b', 'z': 'a'})\r\n\r\n```\r\n\r\nOutput:\r\n```\r\npydantic.error_wrappers.ValidationError: 1 validation error for Test\r\nother -> z\r\n extra fields not permitted (type=value_error.extra)\r\n```\r\n\r\nExpected: No errors\r\n\r\nAnything I have missed that would let this validation pass for extra attributes in the TypedDict?\r\n\r\nThanks very much!\n", "before_files": [{"content": "from functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload\n\nfrom . 
import validator\nfrom .config import Extra\nfrom .errors import ConfigError\nfrom .main import BaseModel, create_model\nfrom .typing import get_all_type_hints\nfrom .utils import to_camel\n\n__all__ = ('validate_arguments',)\n\nif TYPE_CHECKING:\n from .typing import AnyCallable\n\n AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)\n ConfigType = Union[None, Type[Any], Dict[str, Any]]\n\n\n@overload\ndef validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:\n ...\n\n\n@overload\ndef validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':\n ...\n\n\ndef validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:\n \"\"\"\n Decorator to validate the arguments passed to a function.\n \"\"\"\n\n def validate(_func: 'AnyCallable') -> 'AnyCallable':\n vd = ValidatedFunction(_func, config)\n\n @wraps(_func)\n def wrapper_function(*args: Any, **kwargs: Any) -> Any:\n return vd.call(*args, **kwargs)\n\n wrapper_function.vd = vd # type: ignore\n wrapper_function.validate = vd.init_model_instance # type: ignore\n wrapper_function.raw_function = vd.raw_function # type: ignore\n wrapper_function.model = vd.model # type: ignore\n return wrapper_function\n\n if func:\n return validate(func)\n else:\n return validate\n\n\nALT_V_ARGS = 'v__args'\nALT_V_KWARGS = 'v__kwargs'\nV_POSITIONAL_ONLY_NAME = 'v__positional_only'\nV_DUPLICATE_KWARGS = 'v__duplicate_kwargs'\n\n\nclass ValidatedFunction:\n def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901\n from inspect import Parameter, signature\n\n parameters: Mapping[str, Parameter] = signature(function).parameters\n\n if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:\n raise ConfigError(\n f'\"{ALT_V_ARGS}\", \"{ALT_V_KWARGS}\", \"{V_POSITIONAL_ONLY_NAME}\" and \"{V_DUPLICATE_KWARGS}\" '\n f'are not permitted as argument names when using the \"{validate_arguments.__name__}\" decorator'\n )\n\n self.raw_function = function\n self.arg_mapping: Dict[int, str] = {}\n self.positional_only_args = set()\n self.v_args_name = 'args'\n self.v_kwargs_name = 'kwargs'\n\n type_hints = get_all_type_hints(function)\n takes_args = False\n takes_kwargs = False\n fields: Dict[str, Tuple[Any, Any]] = {}\n for i, (name, p) in enumerate(parameters.items()):\n if p.annotation is p.empty:\n annotation = Any\n else:\n annotation = type_hints[name]\n\n default = ... 
if p.default is p.empty else p.default\n if p.kind == Parameter.POSITIONAL_ONLY:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_POSITIONAL_ONLY_NAME] = List[str], None\n self.positional_only_args.add(name)\n elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_DUPLICATE_KWARGS] = List[str], None\n elif p.kind == Parameter.KEYWORD_ONLY:\n fields[name] = annotation, default\n elif p.kind == Parameter.VAR_POSITIONAL:\n self.v_args_name = name\n fields[name] = Tuple[annotation, ...], None\n takes_args = True\n else:\n assert p.kind == Parameter.VAR_KEYWORD, p.kind\n self.v_kwargs_name = name\n fields[name] = Dict[str, annotation], None # type: ignore\n takes_kwargs = True\n\n # these checks avoid a clash between \"args\" and a field with that name\n if not takes_args and self.v_args_name in fields:\n self.v_args_name = ALT_V_ARGS\n\n # same with \"kwargs\"\n if not takes_kwargs and self.v_kwargs_name in fields:\n self.v_kwargs_name = ALT_V_KWARGS\n\n if not takes_args:\n # we add the field so validation below can raise the correct exception\n fields[self.v_args_name] = List[Any], None\n\n if not takes_kwargs:\n # same with kwargs\n fields[self.v_kwargs_name] = Dict[Any, Any], None\n\n self.create_model(fields, takes_args, takes_kwargs, config)\n\n def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:\n values = self.build_values(args, kwargs)\n return self.model(**values)\n\n def call(self, *args: Any, **kwargs: Any) -> Any:\n m = self.init_model_instance(*args, **kwargs)\n return self.execute(m)\n\n def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:\n values: Dict[str, Any] = {}\n if args:\n arg_iter = enumerate(args)\n while True:\n try:\n i, a = next(arg_iter)\n except StopIteration:\n break\n arg_name = self.arg_mapping.get(i)\n if arg_name is not None:\n values[arg_name] = a\n else:\n values[self.v_args_name] = [a] + [a for _, a in arg_iter]\n break\n\n var_kwargs = {}\n wrong_positional_args = []\n duplicate_kwargs = []\n non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}\n for k, v in kwargs.items():\n if k in non_var_fields:\n if k in self.positional_only_args:\n wrong_positional_args.append(k)\n if k in values:\n duplicate_kwargs.append(k)\n values[k] = v\n else:\n var_kwargs[k] = v\n\n if var_kwargs:\n values[self.v_kwargs_name] = var_kwargs\n if wrong_positional_args:\n values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args\n if duplicate_kwargs:\n values[V_DUPLICATE_KWARGS] = duplicate_kwargs\n return values\n\n def execute(self, m: BaseModel) -> Any:\n d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}\n var_kwargs = d.pop(self.v_kwargs_name, {})\n\n if self.v_args_name in d:\n args_: List[Any] = []\n in_kwargs = False\n kwargs = {}\n for name, value in d.items():\n if in_kwargs:\n kwargs[name] = value\n elif name == self.v_args_name:\n args_ += value\n in_kwargs = True\n else:\n args_.append(value)\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n elif self.positional_only_args:\n args_ = []\n kwargs = {}\n for name, value in d.items():\n if name in self.positional_only_args:\n args_.append(value)\n else:\n kwargs[name] = value\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n else:\n return self.raw_function(**d, **var_kwargs)\n\n def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 
'ConfigType') -> None:\n pos_args = len(self.arg_mapping)\n\n class CustomConfig:\n pass\n\n if not TYPE_CHECKING: # pragma: no branch\n if isinstance(config, dict):\n CustomConfig = type('Config', (), config) # noqa: F811\n elif config is not None:\n CustomConfig = config # noqa: F811\n\n if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):\n raise ConfigError(\n 'Setting the \"fields\" and \"alias_generator\" property on custom Config for '\n '@validate_arguments is not yet supported, please remove.'\n )\n\n class DecoratorBaseModel(BaseModel):\n @validator(self.v_args_name, check_fields=False, allow_reuse=True)\n def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:\n if takes_args or v is None:\n return v\n\n raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')\n\n @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)\n def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:\n if takes_kwargs or v is None:\n return v\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v.keys()))\n raise TypeError(f'unexpected keyword argument{plural}: {keys}')\n\n @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)\n def check_positional_only(cls, v: Optional[List[str]]) -> None:\n if v is None:\n return\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')\n\n @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)\n def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:\n if v is None:\n return\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'multiple values for argument{plural}: {keys}')\n\n class Config(CustomConfig):\n extra = Extra.forbid\n\n self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)\n", "path": "pydantic/decorator.py"}], "after_files": [{"content": "from functools import wraps\nfrom typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload\n\nfrom . 
import validator\nfrom .config import Extra\nfrom .errors import ConfigError\nfrom .main import BaseModel, create_model\nfrom .typing import get_all_type_hints\nfrom .utils import to_camel\n\n__all__ = ('validate_arguments',)\n\nif TYPE_CHECKING:\n from .typing import AnyCallable\n\n AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable)\n ConfigType = Union[None, Type[Any], Dict[str, Any]]\n\n\n@overload\ndef validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']:\n ...\n\n\n@overload\ndef validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT':\n ...\n\n\ndef validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any:\n \"\"\"\n Decorator to validate the arguments passed to a function.\n \"\"\"\n\n def validate(_func: 'AnyCallable') -> 'AnyCallable':\n vd = ValidatedFunction(_func, config)\n\n @wraps(_func)\n def wrapper_function(*args: Any, **kwargs: Any) -> Any:\n return vd.call(*args, **kwargs)\n\n wrapper_function.vd = vd # type: ignore\n wrapper_function.validate = vd.init_model_instance # type: ignore\n wrapper_function.raw_function = vd.raw_function # type: ignore\n wrapper_function.model = vd.model # type: ignore\n return wrapper_function\n\n if func:\n return validate(func)\n else:\n return validate\n\n\nALT_V_ARGS = 'v__args'\nALT_V_KWARGS = 'v__kwargs'\nV_POSITIONAL_ONLY_NAME = 'v__positional_only'\nV_DUPLICATE_KWARGS = 'v__duplicate_kwargs'\n\n\nclass ValidatedFunction:\n def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901\n from inspect import Parameter, signature\n\n parameters: Mapping[str, Parameter] = signature(function).parameters\n\n if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}:\n raise ConfigError(\n f'\"{ALT_V_ARGS}\", \"{ALT_V_KWARGS}\", \"{V_POSITIONAL_ONLY_NAME}\" and \"{V_DUPLICATE_KWARGS}\" '\n f'are not permitted as argument names when using the \"{validate_arguments.__name__}\" decorator'\n )\n\n self.raw_function = function\n self.arg_mapping: Dict[int, str] = {}\n self.positional_only_args = set()\n self.v_args_name = 'args'\n self.v_kwargs_name = 'kwargs'\n\n type_hints = get_all_type_hints(function)\n takes_args = False\n takes_kwargs = False\n fields: Dict[str, Tuple[Any, Any]] = {}\n for i, (name, p) in enumerate(parameters.items()):\n if p.annotation is p.empty:\n annotation = Any\n else:\n annotation = type_hints[name]\n\n default = ... 
if p.default is p.empty else p.default\n if p.kind == Parameter.POSITIONAL_ONLY:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_POSITIONAL_ONLY_NAME] = List[str], None\n self.positional_only_args.add(name)\n elif p.kind == Parameter.POSITIONAL_OR_KEYWORD:\n self.arg_mapping[i] = name\n fields[name] = annotation, default\n fields[V_DUPLICATE_KWARGS] = List[str], None\n elif p.kind == Parameter.KEYWORD_ONLY:\n fields[name] = annotation, default\n elif p.kind == Parameter.VAR_POSITIONAL:\n self.v_args_name = name\n fields[name] = Tuple[annotation, ...], None\n takes_args = True\n else:\n assert p.kind == Parameter.VAR_KEYWORD, p.kind\n self.v_kwargs_name = name\n fields[name] = Dict[str, annotation], None # type: ignore\n takes_kwargs = True\n\n # these checks avoid a clash between \"args\" and a field with that name\n if not takes_args and self.v_args_name in fields:\n self.v_args_name = ALT_V_ARGS\n\n # same with \"kwargs\"\n if not takes_kwargs and self.v_kwargs_name in fields:\n self.v_kwargs_name = ALT_V_KWARGS\n\n if not takes_args:\n # we add the field so validation below can raise the correct exception\n fields[self.v_args_name] = List[Any], None\n\n if not takes_kwargs:\n # same with kwargs\n fields[self.v_kwargs_name] = Dict[Any, Any], None\n\n self.create_model(fields, takes_args, takes_kwargs, config)\n\n def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel:\n values = self.build_values(args, kwargs)\n return self.model(**values)\n\n def call(self, *args: Any, **kwargs: Any) -> Any:\n m = self.init_model_instance(*args, **kwargs)\n return self.execute(m)\n\n def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]:\n values: Dict[str, Any] = {}\n if args:\n arg_iter = enumerate(args)\n while True:\n try:\n i, a = next(arg_iter)\n except StopIteration:\n break\n arg_name = self.arg_mapping.get(i)\n if arg_name is not None:\n values[arg_name] = a\n else:\n values[self.v_args_name] = [a] + [a for _, a in arg_iter]\n break\n\n var_kwargs = {}\n wrong_positional_args = []\n duplicate_kwargs = []\n non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name}\n for k, v in kwargs.items():\n if k in non_var_fields:\n if k in self.positional_only_args:\n wrong_positional_args.append(k)\n if k in values:\n duplicate_kwargs.append(k)\n values[k] = v\n else:\n var_kwargs[k] = v\n\n if var_kwargs:\n values[self.v_kwargs_name] = var_kwargs\n if wrong_positional_args:\n values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args\n if duplicate_kwargs:\n values[V_DUPLICATE_KWARGS] = duplicate_kwargs\n return values\n\n def execute(self, m: BaseModel) -> Any:\n d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory}\n var_kwargs = d.pop(self.v_kwargs_name, {})\n\n if self.v_args_name in d:\n args_: List[Any] = []\n in_kwargs = False\n kwargs = {}\n for name, value in d.items():\n if in_kwargs:\n kwargs[name] = value\n elif name == self.v_args_name:\n args_ += value\n in_kwargs = True\n else:\n args_.append(value)\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n elif self.positional_only_args:\n args_ = []\n kwargs = {}\n for name, value in d.items():\n if name in self.positional_only_args:\n args_.append(value)\n else:\n kwargs[name] = value\n return self.raw_function(*args_, **kwargs, **var_kwargs)\n else:\n return self.raw_function(**d, **var_kwargs)\n\n def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 
'ConfigType') -> None:\n pos_args = len(self.arg_mapping)\n\n class CustomConfig:\n pass\n\n if not TYPE_CHECKING: # pragma: no branch\n if isinstance(config, dict):\n CustomConfig = type('Config', (), config) # noqa: F811\n elif config is not None:\n CustomConfig = config # noqa: F811\n\n if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'):\n raise ConfigError(\n 'Setting the \"fields\" and \"alias_generator\" property on custom Config for '\n '@validate_arguments is not yet supported, please remove.'\n )\n\n class DecoratorBaseModel(BaseModel):\n @validator(self.v_args_name, check_fields=False, allow_reuse=True)\n def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]:\n if takes_args or v is None:\n return v\n\n raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given')\n\n @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True)\n def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]:\n if takes_kwargs or v is None:\n return v\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v.keys()))\n raise TypeError(f'unexpected keyword argument{plural}: {keys}')\n\n @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True)\n def check_positional_only(cls, v: Optional[List[str]]) -> None:\n if v is None:\n return\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}')\n\n @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True)\n def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None:\n if v is None:\n return\n\n plural = '' if len(v) == 1 else 's'\n keys = ', '.join(map(repr, v))\n raise TypeError(f'multiple values for argument{plural}: {keys}')\n\n class Config(CustomConfig):\n extra = getattr(CustomConfig, 'extra', Extra.forbid)\n\n self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields)\n", "path": "pydantic/decorator.py"}]}
3,693
128
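The pydantic row above changes `DecoratorBaseModel.Config` so that an `extra` setting supplied through `validate_arguments(config=...)` is no longer overridden by `Extra.forbid`. A short sketch of the resulting behaviour, mirroring the reproduction from the issue, is below; it assumes a pydantic 1.x build that already contains the `getattr(CustomConfig, 'extra', Extra.forbid)` change and that `typing_extensions` is installed.

```python
# Reproduction from the issue, expected to pass once Config.extra honours the
# user-supplied config (see the golden diff above).
from typing_extensions import TypedDict
from pydantic import Extra, validate_arguments

class TypedTest(TypedDict):
    y: str

@validate_arguments(config={'extra': Extra.allow})
def accept(other: TypedTest):
    return other

# Previously raised ValidationError ("extra fields not permitted" for other -> z);
# with the patch the extra key is allowed through validation.
print(accept(other={'y': 'b', 'z': 'a'}))
```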
gh_patches_debug_30879
rasdani/github-patches
git_diff
localstack__localstack-2487
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- S3: POST/PUT to bucket URLs don't route correctly on port 4566 <!-- Love localstack? Please consider supporting our collective: 👉 https://opencollective.com/localstack/donate --> # Type of request: This is a ... [x] bug report [ ] feature request # Detailed description We noticed this while converting a service to use Localstack for tests. The service generates S3 presigned post URLs. We're able to create and use presigned S3 URLs on port 4572 (deprecated S3 port), but not 4566, the new shared one. The same issue happens with PUT requests, which is the simplest to repro. While just using 4572 works, this does force us to use the deprecated port and I figured it was worth opening an issue because of the discrepancy. ## Expected behavior POST http://localhost:4566/hello (with appropriate form params) should return a 204, in the same way that POST http://localhost:4572/hello does. PUT http://localhost:4566/hello should create a bucket and return a 200, in the same way that PUT http://localhost:4572/hello does. ## Actual behavior Both PUT and POST http://localhost:4566/hello return a 404. In the localstack logs: 2020-05-20T13:37:41:INFO:localstack.services.edge: Unable to find forwarding rule for host "localhost:4566", path "/hello", target header "", auth header "" # Steps to reproduce ```bash $ curl -i -XPUT http://localhost:4572/hello HTTP/1.1 200 OK Server: BaseHTTP/0.6 Python/3.8.2 Date: Wed, 20 May 2020 13:43:17 GMT Content-Type: application/xml; charset=utf-8 content-length: 159 Access-Control-Allow-Origin: * Last-Modified: Wed, 20 May 2020 13:43:17 GMT x-amz-request-id: 0ABD347D7A4E0697 x-amz-id-2: MzRISOwyjmnup0ABD347D7A4E06977/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp Access-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH Access-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging Access-Control-Expose-Headers: x-amz-version-id <CreateBucketResponse xmlns="http://s3.amazonaws.com/doc/2006-03-01"><CreateBucketResponse><Bucket>hello</Bucket></CreateBucketResponse></CreateBucketResponse>% $ curl -i -XPUT http://localhost:4566/hello HTTP/1.1 404 Not Found Server: BaseHTTP/0.6 Python/3.8.2 Date: Wed, 20 May 2020 13:43:22 GMT Content-Length: 21 Access-Control-Allow-Origin: * Access-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH Access-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging Access-Control-Expose-Headers: x-amz-version-id {"status": "running"}% ``` ## Command used to start LocalStack `localstack start` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `localstack/services/edge.py` Content: ``` 1 import re 2 import os 3 import sys 4 import json 5 import logging 6 from requests.models import Response 7 from localstack import config 8 from localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER 9 from localstack.utils.common import run, is_root, TMP_THREADS 10 from localstack.utils.common import safe_requests as requests 11 from localstack.services.generic_proxy import ProxyListener, GenericProxy 12 13 LOG = logging.getLogger(__name__) 14 15 # Header to indicate that the process should kill itself. This is required because if 16 # this process is started as root, then we cannot kill it from a non-root process 17 HEADER_KILL_SIGNAL = 'x-localstack-kill' 18 19 20 class ProxyListenerEdge(ProxyListener): 21 22 def forward_request(self, method, path, data, headers): 23 if method == 'OPTIONS': 24 return 200 25 26 # kill the process if we receive this header 27 headers.get(HEADER_KILL_SIGNAL) and os._exit(0) 28 29 target = headers.get('x-amz-target', '') 30 auth_header = headers.get('authorization', '') 31 host = headers.get('host', '') 32 headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host 33 34 # extract API details 35 api, port, path, host = get_api_from_headers(headers, path) 36 37 if port and int(port) < 0: 38 return 404 39 40 if not port: 41 # detect S3 presigned URLs 42 if 'AWSAccessKeyId=' in path or 'Signature=' in path: 43 port = config.PORT_S3 44 # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>` 45 # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first 46 if method == 'GET' and '/' in path.strip('/'): 47 port = config.PORT_S3 48 49 if not port: 50 if api in ['', None, '_unknown_']: 51 LOG.info(('Unable to find forwarding rule for host "%s", path "%s", ' 52 'target header "%s", auth header "%s"') % (host, path, target, auth_header)) 53 else: 54 LOG.info(('Unable to determine forwarding port for API "%s" - please ' 55 'make sure this API is enabled via the SERVICES configuration') % api) 56 response = Response() 57 response.status_code = 404 58 response._content = '{"status": "running"}' 59 return response 60 61 use_ssl = config.USE_SSL 62 63 connect_host = '%s:%s' % (config.HOSTNAME, port) 64 url = 'http%s://%s%s' % ('s' if use_ssl else '', connect_host, path) 65 headers['Host'] = host 66 function = getattr(requests, method.lower()) 67 if isinstance(data, dict): 68 data = json.dumps(data) 69 70 response = function(url, data=data, headers=headers, verify=False) 71 return response 72 73 74 def get_api_from_headers(headers, path=None): 75 target = headers.get('x-amz-target', '') 76 host = headers.get('host', '') 77 auth_header = headers.get('authorization', '') 78 ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '') 79 path = path or '/' 80 81 # initialize result 82 result = '_unknown_', 0 83 84 # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html 85 try: 86 credential_scope = auth_header.split(',')[0].split()[1] 87 _, _, _, service, _ = credential_scope.split('/') 88 result = service, get_service_port_for_account(service, headers) 89 except Exception: 90 pass 91 92 result_before = result 93 94 # Fallback rules and route customizations applied below 95 96 if host.endswith('cloudfront.net'): 97 path = path or '/' 98 result = 'cloudfront', config.PORT_CLOUDFRONT 99 elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' 
in host: 100 result = 'cognito-idp', config.PORT_COGNITO_IDP 101 elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' in host: 102 result = 'cognito-identity', config.PORT_COGNITO_IDENTITY 103 elif result[0] == 's3' or re.match(r'.*s3(\-website)?\.([^\.]+\.)?amazonaws.com', host): 104 host = re.sub(r's3-website\..*\.amazonaws', 's3.amazonaws', host) 105 result = 's3', config.PORT_S3 106 elif result[0] == 'states' in auth_header or host.startswith('states.'): 107 result = 'stepfunctions', config.PORT_STEPFUNCTIONS 108 elif '.execute-api.' in host: 109 result = 'apigateway', config.PORT_APIGATEWAY 110 elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'): 111 result = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS 112 elif ls_target == 'web' or path == '/graph': 113 result = 'web', config.PORT_WEB_UI 114 115 return result[0], result_before[1] or result[1], path, host 116 117 118 def get_service_port_for_account(service, headers): 119 # assume we're only using a single account, hence return the static port mapping from config.py 120 return config.service_port(service) 121 122 123 def do_start_edge(port, use_ssl, asynchronous=False): 124 try: 125 # start local DNS server, if present 126 from localstack_ext.services import dns_server 127 dns_server.start_servers() 128 except Exception: 129 pass 130 131 # get port and start Edge 132 print('Starting edge router (http%s port %s)...' % ('s' if use_ssl else '', port)) 133 # use use=True here because our proxy allows both, HTTP and HTTPS traffic 134 proxy = GenericProxy(port, ssl=True, update_listener=ProxyListenerEdge()) 135 proxy.start() 136 if not asynchronous: 137 proxy.join() 138 return proxy 139 140 141 def can_use_sudo(): 142 try: 143 run('echo | sudo -S echo', print_error=False) 144 return True 145 except Exception: 146 return False 147 148 149 def ensure_can_use_sudo(): 150 if not is_root() and not can_use_sudo(): 151 print('Please enter your sudo password (required to configure local network):') 152 run('sudo echo', stdin=True) 153 154 155 def start_edge(port=None, use_ssl=True, asynchronous=False): 156 if not port: 157 port = config.EDGE_PORT 158 if config.EDGE_PORT_HTTP: 159 do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True) 160 if port > 1024 or is_root(): 161 return do_start_edge(port, use_ssl, asynchronous=asynchronous) 162 163 # process requires priviledged port but we're not root -> try running as sudo 164 165 class Terminator(object): 166 167 def stop(self, quiet=True): 168 try: 169 url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port) 170 requests.verify_ssl = False 171 requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'}) 172 except Exception: 173 pass 174 175 # make sure we can run sudo commands 176 ensure_can_use_sudo() 177 178 # register a signal handler to terminate the sudo process later on 179 TMP_THREADS.append(Terminator()) 180 181 # start the process as sudo 182 sudo_cmd = 'sudo ' 183 python_cmd = sys.executable 184 cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port) 185 process = run(cmd, asynchronous=asynchronous) 186 return process 187 188 189 if __name__ == '__main__': 190 logging.basicConfig() 191 start_edge(int(sys.argv[1])) 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/localstack/services/edge.py b/localstack/services/edge.py --- a/localstack/services/edge.py +++ b/localstack/services/edge.py @@ -6,7 +6,7 @@ from requests.models import Response from localstack import config from localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER -from localstack.utils.common import run, is_root, TMP_THREADS +from localstack.utils.common import run, is_root, TMP_THREADS, to_bytes from localstack.utils.common import safe_requests as requests from localstack.services.generic_proxy import ProxyListener, GenericProxy @@ -41,10 +41,18 @@ # detect S3 presigned URLs if 'AWSAccessKeyId=' in path or 'Signature=' in path: port = config.PORT_S3 - # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>` # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first - if method == 'GET' and '/' in path.strip('/'): + stripped = path.strip('/') + if method == 'GET' and '/' in stripped: + # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>` port = config.PORT_S3 + if stripped and '/' not in stripped: + if method == 'PUT': + # assume that this is an S3 PUT bucket request with URL path `/<bucket>` + port = config.PORT_S3 + elif method == 'POST' and to_bytes('key=') in to_bytes(data or ''): + # assume that this is an S3 POST request with form parameters in the body + port = config.PORT_S3 if not port: if api in ['', None, '_unknown_']:
{"golden_diff": "diff --git a/localstack/services/edge.py b/localstack/services/edge.py\n--- a/localstack/services/edge.py\n+++ b/localstack/services/edge.py\n@@ -6,7 +6,7 @@\n from requests.models import Response\n from localstack import config\n from localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER\n-from localstack.utils.common import run, is_root, TMP_THREADS\n+from localstack.utils.common import run, is_root, TMP_THREADS, to_bytes\n from localstack.utils.common import safe_requests as requests\n from localstack.services.generic_proxy import ProxyListener, GenericProxy\n \n@@ -41,10 +41,18 @@\n # detect S3 presigned URLs\n if 'AWSAccessKeyId=' in path or 'Signature=' in path:\n port = config.PORT_S3\n- # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first\n- if method == 'GET' and '/' in path.strip('/'):\n+ stripped = path.strip('/')\n+ if method == 'GET' and '/' in stripped:\n+ # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n port = config.PORT_S3\n+ if stripped and '/' not in stripped:\n+ if method == 'PUT':\n+ # assume that this is an S3 PUT bucket request with URL path `/<bucket>`\n+ port = config.PORT_S3\n+ elif method == 'POST' and to_bytes('key=') in to_bytes(data or ''):\n+ # assume that this is an S3 POST request with form parameters in the body\n+ port = config.PORT_S3\n \n if not port:\n if api in ['', None, '_unknown_']:\n", "issue": "S3: POST/PUT to bucket URLs don't route correctly on port 4566\n<!-- Love localstack? Please consider supporting our collective:\r\n\ud83d\udc49 https://opencollective.com/localstack/donate -->\r\n\r\n# Type of request: This is a ...\r\n\r\n[x] bug report\r\n[ ] feature request\r\n\r\n# Detailed description\r\n\r\nWe noticed this while converting a service to use Localstack for tests. The service generates S3 presigned post URLs. We're able to create and use presigned S3 URLs on port 4572 (deprecated S3 port), but not 4566, the new shared one. 
The same issue happens with PUT requests, which is the simplest to repro.\r\n\r\nWhile just using 4572 works, this does force us to use the deprecated port and I figured it was worth opening an issue because of the discrepancy.\r\n\r\n## Expected behavior\r\n\r\nPOST http://localhost:4566/hello (with appropriate form params) should return a 204, in the same way that POST http://localhost:4572/hello does.\r\n\r\nPUT http://localhost:4566/hello should create a bucket and return a 200, in the same way that PUT http://localhost:4572/hello does.\r\n\r\n## Actual behavior\r\n\r\nBoth PUT and POST http://localhost:4566/hello return a 404.\r\n\r\nIn the localstack logs:\r\n2020-05-20T13:37:41:INFO:localstack.services.edge: Unable to find forwarding rule for host \"localhost:4566\", path \"/hello\", target header \"\", auth header \"\"\r\n\r\n# Steps to reproduce\r\n\r\n```bash\r\n$ curl -i -XPUT http://localhost:4572/hello\r\nHTTP/1.1 200 OK\r\nServer: BaseHTTP/0.6 Python/3.8.2\r\nDate: Wed, 20 May 2020 13:43:17 GMT\r\nContent-Type: application/xml; charset=utf-8\r\ncontent-length: 159\r\nAccess-Control-Allow-Origin: *\r\nLast-Modified: Wed, 20 May 2020 13:43:17 GMT\r\nx-amz-request-id: 0ABD347D7A4E0697\r\nx-amz-id-2: MzRISOwyjmnup0ABD347D7A4E06977/JypPGXLh0OVFGcJaaO3KW/hRAqKOpIEEp\r\nAccess-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH\r\nAccess-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging\r\nAccess-Control-Expose-Headers: x-amz-version-id\r\n\r\n<CreateBucketResponse xmlns=\"http://s3.amazonaws.com/doc/2006-03-01\"><CreateBucketResponse><Bucket>hello</Bucket></CreateBucketResponse></CreateBucketResponse>% \r\n\r\n$ curl -i -XPUT http://localhost:4566/hello\r\nHTTP/1.1 404 Not Found\r\nServer: BaseHTTP/0.6 Python/3.8.2\r\nDate: Wed, 20 May 2020 13:43:22 GMT\r\nContent-Length: 21\r\nAccess-Control-Allow-Origin: *\r\nAccess-Control-Allow-Methods: HEAD,GET,PUT,POST,DELETE,OPTIONS,PATCH\r\nAccess-Control-Allow-Headers: authorization,content-type,content-md5,cache-control,x-amz-content-sha256,x-amz-date,x-amz-security-token,x-amz-user-agent,x-amz-target,x-amz-acl,x-amz-version-id,x-localstack-target,x-amz-tagging\r\nAccess-Control-Expose-Headers: x-amz-version-id\r\n\r\n{\"status\": \"running\"}%\r\n```\r\n\r\n## Command used to start LocalStack\r\n\r\n`localstack start`\n", "before_files": [{"content": "import re\nimport os\nimport sys\nimport json\nimport logging\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER\nfrom localstack.utils.common import run, is_root, TMP_THREADS\nfrom localstack.utils.common import safe_requests as requests\nfrom localstack.services.generic_proxy import ProxyListener, GenericProxy\n\nLOG = logging.getLogger(__name__)\n\n# Header to indicate that the process should kill itself. 
This is required because if\n# this process is started as root, then we cannot kill it from a non-root process\nHEADER_KILL_SIGNAL = 'x-localstack-kill'\n\n\nclass ProxyListenerEdge(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n if method == 'OPTIONS':\n return 200\n\n # kill the process if we receive this header\n headers.get(HEADER_KILL_SIGNAL) and os._exit(0)\n\n target = headers.get('x-amz-target', '')\n auth_header = headers.get('authorization', '')\n host = headers.get('host', '')\n headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host\n\n # extract API details\n api, port, path, host = get_api_from_headers(headers, path)\n\n if port and int(port) < 0:\n return 404\n\n if not port:\n # detect S3 presigned URLs\n if 'AWSAccessKeyId=' in path or 'Signature=' in path:\n port = config.PORT_S3\n # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first\n if method == 'GET' and '/' in path.strip('/'):\n port = config.PORT_S3\n\n if not port:\n if api in ['', None, '_unknown_']:\n LOG.info(('Unable to find forwarding rule for host \"%s\", path \"%s\", '\n 'target header \"%s\", auth header \"%s\"') % (host, path, target, auth_header))\n else:\n LOG.info(('Unable to determine forwarding port for API \"%s\" - please '\n 'make sure this API is enabled via the SERVICES configuration') % api)\n response = Response()\n response.status_code = 404\n response._content = '{\"status\": \"running\"}'\n return response\n\n use_ssl = config.USE_SSL\n\n connect_host = '%s:%s' % (config.HOSTNAME, port)\n url = 'http%s://%s%s' % ('s' if use_ssl else '', connect_host, path)\n headers['Host'] = host\n function = getattr(requests, method.lower())\n if isinstance(data, dict):\n data = json.dumps(data)\n\n response = function(url, data=data, headers=headers, verify=False)\n return response\n\n\ndef get_api_from_headers(headers, path=None):\n target = headers.get('x-amz-target', '')\n host = headers.get('host', '')\n auth_header = headers.get('authorization', '')\n ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '')\n path = path or '/'\n\n # initialize result\n result = '_unknown_', 0\n\n # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html\n try:\n credential_scope = auth_header.split(',')[0].split()[1]\n _, _, _, service, _ = credential_scope.split('/')\n result = service, get_service_port_for_account(service, headers)\n except Exception:\n pass\n\n result_before = result\n\n # Fallback rules and route customizations applied below\n\n if host.endswith('cloudfront.net'):\n path = path or '/'\n result = 'cloudfront', config.PORT_CLOUDFRONT\n elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' in host:\n result = 'cognito-idp', config.PORT_COGNITO_IDP\n elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' in host:\n result = 'cognito-identity', config.PORT_COGNITO_IDENTITY\n elif result[0] == 's3' or re.match(r'.*s3(\\-website)?\\.([^\\.]+\\.)?amazonaws.com', host):\n host = re.sub(r's3-website\\..*\\.amazonaws', 's3.amazonaws', host)\n result = 's3', config.PORT_S3\n elif result[0] == 'states' in auth_header or host.startswith('states.'):\n result = 'stepfunctions', config.PORT_STEPFUNCTIONS\n elif '.execute-api.' 
in host:\n result = 'apigateway', config.PORT_APIGATEWAY\n elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'):\n result = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS\n elif ls_target == 'web' or path == '/graph':\n result = 'web', config.PORT_WEB_UI\n\n return result[0], result_before[1] or result[1], path, host\n\n\ndef get_service_port_for_account(service, headers):\n # assume we're only using a single account, hence return the static port mapping from config.py\n return config.service_port(service)\n\n\ndef do_start_edge(port, use_ssl, asynchronous=False):\n try:\n # start local DNS server, if present\n from localstack_ext.services import dns_server\n dns_server.start_servers()\n except Exception:\n pass\n\n # get port and start Edge\n print('Starting edge router (http%s port %s)...' % ('s' if use_ssl else '', port))\n # use use=True here because our proxy allows both, HTTP and HTTPS traffic\n proxy = GenericProxy(port, ssl=True, update_listener=ProxyListenerEdge())\n proxy.start()\n if not asynchronous:\n proxy.join()\n return proxy\n\n\ndef can_use_sudo():\n try:\n run('echo | sudo -S echo', print_error=False)\n return True\n except Exception:\n return False\n\n\ndef ensure_can_use_sudo():\n if not is_root() and not can_use_sudo():\n print('Please enter your sudo password (required to configure local network):')\n run('sudo echo', stdin=True)\n\n\ndef start_edge(port=None, use_ssl=True, asynchronous=False):\n if not port:\n port = config.EDGE_PORT\n if config.EDGE_PORT_HTTP:\n do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True)\n if port > 1024 or is_root():\n return do_start_edge(port, use_ssl, asynchronous=asynchronous)\n\n # process requires priviledged port but we're not root -> try running as sudo\n\n class Terminator(object):\n\n def stop(self, quiet=True):\n try:\n url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port)\n requests.verify_ssl = False\n requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'})\n except Exception:\n pass\n\n # make sure we can run sudo commands\n ensure_can_use_sudo()\n\n # register a signal handler to terminate the sudo process later on\n TMP_THREADS.append(Terminator())\n\n # start the process as sudo\n sudo_cmd = 'sudo '\n python_cmd = sys.executable\n cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port)\n process = run(cmd, asynchronous=asynchronous)\n return process\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n start_edge(int(sys.argv[1]))\n", "path": "localstack/services/edge.py"}], "after_files": [{"content": "import re\nimport os\nimport sys\nimport json\nimport logging\nfrom requests.models import Response\nfrom localstack import config\nfrom localstack.constants import HEADER_LOCALSTACK_TARGET, HEADER_LOCALSTACK_EDGE_URL, LOCALSTACK_ROOT_FOLDER\nfrom localstack.utils.common import run, is_root, TMP_THREADS, to_bytes\nfrom localstack.utils.common import safe_requests as requests\nfrom localstack.services.generic_proxy import ProxyListener, GenericProxy\n\nLOG = logging.getLogger(__name__)\n\n# Header to indicate that the process should kill itself. 
This is required because if\n# this process is started as root, then we cannot kill it from a non-root process\nHEADER_KILL_SIGNAL = 'x-localstack-kill'\n\n\nclass ProxyListenerEdge(ProxyListener):\n\n def forward_request(self, method, path, data, headers):\n if method == 'OPTIONS':\n return 200\n\n # kill the process if we receive this header\n headers.get(HEADER_KILL_SIGNAL) and os._exit(0)\n\n target = headers.get('x-amz-target', '')\n auth_header = headers.get('authorization', '')\n host = headers.get('host', '')\n headers[HEADER_LOCALSTACK_EDGE_URL] = 'https://%s' % host\n\n # extract API details\n api, port, path, host = get_api_from_headers(headers, path)\n\n if port and int(port) < 0:\n return 404\n\n if not port:\n # detect S3 presigned URLs\n if 'AWSAccessKeyId=' in path or 'Signature=' in path:\n port = config.PORT_S3\n # TODO: move S3 public URLs to a separate port/endpoint, OR check ACLs here first\n stripped = path.strip('/')\n if method == 'GET' and '/' in stripped:\n # assume that this is an S3 GET request with URL path `/<bucket>/<key ...>`\n port = config.PORT_S3\n if stripped and '/' not in stripped:\n if method == 'PUT':\n # assume that this is an S3 PUT bucket request with URL path `/<bucket>`\n port = config.PORT_S3\n elif method == 'POST' and to_bytes('key=') in to_bytes(data or ''):\n # assume that this is an S3 POST request with form parameters in the body\n port = config.PORT_S3\n\n if not port:\n if api in ['', None, '_unknown_']:\n LOG.info(('Unable to find forwarding rule for host \"%s\", path \"%s\", '\n 'target header \"%s\", auth header \"%s\"') % (host, path, target, auth_header))\n else:\n LOG.info(('Unable to determine forwarding port for API \"%s\" - please '\n 'make sure this API is enabled via the SERVICES configuration') % api)\n response = Response()\n response.status_code = 404\n response._content = '{\"status\": \"running\"}'\n return response\n\n use_ssl = config.USE_SSL\n\n connect_host = '%s:%s' % (config.HOSTNAME, port)\n url = 'http%s://%s%s' % ('s' if use_ssl else '', connect_host, path)\n headers['Host'] = host\n function = getattr(requests, method.lower())\n if isinstance(data, dict):\n data = json.dumps(data)\n\n response = function(url, data=data, headers=headers, verify=False)\n return response\n\n\ndef get_api_from_headers(headers, path=None):\n target = headers.get('x-amz-target', '')\n host = headers.get('host', '')\n auth_header = headers.get('authorization', '')\n ls_target = headers.get(HEADER_LOCALSTACK_TARGET, '')\n path = path or '/'\n\n # initialize result\n result = '_unknown_', 0\n\n # https://docs.aws.amazon.com/general/latest/gr/sigv4-signed-request-examples.html\n try:\n credential_scope = auth_header.split(',')[0].split()[1]\n _, _, _, service, _ = credential_scope.split('/')\n result = service, get_service_port_for_account(service, headers)\n except Exception:\n pass\n\n result_before = result\n\n # Fallback rules and route customizations applied below\n\n if host.endswith('cloudfront.net'):\n path = path or '/'\n result = 'cloudfront', config.PORT_CLOUDFRONT\n elif target.startswith('AWSCognitoIdentityProviderService') or 'cognito-idp.' in host:\n result = 'cognito-idp', config.PORT_COGNITO_IDP\n elif target.startswith('AWSCognitoIdentityService') or 'cognito-identity.' 
in host:\n result = 'cognito-identity', config.PORT_COGNITO_IDENTITY\n elif result[0] == 's3' or re.match(r'.*s3(\\-website)?\\.([^\\.]+\\.)?amazonaws.com', host):\n host = re.sub(r's3-website\\..*\\.amazonaws', 's3.amazonaws', host)\n result = 's3', config.PORT_S3\n elif result[0] == 'states' in auth_header or host.startswith('states.'):\n result = 'stepfunctions', config.PORT_STEPFUNCTIONS\n elif '.execute-api.' in host:\n result = 'apigateway', config.PORT_APIGATEWAY\n elif target.startswith('DynamoDBStreams') or host.startswith('streams.dynamodb.'):\n result = 'dynamodbstreams', config.PORT_DYNAMODBSTREAMS\n elif ls_target == 'web' or path == '/graph':\n result = 'web', config.PORT_WEB_UI\n\n return result[0], result_before[1] or result[1], path, host\n\n\ndef get_service_port_for_account(service, headers):\n # assume we're only using a single account, hence return the static port mapping from config.py\n return config.service_port(service)\n\n\ndef do_start_edge(port, use_ssl, asynchronous=False):\n try:\n # start local DNS server, if present\n from localstack_ext.services import dns_server\n dns_server.start_servers()\n except Exception:\n pass\n\n # get port and start Edge\n print('Starting edge router (http%s port %s)...' % ('s' if use_ssl else '', port))\n # use use=True here because our proxy allows both, HTTP and HTTPS traffic\n proxy = GenericProxy(port, ssl=True, update_listener=ProxyListenerEdge())\n proxy.start()\n if not asynchronous:\n proxy.join()\n return proxy\n\n\ndef can_use_sudo():\n try:\n run('echo | sudo -S echo', print_error=False)\n return True\n except Exception:\n return False\n\n\ndef ensure_can_use_sudo():\n if not is_root() and not can_use_sudo():\n print('Please enter your sudo password (required to configure local network):')\n run('sudo echo', stdin=True)\n\n\ndef start_edge(port=None, use_ssl=True, asynchronous=False):\n if not port:\n port = config.EDGE_PORT\n if config.EDGE_PORT_HTTP:\n do_start_edge(config.EDGE_PORT_HTTP, use_ssl=False, asynchronous=True)\n if port > 1024 or is_root():\n return do_start_edge(port, use_ssl, asynchronous=asynchronous)\n\n # process requires priviledged port but we're not root -> try running as sudo\n\n class Terminator(object):\n\n def stop(self, quiet=True):\n try:\n url = 'http%s://localhost:%s' % ('s' if use_ssl else '', port)\n requests.verify_ssl = False\n requests.post(url, headers={HEADER_KILL_SIGNAL: 'kill'})\n except Exception:\n pass\n\n # make sure we can run sudo commands\n ensure_can_use_sudo()\n\n # register a signal handler to terminate the sudo process later on\n TMP_THREADS.append(Terminator())\n\n # start the process as sudo\n sudo_cmd = 'sudo '\n python_cmd = sys.executable\n cmd = '%sPYTHONPATH=.:%s %s %s %s' % (sudo_cmd, LOCALSTACK_ROOT_FOLDER, python_cmd, __file__, port)\n process = run(cmd, asynchronous=asynchronous)\n return process\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n start_edge(int(sys.argv[1]))\n", "path": "localstack/services/edge.py"}]}
3,315
405
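The heuristic added by the golden diff in the record above can be exercised in isolation. The sketch below re-implements the path-based S3 detection as a free function so the failing PUT/POST cases from the issue can be checked without running LocalStack; the function name `looks_like_s3_request` and the inlined test values are illustrative only and do not appear in the localstack codebase.

```python
# Standalone sketch of the S3 routing heuristic described in the diff above.
# Assumes `data` is a byte string (the real edge proxy may also receive str or dict);
# `looks_like_s3_request` is an invented name, not part of localstack.
def looks_like_s3_request(method, path, data=b""):
    # S3 presigned URLs carry their signature in the query string
    if "AWSAccessKeyId=" in path or "Signature=" in path:
        return True
    stripped = path.strip("/")
    if method == "GET" and "/" in stripped:
        # GET /<bucket>/<key ...> -- object download
        return True
    if stripped and "/" not in stripped:
        if method == "PUT":
            # PUT /<bucket> -- bucket creation (one of the failing cases from the issue)
            return True
        if method == "POST" and b"key=" in (data or b""):
            # POST /<bucket> with form fields -- browser-based upload
            return True
    return False


# The two requests from the bug report, plus a non-S3 health-check path
assert looks_like_s3_request("PUT", "/hello")
assert looks_like_s3_request("POST", "/hello", b"key=test.txt&policy=...")
assert not looks_like_s3_request("GET", "/")
```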
gh_patches_debug_27244
rasdani/github-patches
git_diff
falconry__falcon-1182
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop support for Python 3.3 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import glob 2 import imp 3 import io 4 import os 5 from os import path 6 import re 7 import sys 8 9 from setuptools import Extension, find_packages, setup 10 11 MYDIR = path.abspath(os.path.dirname(__file__)) 12 13 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py')) 14 VERSION = VERSION.__version__ 15 16 # NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse 17 REQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2'] 18 19 try: 20 sys.pypy_version_info 21 PYPY = True 22 except AttributeError: 23 PYPY = False 24 25 if PYPY: 26 CYTHON = False 27 else: 28 try: 29 from Cython.Distutils import build_ext 30 CYTHON = True 31 except ImportError: 32 # TODO(kgriffs): pip now ignores all output, so the user 33 # may not see this message. See also: 34 # 35 # https://github.com/pypa/pip/issues/2732 36 # 37 print('\nNOTE: Cython not installed. ' 38 'Falcon will still work fine, but may run ' 39 'a bit slower.\n') 40 CYTHON = False 41 42 if CYTHON: 43 def list_modules(dirname): 44 filenames = glob.glob(path.join(dirname, '*.py')) 45 46 module_names = [] 47 for name in filenames: 48 module, ext = path.splitext(path.basename(name)) 49 if module != '__init__': 50 module_names.append(module) 51 52 return module_names 53 54 package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media'] 55 ext_modules = [ 56 Extension( 57 package + '.' + module, 58 [path.join(*(package.split('.') + [module + '.py']))] 59 ) 60 for package in package_names 61 for module in list_modules(path.join(MYDIR, *package.split('.'))) 62 ] 63 64 cmdclass = {'build_ext': build_ext} 65 66 else: 67 cmdclass = {} 68 ext_modules = [] 69 70 71 def load_description(): 72 in_raw = False 73 74 description_lines = [] 75 76 # NOTE(kgriffs): PyPI does not support the raw directive 77 for readme_line in io.open('README.rst', 'r', encoding='utf-8'): 78 if readme_line.startswith('.. 
raw::'): 79 in_raw = True 80 elif in_raw: 81 if readme_line and not re.match('\s', readme_line): 82 in_raw = False 83 84 if not in_raw: 85 description_lines.append(readme_line) 86 87 return ''.join(description_lines) 88 89 90 setup( 91 name='falcon', 92 version=VERSION, 93 description='An unladen web framework for building APIs and app backends.', 94 long_description=load_description(), 95 classifiers=[ 96 'Development Status :: 5 - Production/Stable', 97 'Environment :: Web Environment', 98 'Natural Language :: English', 99 'Intended Audience :: Developers', 100 'Intended Audience :: System Administrators', 101 'License :: OSI Approved :: Apache Software License', 102 'Operating System :: MacOS :: MacOS X', 103 'Operating System :: Microsoft :: Windows', 104 'Operating System :: POSIX', 105 'Topic :: Internet :: WWW/HTTP :: WSGI', 106 'Topic :: Software Development :: Libraries :: Application Frameworks', 107 'Programming Language :: Python', 108 'Programming Language :: Python :: Implementation :: CPython', 109 'Programming Language :: Python :: Implementation :: PyPy', 110 'Programming Language :: Python :: 2.7', 111 'Programming Language :: Python :: 3.3', 112 'Programming Language :: Python :: 3.4', 113 'Programming Language :: Python :: 3.5', 114 'Programming Language :: Python :: 3.6', 115 ], 116 keywords='wsgi web api framework rest http cloud', 117 author='Kurt Griffiths', 118 author_email='[email protected]', 119 url='http://falconframework.org', 120 license='Apache 2.0', 121 packages=find_packages(exclude=['tests']), 122 include_package_data=True, 123 zip_safe=False, 124 python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*', 125 install_requires=REQUIRES, 126 cmdclass=cmdclass, 127 ext_modules=ext_modules, 128 tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'], 129 entry_points={ 130 'console_scripts': [ 131 'falcon-bench = falcon.cmd.bench:main', 132 'falcon-print-routes = falcon.cmd.print_routes:main' 133 ] 134 } 135 ) 136 ``` Path: `falcon/media/msgpack.py` Content: ``` 1 from __future__ import absolute_import 2 3 from falcon import errors 4 from falcon.media import BaseHandler 5 6 7 class MessagePackHandler(BaseHandler): 8 """Handler built using the :py:mod:`msgpack` module. 9 10 This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The 11 MessagePack ``bin`` type is used to distinguish between Unicode strings 12 (``str`` on Python 3, ``unicode`` on Python 2) and byte strings 13 (``bytes`` on Python 2/3, or ``str`` on Python 2). 14 15 Note: 16 This handler requires the extra ``msgpack`` package, which must be 17 installed in addition to ``falcon`` from PyPI: 18 19 .. code:: 20 21 $ pip install msgpack 22 23 Python 2.6 users will need to use the deprecated ``msgpack-python`` 24 package instead, pinned to version 0.4.8. 25 """ 26 27 def __init__(self): 28 import msgpack 29 30 self.msgpack = msgpack 31 self.packer = msgpack.Packer( 32 encoding='utf-8', 33 autoreset=True, 34 use_bin_type=True, 35 ) 36 37 def deserialize(self, raw): 38 try: 39 # NOTE(jmvrbanac): Using unpackb since we would need to manage 40 # a buffer for Unpacker() which wouldn't gain us much. 
41 return self.msgpack.unpackb(raw, encoding='utf-8') 42 except ValueError as err: 43 raise errors.HTTPBadRequest( 44 'Invalid MessagePack', 45 'Could not parse MessagePack body - {0}'.format(err) 46 ) 47 48 def serialize(self, media): 49 return self.packer.pack(media) 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/falcon/media/msgpack.py b/falcon/media/msgpack.py --- a/falcon/media/msgpack.py +++ b/falcon/media/msgpack.py @@ -19,9 +19,6 @@ .. code:: $ pip install msgpack - - Python 2.6 users will need to use the deprecated ``msgpack-python`` - package instead, pinned to version 0.4.8. """ def __init__(self): diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -107,8 +107,9 @@ 'Programming Language :: Python', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', + 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', - 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', @@ -121,7 +122,7 @@ packages=find_packages(exclude=['tests']), include_package_data=True, zip_safe=False, - python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*', + python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*', install_requires=REQUIRES, cmdclass=cmdclass, ext_modules=ext_modules,
{"golden_diff": "diff --git a/falcon/media/msgpack.py b/falcon/media/msgpack.py\n--- a/falcon/media/msgpack.py\n+++ b/falcon/media/msgpack.py\n@@ -19,9 +19,6 @@\n .. code::\n \n $ pip install msgpack\n-\n- Python 2.6 users will need to use the deprecated ``msgpack-python``\n- package instead, pinned to version 0.4.8.\n \"\"\"\n \n def __init__(self):\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -107,8 +107,9 @@\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n+ 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n- 'Programming Language :: Python :: 3.3',\n+ 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n@@ -121,7 +122,7 @@\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n- python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',\n+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n", "issue": "Drop support for Python 3.3\n\n", "before_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nimport re\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2']\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media']\n ext_modules = [\n Extension(\n package + '.' + module,\n [path.join(*(package.split('.') + [module + '.py']))]\n )\n for package in package_names\n for module in list_modules(path.join(MYDIR, *package.split('.')))\n ]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\n\ndef load_description():\n in_raw = False\n\n description_lines = []\n\n # NOTE(kgriffs): PyPI does not support the raw directive\n for readme_line in io.open('README.rst', 'r', encoding='utf-8'):\n if readme_line.startswith('.. 
raw::'):\n in_raw = True\n elif in_raw:\n if readme_line and not re.match('\\s', readme_line):\n in_raw = False\n\n if not in_raw:\n description_lines.append(readme_line)\n\n return ''.join(description_lines)\n\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=load_description(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main',\n 'falcon-print-routes = falcon.cmd.print_routes:main'\n ]\n }\n)\n", "path": "setup.py"}, {"content": "from __future__ import absolute_import\n\nfrom falcon import errors\nfrom falcon.media import BaseHandler\n\n\nclass MessagePackHandler(BaseHandler):\n \"\"\"Handler built using the :py:mod:`msgpack` module.\n\n This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The\n MessagePack ``bin`` type is used to distinguish between Unicode strings\n (``str`` on Python 3, ``unicode`` on Python 2) and byte strings\n (``bytes`` on Python 2/3, or ``str`` on Python 2).\n\n Note:\n This handler requires the extra ``msgpack`` package, which must be\n installed in addition to ``falcon`` from PyPI:\n\n .. 
code::\n\n $ pip install msgpack\n\n Python 2.6 users will need to use the deprecated ``msgpack-python``\n package instead, pinned to version 0.4.8.\n \"\"\"\n\n def __init__(self):\n import msgpack\n\n self.msgpack = msgpack\n self.packer = msgpack.Packer(\n encoding='utf-8',\n autoreset=True,\n use_bin_type=True,\n )\n\n def deserialize(self, raw):\n try:\n # NOTE(jmvrbanac): Using unpackb since we would need to manage\n # a buffer for Unpacker() which wouldn't gain us much.\n return self.msgpack.unpackb(raw, encoding='utf-8')\n except ValueError as err:\n raise errors.HTTPBadRequest(\n 'Invalid MessagePack',\n 'Could not parse MessagePack body - {0}'.format(err)\n )\n\n def serialize(self, media):\n return self.packer.pack(media)\n", "path": "falcon/media/msgpack.py"}], "after_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nimport re\nimport sys\n\nfrom setuptools import Extension, find_packages, setup\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is better-maintained fork of mimeparse\nREQUIRES = ['six>=1.4.0', 'python-mimeparse>=1.5.2']\n\ntry:\n sys.pypy_version_info\n PYPY = True\nexcept AttributeError:\n PYPY = False\n\nif PYPY:\n CYTHON = False\nelse:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n # TODO(kgriffs): pip now ignores all output, so the user\n # may not see this message. See also:\n #\n # https://github.com/pypa/pip/issues/2732\n #\n print('\\nNOTE: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n package_names = ['falcon', 'falcon.util', 'falcon.routing', 'falcon.media']\n ext_modules = [\n Extension(\n package + '.' + module,\n [path.join(*(package.split('.') + [module + '.py']))]\n )\n for package in package_names\n for module in list_modules(path.join(MYDIR, *package.split('.')))\n ]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\n\ndef load_description():\n in_raw = False\n\n description_lines = []\n\n # NOTE(kgriffs): PyPI does not support the raw directive\n for readme_line in io.open('README.rst', 'r', encoding='utf-8'):\n if readme_line.startswith('.. 
raw::'):\n in_raw = True\n elif in_raw:\n if readme_line and not re.match('\\s', readme_line):\n in_raw = False\n\n if not in_raw:\n description_lines.append(readme_line)\n\n return ''.join(description_lines)\n\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=load_description(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',\n install_requires=REQUIRES,\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n tests_require=['testtools', 'requests', 'pyyaml', 'pytest', 'pytest-runner'],\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main',\n 'falcon-print-routes = falcon.cmd.print_routes:main'\n ]\n }\n)\n", "path": "setup.py"}, {"content": "from __future__ import absolute_import\n\nfrom falcon import errors\nfrom falcon.media import BaseHandler\n\n\nclass MessagePackHandler(BaseHandler):\n \"\"\"Handler built using the :py:mod:`msgpack` module.\n\n This handler uses ``msgpack.unpackb()`` and ``msgpack.packb()``. The\n MessagePack ``bin`` type is used to distinguish between Unicode strings\n (``str`` on Python 3, ``unicode`` on Python 2) and byte strings\n (``bytes`` on Python 2/3, or ``str`` on Python 2).\n\n Note:\n This handler requires the extra ``msgpack`` package, which must be\n installed in addition to ``falcon`` from PyPI:\n\n .. code::\n\n $ pip install msgpack\n \"\"\"\n\n def __init__(self):\n import msgpack\n\n self.msgpack = msgpack\n self.packer = msgpack.Packer(\n encoding='utf-8',\n autoreset=True,\n use_bin_type=True,\n )\n\n def deserialize(self, raw):\n try:\n # NOTE(jmvrbanac): Using unpackb since we would need to manage\n # a buffer for Unpacker() which wouldn't gain us much.\n return self.msgpack.unpackb(raw, encoding='utf-8')\n except ValueError as err:\n raise errors.HTTPBadRequest(\n 'Invalid MessagePack',\n 'Could not parse MessagePack body - {0}'.format(err)\n )\n\n def serialize(self, media):\n return self.packer.pack(media)\n", "path": "falcon/media/msgpack.py"}]}
2,037
365
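A quick way to sanity-check a change like the one above is to evaluate the new `python_requires` string directly. The snippet below is only an illustration (it is not part of falcon's test suite) and assumes the `packaging` package is installed (`pip install packaging`); the version list is chosen to mirror the classifiers kept in `setup.py`.

```python
# Verify that the tightened specifier excludes 3.3 but keeps 2.7 and 3.4+.
from packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*")

for version, expected in [
    ("2.7.14", True),   # still supported
    ("3.3.7", False),   # dropped by this change
    ("3.4.8", True),
    ("3.6.4", True),
]:
    assert (version in spec) is expected, (version, expected)
print("python_requires specifier matches the advertised support matrix")
```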
gh_patches_debug_6453
rasdani/github-patches
git_diff
urllib3__urllib3-2762
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `python_requires` prevents install of recent urllib3 when using Poetry ### Subject re: https://github.com/urllib3/urllib3/pull/1309#issuecomment-356650894 I no longer see the suggestion of including `<4` in the link referenced earlier in that PR (content relocated to https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/#python-requires). Its inclusion is causing `1.22` to be installed. I use `poetry` to manage dependencies for https://github.com/cj81499/advent-of-code, and today I got a dependabot alert for urllib3 (CVE-2018-20060). I tried a `poetry update`, but poetry did not update urllib. `poetry add urllib3@latest` revealed the problem. ```command > poetry add urllib3@latest Using version ^1.26.12 for urllib3 Updating dependencies Resolving dependencies... (0.0s) The current project's Python requirement (>=3.9) is not compatible with some of the required packages Python requirement: - urllib3 requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4, so it will not be satisfied for Python >=4 Because urllib3 (1.26.12) requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4 and no versions of urllib3 match >1.26.12,<2.0.0, urllib3 is forbidden. So, because aoc-cj depends on urllib3 (^1.26.12), version solving failed. • Check your dependencies Python requirement: The Python requirement can be specified via the `python` or `markers` properties For urllib3, a possible solution would be to set the `python` property to ">=3.9,<4" https://python-poetry.org/docs/dependency-specification/#python-restricted-dependencies, https://python-poetry.org/docs/dependency-specification/#using-environment-markers ``` As a result of this change, I've been quietly stuck on `urllib3 v1.22` without even realizing it. As suggested by poetry, I've changed _my_ python requirement from ">=3.9" to ">=3.9,<4" to be able to update, but I'm not sure there's a good reason to restrict the supported python versions to `<4`. Like I mentioned earlier, it seems this is no longer recommended by the Python docs. In addition, since it didn't set a `python_requires`, a (future) Python 4 user might try to install urllib3 and get `v1.22`(!), which seems like pretty undesirable behavior to me. _Originally posted by @cj81499 in https://github.com/urllib3/urllib3/issues/1309#issuecomment-1301625738_ ### Environment poetry 1.2.2 OS macOS-12.6-x86_64-i386-64bit Python 3.9.15 urllib3 1.22 ### Steps to Reproduce - Install poetry - Clone https://github.com/cj81499/advent-of-code - Checkout commit `a8fecd3` - run `poetry install` - run `poetry show urllib3` - Observe that version 1.22 is installed ### Expected Behavior Latest urllib3 (`1.26.12` at time of writing) is installed ### Actual Behavior urllib3 `1.22` is installed --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # This file is protected via CODEOWNERS 3 4 import codecs 5 import os 6 import re 7 8 from setuptools import setup 9 10 base_path = os.path.dirname(__file__) 11 12 # Get the version (borrowed from SQLAlchemy) 13 with open(os.path.join(base_path, "src", "urllib3", "_version.py")) as fp: 14 VERSION = ( 15 re.compile(r""".*__version__ = ["'](.*?)['"]""", re.S).match(fp.read()).group(1) 16 ) 17 18 19 with codecs.open("README.rst", encoding="utf-8") as fp: 20 # Remove reST raw directive from README as they're not allowed on PyPI 21 # Those blocks start with a newline and continue until the next newline 22 mode = None 23 lines = [] 24 for line in fp: 25 if line.startswith(".. raw::"): 26 mode = "ignore_nl" 27 elif line == "\n": 28 mode = "wait_nl" if mode == "ignore_nl" else None 29 30 if mode is None: 31 lines.append(line) 32 readme = "".join(lines) 33 34 with codecs.open("CHANGES.rst", encoding="utf-8") as fp: 35 changes = fp.read() 36 37 version = VERSION 38 39 setup( 40 name="urllib3", 41 version=version, 42 description="HTTP library with thread-safe connection pooling, file post, and more.", 43 long_description=u"\n\n".join([readme, changes]), 44 long_description_content_type="text/x-rst", 45 classifiers=[ 46 "Environment :: Web Environment", 47 "Intended Audience :: Developers", 48 "License :: OSI Approved :: MIT License", 49 "Operating System :: OS Independent", 50 "Programming Language :: Python", 51 "Programming Language :: Python :: 2", 52 "Programming Language :: Python :: 2.7", 53 "Programming Language :: Python :: 3", 54 "Programming Language :: Python :: 3.6", 55 "Programming Language :: Python :: 3.7", 56 "Programming Language :: Python :: 3.8", 57 "Programming Language :: Python :: 3.9", 58 "Programming Language :: Python :: 3.10", 59 "Programming Language :: Python :: 3.11", 60 "Programming Language :: Python :: Implementation :: CPython", 61 "Programming Language :: Python :: Implementation :: PyPy", 62 "Topic :: Internet :: WWW/HTTP", 63 "Topic :: Software Development :: Libraries", 64 ], 65 keywords="urllib httplib threadsafe filepost http https ssl pooling", 66 author="Andrey Petrov", 67 author_email="[email protected]", 68 url="https://urllib3.readthedocs.io/", 69 project_urls={ 70 "Documentation": "https://urllib3.readthedocs.io/", 71 "Code": "https://github.com/urllib3/urllib3", 72 "Issue tracker": "https://github.com/urllib3/urllib3/issues", 73 }, 74 license="MIT", 75 packages=[ 76 "urllib3", 77 "urllib3.packages", 78 "urllib3.packages.backports", 79 "urllib3.contrib", 80 "urllib3.contrib._securetransport", 81 "urllib3.util", 82 ], 83 package_dir={"": "src"}, 84 requires=[], 85 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4", 86 extras_require={ 87 "brotli": [ 88 "brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'", 89 "brotlicffi>=0.8.0; (os_name != 'nt' or python_version >= '3') and platform_python_implementation != 'CPython'", 90 "brotlipy>=0.6.0; os_name == 'nt' and python_version < '3'", 91 ], 92 "secure": [ 93 "pyOpenSSL>=0.14", 94 "cryptography>=1.3.4", 95 "idna>=2.0.0", 96 "certifi", 97 "ipaddress; python_version=='2.7'", 98 "urllib3-secure-extra", 99 ], 100 "socks": ["PySocks>=1.5.6,<2.0,!=1.5.7"], 101 }, 102 ) 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -82,7 +82,7 @@ ], package_dir={"": "src"}, requires=[], - python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4", + python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", extras_require={ "brotli": [ "brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -82,7 +82,7 @@\n ],\n package_dir={\"\": \"src\"},\n requires=[],\n- python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\",\n+ python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n extras_require={\n \"brotli\": [\n \"brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'\",\n", "issue": "`python_requires` prevents install of recent urllib3 when using Poetry\n### Subject\r\n\r\nre: https://github.com/urllib3/urllib3/pull/1309#issuecomment-356650894\r\n\r\nI no longer see the suggestion of including `<4` in the link referenced earlier in that PR (content relocated to https://packaging.python.org/en/latest/guides/distributing-packages-using-setuptools/#python-requires). Its inclusion is causing `1.22` to be installed.\r\n\r\nI use `poetry` to manage dependencies for https://github.com/cj81499/advent-of-code, and today I got a dependabot alert for urllib3 (CVE-2018-20060).\r\n\r\nI tried a `poetry update`, but poetry did not update urllib. `poetry add urllib3@latest` revealed the problem.\r\n\r\n```command\r\n> poetry add urllib3@latest\r\nUsing version ^1.26.12 for urllib3\r\n\r\nUpdating dependencies\r\nResolving dependencies... (0.0s)\r\n\r\nThe current project's Python requirement (>=3.9) is not compatible with some of the required packages Python requirement:\r\n - urllib3 requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4, so it will not be satisfied for Python >=4\r\n\r\nBecause urllib3 (1.26.12) requires Python >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\r\n and no versions of urllib3 match >1.26.12,<2.0.0, urllib3 is forbidden.\r\nSo, because aoc-cj depends on urllib3 (^1.26.12), version solving failed.\r\n\r\n \u2022 Check your dependencies Python requirement: The Python requirement can be specified via the `python` or `markers` properties\r\n \r\n For urllib3, a possible solution would be to set the `python` property to \">=3.9,<4\"\r\n\r\n https://python-poetry.org/docs/dependency-specification/#python-restricted-dependencies,\r\n https://python-poetry.org/docs/dependency-specification/#using-environment-markers\r\n```\r\n\r\nAs a result of this change, I've been quietly stuck on `urllib3 v1.22` without even realizing it.\r\n\r\nAs suggested by poetry, I've changed _my_ python requirement from \">=3.9\" to \">=3.9,<4\" to be able to update, but I'm not sure there's a good reason to restrict the supported python versions to `<4`. 
Like I mentioned earlier, it seems this is no longer recommended by the Python docs.\r\n\r\nIn addition, since it didn't set a `python_requires`, a (future) Python 4 user might try to install urllib3 and get `v1.22`(!), which seems like pretty undesirable behavior to me.\r\n\r\n_Originally posted by @cj81499 in https://github.com/urllib3/urllib3/issues/1309#issuecomment-1301625738_\r\n\r\n### Environment\r\n\r\npoetry 1.2.2\r\n\r\nOS macOS-12.6-x86_64-i386-64bit\r\nPython 3.9.15\r\nurllib3 1.22\r\n\r\n### Steps to Reproduce\r\n\r\n- Install poetry\r\n- Clone https://github.com/cj81499/advent-of-code\r\n- Checkout commit `a8fecd3`\r\n- run `poetry install`\r\n- run `poetry show urllib3`\r\n- Observe that version 1.22 is installed\r\n\r\n### Expected Behavior\r\n\r\nLatest urllib3 (`1.26.12` at time of writing) is installed\r\n\r\n### Actual Behavior\r\n\r\nurllib3 `1.22` is installed\n", "before_files": [{"content": "#!/usr/bin/env python\n# This file is protected via CODEOWNERS\n\nimport codecs\nimport os\nimport re\n\nfrom setuptools import setup\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, \"src\", \"urllib3\", \"_version.py\")) as fp:\n VERSION = (\n re.compile(r\"\"\".*__version__ = [\"'](.*?)['\"]\"\"\", re.S).match(fp.read()).group(1)\n )\n\n\nwith codecs.open(\"README.rst\", encoding=\"utf-8\") as fp:\n # Remove reST raw directive from README as they're not allowed on PyPI\n # Those blocks start with a newline and continue until the next newline\n mode = None\n lines = []\n for line in fp:\n if line.startswith(\".. raw::\"):\n mode = \"ignore_nl\"\n elif line == \"\\n\":\n mode = \"wait_nl\" if mode == \"ignore_nl\" else None\n\n if mode is None:\n lines.append(line)\n readme = \"\".join(lines)\n\nwith codecs.open(\"CHANGES.rst\", encoding=\"utf-8\") as fp:\n changes = fp.read()\n\nversion = VERSION\n\nsetup(\n name=\"urllib3\",\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u\"\\n\\n\".join([readme, changes]),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries\",\n ],\n keywords=\"urllib httplib threadsafe filepost http https ssl pooling\",\n author=\"Andrey Petrov\",\n author_email=\"[email protected]\",\n url=\"https://urllib3.readthedocs.io/\",\n project_urls={\n \"Documentation\": \"https://urllib3.readthedocs.io/\",\n \"Code\": \"https://github.com/urllib3/urllib3\",\n \"Issue tracker\": \"https://github.com/urllib3/urllib3/issues\",\n },\n license=\"MIT\",\n packages=[\n \"urllib3\",\n \"urllib3.packages\",\n \"urllib3.packages.backports\",\n \"urllib3.contrib\",\n 
\"urllib3.contrib._securetransport\",\n \"urllib3.util\",\n ],\n package_dir={\"\": \"src\"},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, <4\",\n extras_require={\n \"brotli\": [\n \"brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'\",\n \"brotlicffi>=0.8.0; (os_name != 'nt' or python_version >= '3') and platform_python_implementation != 'CPython'\",\n \"brotlipy>=0.6.0; os_name == 'nt' and python_version < '3'\",\n ],\n \"secure\": [\n \"pyOpenSSL>=0.14\",\n \"cryptography>=1.3.4\",\n \"idna>=2.0.0\",\n \"certifi\",\n \"ipaddress; python_version=='2.7'\",\n \"urllib3-secure-extra\",\n ],\n \"socks\": [\"PySocks>=1.5.6,<2.0,!=1.5.7\"],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# This file is protected via CODEOWNERS\n\nimport codecs\nimport os\nimport re\n\nfrom setuptools import setup\n\nbase_path = os.path.dirname(__file__)\n\n# Get the version (borrowed from SQLAlchemy)\nwith open(os.path.join(base_path, \"src\", \"urllib3\", \"_version.py\")) as fp:\n VERSION = (\n re.compile(r\"\"\".*__version__ = [\"'](.*?)['\"]\"\"\", re.S).match(fp.read()).group(1)\n )\n\n\nwith codecs.open(\"README.rst\", encoding=\"utf-8\") as fp:\n # Remove reST raw directive from README as they're not allowed on PyPI\n # Those blocks start with a newline and continue until the next newline\n mode = None\n lines = []\n for line in fp:\n if line.startswith(\".. raw::\"):\n mode = \"ignore_nl\"\n elif line == \"\\n\":\n mode = \"wait_nl\" if mode == \"ignore_nl\" else None\n\n if mode is None:\n lines.append(line)\n readme = \"\".join(lines)\n\nwith codecs.open(\"CHANGES.rst\", encoding=\"utf-8\") as fp:\n changes = fp.read()\n\nversion = VERSION\n\nsetup(\n name=\"urllib3\",\n version=version,\n description=\"HTTP library with thread-safe connection pooling, file post, and more.\",\n long_description=u\"\\n\\n\".join([readme, changes]),\n long_description_content_type=\"text/x-rst\",\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries\",\n ],\n keywords=\"urllib httplib threadsafe filepost http https ssl pooling\",\n author=\"Andrey Petrov\",\n author_email=\"[email protected]\",\n url=\"https://urllib3.readthedocs.io/\",\n project_urls={\n \"Documentation\": \"https://urllib3.readthedocs.io/\",\n \"Code\": \"https://github.com/urllib3/urllib3\",\n \"Issue tracker\": \"https://github.com/urllib3/urllib3/issues\",\n },\n license=\"MIT\",\n packages=[\n \"urllib3\",\n \"urllib3.packages\",\n \"urllib3.packages.backports\",\n \"urllib3.contrib\",\n \"urllib3.contrib._securetransport\",\n \"urllib3.util\",\n ],\n package_dir={\"\": \"src\"},\n requires=[],\n python_requires=\">=2.7, !=3.0.*, 
!=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n extras_require={\n \"brotli\": [\n \"brotli>=1.0.9; (os_name != 'nt' or python_version >= '3') and platform_python_implementation == 'CPython'\",\n \"brotlicffi>=0.8.0; (os_name != 'nt' or python_version >= '3') and platform_python_implementation != 'CPython'\",\n \"brotlipy>=0.6.0; os_name == 'nt' and python_version < '3'\",\n ],\n \"secure\": [\n \"pyOpenSSL>=0.14\",\n \"cryptography>=1.3.4\",\n \"idna>=2.0.0\",\n \"certifi\",\n \"ipaddress; python_version=='2.7'\",\n \"urllib3-secure-extra\",\n ],\n \"socks\": [\"PySocks>=1.5.6,<2.0,!=1.5.7\"],\n },\n)\n", "path": "setup.py"}]}
2,261
177
gh_patches_debug_29225
rasdani/github-patches
git_diff
mampfes__hacs_waste_collection_schedule-196
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ZAS Abfallwirtschaft I'm trying to integrate the ZAS Abfallwirtschaft ICS. But i can't seem to get it to work. Is there anything i need to configure specifically? The Website is https://www.za-sws.de/abfallkalender.cfm And the generated ICS is https://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S _(The adress used is a random one, not mine of course)_ My configuration looks like this: ``` waste_collection_schedule: sources: - name: ics args: url: https://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S customize: - type: Restmuelltonne 02-woechentl. alias: Restmüll icon: mdi:trash-can - type: Biotonnenwaesche alias: Biotonnenwäsche show: false icon: mdi:flower-outline - type: Papiertonne 04-woechentl. alias: Papiertonne icon: mdi:trash-can-outline - type: LVP alias: Gelbe Tonne icon: mdi:recycle ``` Thanks in advance. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py` Content: ``` 1 import datetime 2 import logging 3 from pathlib import Path 4 5 import requests 6 from waste_collection_schedule import Collection # type: ignore[attr-defined] 7 from waste_collection_schedule.service.ICS import ICS 8 from waste_collection_schedule.service.ICS_v1 import ICS_v1 9 10 TITLE = "ICS" 11 DESCRIPTION = "Source for ICS based schedules." 12 URL = None 13 TEST_CASES = { 14 "Dortmund, Dudenstr. 5": { 15 "url": "https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4" 16 }, 17 "Leipzig, Sandgrubenweg 27": { 18 "url": "https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027" 19 }, 20 "Ludwigsburg": { 21 "url": "https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics" 22 }, 23 "Esslingen, Bahnhof": { 24 "url": "https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe" 25 }, 26 "Test File": { 27 # Path is used here to allow to call the Source from any location. 28 # This is not required in a yaml configuration! 29 "file": str(Path(__file__).resolve().parents[1].joinpath("test/test.ics")) 30 }, 31 "Test File (recurring)": { 32 # Path is used here to allow to call the Source from any location. 33 # This is not required in a yaml configuration! 34 "file": str(Path(__file__).resolve().parents[1].joinpath("test/recurring.ics")) 35 }, 36 "München, Bahnstr. 
11": { 37 "url": "https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}", 38 "version": 1, 39 }, 40 "Buxtehude, Am Berg": { 41 "url": "https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics" 42 }, 43 # "Hausmüllinfo: ASR Chemnitz": { 44 # "url": "https://asc.hausmuell.info/ics/ics.php", 45 # "method": "POST", 46 # "params": { 47 # "hidden_id_egebiet": 439087, 48 # "input_ort": "Chemnitz", 49 # "input_str": "Straße der Nationen", 50 # "input_hnr": 2, 51 # "hidden_send_btn": "ics", 52 # # "hiddenYear": 2021, 53 # "hidden_id_ort": 10, 54 # "hidden_id_ortsteil": 0, 55 # "hidden_id_str": 17814, 56 # "hidden_id_hnr": 5538100, 57 # "hidden_kalenderart": "privat", 58 # "showBinsBio": "on", 59 # "showBinsRest": "on", 60 # "showBinsRest_rc": "on", 61 # "showBinsPapier": "on", 62 # "showBinsOrganic": "on", 63 # "showBinsXmas": "on", 64 # "showBinsDsd": "on", 65 # "showBinsProb": "on", 66 # }, 67 # "year_field": "hiddenYear", 68 # }, 69 "Abfall Zollernalbkreis, Ebingen": { 70 "url": "https://www.abfallkalender-zak.de", 71 "params": { 72 "city": "2,3,4", 73 "street": "3", 74 "types[]": [ 75 "restmuell", 76 "gelbersack", 77 "papiertonne", 78 "biomuell", 79 "gruenabfall", 80 "schadstoffsammlung", 81 "altpapiersammlung", 82 "schrottsammlung", 83 "weihnachtsbaeume", 84 "elektrosammlung", 85 ], 86 "go_ics": "Download", 87 }, 88 "year_field": "year", 89 }, 90 "Detmold": { 91 "url": "https://abfuhrkalender.detmold.de/icsmaker.php", 92 "method": "GET", 93 "params": {"strid": 338}, 94 "year_field": "year", 95 }, 96 "EAW Rheingau Taunus": { 97 "url": "https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429", 98 "split_at": ",", 99 }, 100 "Recollect, Ottawa": { 101 "url": "https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics", 102 "split_at": "\\, [and ]*", 103 }, 104 "Frankfurt am Main, Achenbachstrasse 3": { 105 "url": "https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics" 106 }, 107 "Erlensee, Am Haspel": { 108 "url": "https://sperrmuell.erlensee.de/?type=reminder", 109 "method": "POST", 110 "params": { 111 "street": 8, 112 "eventType[]": [27, 23, 19, 20, 21, 24, 22, 25, 26], 113 "timeframe": 23, 114 "download": "ical", 115 }, 116 }, 117 } 118 119 120 HEADERS = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"} 121 _LOGGER = logging.getLogger(__name__) 122 123 124 class Source: 125 def __init__( 126 self, 127 url=None, 128 file=None, 129 offset=None, 130 params=None, 131 year_field=None, 132 method="GET", 133 split_at=None, 134 version=2, 135 ): 136 self._url = url 137 self._file = file 138 if bool(self._url is not None) == 
bool(self._file is not None): 139 raise RuntimeError("Specify either url or file") 140 if version == 1: 141 self._ics = ICS_v1(offset=offset, split_at=split_at) 142 else: 143 self._ics = ICS(offset=offset, split_at=split_at) 144 self._params = params 145 self._year_field = year_field # replace this field in params with current year 146 self._method = method # The method to send the params 147 148 def fetch(self): 149 if self._url is not None: 150 if "{%Y}" in self._url or self._year_field is not None: 151 # url contains wildcard or params contains year field 152 now = datetime.datetime.now() 153 154 # replace year in url 155 url = self._url.replace("{%Y}", str(now.year)) 156 157 # replace year in params 158 if self._year_field is not None: 159 if self._params is None: 160 raise RuntimeError("year_field specified without params") 161 self._params[self._year_field] = str(now.year) 162 163 entries = self.fetch_url(url, self._params) 164 165 if now.month == 12: 166 # also get data for next year if we are already in december 167 url = self._url.replace("{%Y}", str(now.year + 1)) 168 if self._year_field is not None: 169 self._params[self._year_field] = str(now.year + 1) 170 171 try: 172 entries.extend(self.fetch_url(url, self._params)) 173 except Exception: 174 # ignore if fetch for next year fails 175 pass 176 return entries 177 else: 178 return self.fetch_url(self._url, self._params) 179 elif self._file is not None: 180 return self.fetch_file(self._file) 181 182 def fetch_url(self, url, params=None): 183 # get ics file 184 if self._method == "GET": 185 r = requests.get(url, params=params, headers=HEADERS) 186 elif self._method == "POST": 187 r = requests.post(url, data=params, headers=HEADERS) 188 else: 189 raise RuntimeError( 190 "Error: unknown method to fetch URL, use GET or POST; got {self._method}" 191 ) 192 r.encoding = "utf-8" # requests doesn't guess the encoding correctly 193 194 # check the return code 195 if not r.ok: 196 _LOGGER.error( 197 "Error: the response is not ok; need code 200, but got code %s" 198 % r.status_code 199 ) 200 return [] 201 202 return self._convert(r.text) 203 204 def fetch_file(self, file): 205 f = open(file) 206 return self._convert(f.read()) 207 208 def _convert(self, data): 209 dates = self._ics.convert(data) 210 211 entries = [] 212 for d in dates: 213 entries.append(Collection(d[0], d[1])) 214 return entries 215 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py --- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py +++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py @@ -132,6 +132,7 @@ method="GET", split_at=None, version=2, + verify_ssl=True, ): self._url = url self._file = file @@ -144,6 +145,7 @@ self._params = params self._year_field = year_field # replace this field in params with current year self._method = method # The method to send the params + self._verify_ssl = verify_ssl def fetch(self): if self._url is not None: @@ -182,9 +184,13 @@ def fetch_url(self, url, params=None): # get ics file if self._method == "GET": - r = requests.get(url, params=params, headers=HEADERS) + r = requests.get( + url, params=params, headers=HEADERS, verify=self._verify_ssl + ) elif self._method == "POST": - r = requests.post(url, data=params, headers=HEADERS) + r = requests.post( + url, data=params, headers=HEADERS, verify=self._verify_ssl + ) else: raise RuntimeError( "Error: unknown method to fetch URL, use GET or POST; got {self._method}"
{"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py\n@@ -132,6 +132,7 @@\n method=\"GET\",\n split_at=None,\n version=2,\n+ verify_ssl=True,\n ):\n self._url = url\n self._file = file\n@@ -144,6 +145,7 @@\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n+ self._verify_ssl = verify_ssl\n \n def fetch(self):\n if self._url is not None:\n@@ -182,9 +184,13 @@\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n- r = requests.get(url, params=params, headers=HEADERS)\n+ r = requests.get(\n+ url, params=params, headers=HEADERS, verify=self._verify_ssl\n+ )\n elif self._method == \"POST\":\n- r = requests.post(url, data=params, headers=HEADERS)\n+ r = requests.post(\n+ url, data=params, headers=HEADERS, verify=self._verify_ssl\n+ )\n else:\n raise RuntimeError(\n \"Error: unknown method to fetch URL, use GET or POST; got {self._method}\"\n", "issue": "ZAS Abfallwirtschaft\nI'm trying to integrate the ZAS Abfallwirtschaft ICS. But i can't seem to get it to work.\r\nIs there anything i need to configure specifically?\r\n\r\nThe Website is\r\nhttps://www.za-sws.de/abfallkalender.cfm\r\n\r\nAnd the generated ICS is\r\nhttps://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S\r\n_(The adress used is a random one, not mine of course)_\r\n\r\nMy configuration looks like this:\r\n\r\n```\r\nwaste_collection_schedule:\r\n sources:\r\n - name: ics\r\n args:\r\n url: https://online-portal.za-sws.de/WasteManagementSuedwestsachsen/WasteManagementServiceServlet?ApplicationName=Calendar&SubmitAction=sync&StandortID=52554001&AboID=33626&Fra=R;P;B;W;L;C;S\r\n customize:\r\n - type: Restmuelltonne 02-woechentl.\r\n alias: Restm\u00fcll\r\n icon: mdi:trash-can\r\n - type: Biotonnenwaesche\r\n alias: Biotonnenw\u00e4sche\r\n show: false\r\n icon: mdi:flower-outline\r\n - type: Papiertonne 04-woechentl.\r\n alias: Papiertonne\r\n icon: mdi:trash-can-outline\r\n - type: LVP\r\n alias: Gelbe Tonne\r\n icon: mdi:recycle\r\n```\r\n\r\nThanks in advance.\n", "before_files": [{"content": "import datetime\nimport logging\nfrom pathlib import Path\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\nfrom waste_collection_schedule.service.ICS_v1 import ICS_v1\n\nTITLE = \"ICS\"\nDESCRIPTION = \"Source for ICS based schedules.\"\nURL = None\nTEST_CASES = {\n \"Dortmund, Dudenstr. 
5\": {\n \"url\": \"https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4\"\n },\n \"Leipzig, Sandgrubenweg 27\": {\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n },\n \"Test File\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/test.ics\"))\n },\n \"Test File (recurring)\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/recurring.ics\"))\n },\n \"M\u00fcnchen, Bahnstr. 11\": {\n \"url\": \"https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}\",\n \"version\": 1,\n },\n \"Buxtehude, Am Berg\": {\n \"url\": \"https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics\"\n },\n # \"Hausm\u00fcllinfo: ASR Chemnitz\": {\n # \"url\": \"https://asc.hausmuell.info/ics/ics.php\",\n # \"method\": \"POST\",\n # \"params\": {\n # \"hidden_id_egebiet\": 439087,\n # \"input_ort\": \"Chemnitz\",\n # \"input_str\": \"Stra\u00dfe der Nationen\",\n # \"input_hnr\": 2,\n # \"hidden_send_btn\": \"ics\",\n # # \"hiddenYear\": 2021,\n # \"hidden_id_ort\": 10,\n # \"hidden_id_ortsteil\": 0,\n # \"hidden_id_str\": 17814,\n # \"hidden_id_hnr\": 5538100,\n # \"hidden_kalenderart\": \"privat\",\n # \"showBinsBio\": \"on\",\n # \"showBinsRest\": \"on\",\n # \"showBinsRest_rc\": \"on\",\n # \"showBinsPapier\": \"on\",\n # \"showBinsOrganic\": \"on\",\n # \"showBinsXmas\": \"on\",\n # \"showBinsDsd\": \"on\",\n # \"showBinsProb\": \"on\",\n # },\n # \"year_field\": \"hiddenYear\",\n # },\n \"Abfall Zollernalbkreis, Ebingen\": {\n \"url\": \"https://www.abfallkalender-zak.de\",\n \"params\": {\n \"city\": \"2,3,4\",\n \"street\": \"3\",\n \"types[]\": [\n \"restmuell\",\n \"gelbersack\",\n \"papiertonne\",\n \"biomuell\",\n \"gruenabfall\",\n \"schadstoffsammlung\",\n \"altpapiersammlung\",\n \"schrottsammlung\",\n \"weihnachtsbaeume\",\n \"elektrosammlung\",\n ],\n \"go_ics\": \"Download\",\n },\n \"year_field\": \"year\",\n },\n \"Detmold\": {\n \"url\": \"https://abfuhrkalender.detmold.de/icsmaker.php\",\n \"method\": \"GET\",\n \"params\": {\"strid\": 
338},\n \"year_field\": \"year\",\n },\n \"EAW Rheingau Taunus\": {\n \"url\": \"https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429\",\n \"split_at\": \",\",\n },\n \"Recollect, Ottawa\": {\n \"url\": \"https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics\",\n \"split_at\": \"\\\\, [and ]*\",\n },\n \"Frankfurt am Main, Achenbachstrasse 3\": {\n \"url\": \"https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics\"\n },\n \"Erlensee, Am Haspel\": {\n \"url\": \"https://sperrmuell.erlensee.de/?type=reminder\",\n \"method\": \"POST\",\n \"params\": {\n \"street\": 8,\n \"eventType[]\": [27, 23, 19, 20, 21, 24, 22, 25, 26],\n \"timeframe\": 23,\n \"download\": \"ical\",\n },\n },\n}\n\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"}\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self,\n url=None,\n file=None,\n offset=None,\n params=None,\n year_field=None,\n method=\"GET\",\n split_at=None,\n version=2,\n ):\n self._url = url\n self._file = file\n if bool(self._url is not None) == bool(self._file is not None):\n raise RuntimeError(\"Specify either url or file\")\n if version == 1:\n self._ics = ICS_v1(offset=offset, split_at=split_at)\n else:\n self._ics = ICS(offset=offset, split_at=split_at)\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n\n def fetch(self):\n if self._url is not None:\n if \"{%Y}\" in self._url or self._year_field is not None:\n # url contains wildcard or params contains year field\n now = datetime.datetime.now()\n\n # replace year in url\n url = self._url.replace(\"{%Y}\", str(now.year))\n\n # replace year in params\n if self._year_field is not None:\n if self._params is None:\n raise RuntimeError(\"year_field specified without params\")\n self._params[self._year_field] = str(now.year)\n\n entries = self.fetch_url(url, self._params)\n\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n if self._year_field is not None:\n self._params[self._year_field] = str(now.year + 1)\n\n try:\n entries.extend(self.fetch_url(url, self._params))\n except Exception:\n # ignore if fetch for next year fails\n pass\n return entries\n else:\n return self.fetch_url(self._url, self._params)\n elif self._file is not None:\n return self.fetch_file(self._file)\n\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n r = requests.get(url, params=params, headers=HEADERS)\n elif self._method == \"POST\":\n r = requests.post(url, data=params, headers=HEADERS)\n else:\n raise RuntimeError(\n \"Error: unknown method to fetch URL, use GET or POST; got {self._method}\"\n )\n r.encoding = \"utf-8\" # requests doesn't guess the encoding correctly\n\n # check the return code\n if not r.ok:\n _LOGGER.error(\n \"Error: the response is not ok; need code 200, but got code %s\"\n % r.status_code\n )\n return []\n\n return self._convert(r.text)\n\n def fetch_file(self, file):\n f = open(file)\n return self._convert(f.read())\n\n def _convert(self, data):\n dates = self._ics.convert(data)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py"}], "after_files": [{"content": "import 
datetime\nimport logging\nfrom pathlib import Path\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\nfrom waste_collection_schedule.service.ICS import ICS\nfrom waste_collection_schedule.service.ICS_v1 import ICS_v1\n\nTITLE = \"ICS\"\nDESCRIPTION = \"Source for ICS based schedules.\"\nURL = None\nTEST_CASES = {\n \"Dortmund, Dudenstr. 5\": {\n \"url\": \"https://www.edg.de/ical/kalender.ics?Strasse=Dudenstr.&Hausnummer=5&Erinnerung=-1&Abfallart=1,2,3,4\"\n },\n \"Leipzig, Sandgrubenweg 27\": {\n \"url\": \"https://stadtreinigung-leipzig.de/wir-kommen-zu-ihnen/abfallkalender/ical.ics?position_nos=38296&name=Sandgrubenweg%2027\"\n },\n \"Ludwigsburg\": {\n \"url\": \"https://www.avl-ludwigsburg.de/fileadmin/Files/Abfallkalender/ICS/Privat/Privat_{%Y}_Ossweil.ics\"\n },\n \"Esslingen, Bahnhof\": {\n \"url\": \"https://api.abfall.io/?kh=DaA02103019b46345f1998698563DaAd&t=ics&s=1a862df26f6943997cef90233877a4fe\"\n },\n \"Test File\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/test.ics\"))\n },\n \"Test File (recurring)\": {\n # Path is used here to allow to call the Source from any location.\n # This is not required in a yaml configuration!\n \"file\": str(Path(__file__).resolve().parents[1].joinpath(\"test/recurring.ics\"))\n },\n \"M\u00fcnchen, Bahnstr. 11\": {\n \"url\": \"https://www.awm-muenchen.de/entsorgen/abfuhrkalender?tx_awmabfuhrkalender_abfuhrkalender%5Bhausnummer%5D=11&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BB%5D=1%2F2%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BP%5D=1%2F2%3BG&tx_awmabfuhrkalender_abfuhrkalender%5Bleerungszyklus%5D%5BR%5D=001%3BU&tx_awmabfuhrkalender_abfuhrkalender%5Bsection%5D=ics&tx_awmabfuhrkalender_abfuhrkalender%5Bsinglestandplatz%5D=false&tx_awmabfuhrkalender_abfuhrkalender%5Bstandplatzwahl%5D=true&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bbio%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Bpapier%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstellplatz%5D%5Brestmuell%5D=70024507&tx_awmabfuhrkalender_abfuhrkalender%5Bstrasse%5D=bahnstr.&tx_awmabfuhrkalender_abfuhrkalender%5Byear%5D={%Y}\",\n \"version\": 1,\n },\n \"Buxtehude, Am Berg\": {\n \"url\": \"https://abfall.landkreis-stade.de/api_v2/collection_dates/1/ort/10/strasse/90/hausnummern/1/abfallarten/R02-R04-B02-D04-D12-P04-R12-R14-W0-R22-R24-R31/kalender.ics\"\n },\n # \"Hausm\u00fcllinfo: ASR Chemnitz\": {\n # \"url\": \"https://asc.hausmuell.info/ics/ics.php\",\n # \"method\": \"POST\",\n # \"params\": {\n # \"hidden_id_egebiet\": 439087,\n # \"input_ort\": \"Chemnitz\",\n # \"input_str\": \"Stra\u00dfe der Nationen\",\n # \"input_hnr\": 2,\n # \"hidden_send_btn\": \"ics\",\n # # \"hiddenYear\": 2021,\n # \"hidden_id_ort\": 10,\n # \"hidden_id_ortsteil\": 0,\n # \"hidden_id_str\": 17814,\n # \"hidden_id_hnr\": 5538100,\n # \"hidden_kalenderart\": \"privat\",\n # \"showBinsBio\": \"on\",\n # \"showBinsRest\": \"on\",\n # \"showBinsRest_rc\": \"on\",\n # \"showBinsPapier\": \"on\",\n # \"showBinsOrganic\": \"on\",\n # \"showBinsXmas\": \"on\",\n # \"showBinsDsd\": \"on\",\n # \"showBinsProb\": \"on\",\n # },\n # \"year_field\": \"hiddenYear\",\n # },\n \"Abfall Zollernalbkreis, Ebingen\": {\n \"url\": \"https://www.abfallkalender-zak.de\",\n \"params\": {\n \"city\": \"2,3,4\",\n \"street\": \"3\",\n \"types[]\": [\n \"restmuell\",\n 
\"gelbersack\",\n \"papiertonne\",\n \"biomuell\",\n \"gruenabfall\",\n \"schadstoffsammlung\",\n \"altpapiersammlung\",\n \"schrottsammlung\",\n \"weihnachtsbaeume\",\n \"elektrosammlung\",\n ],\n \"go_ics\": \"Download\",\n },\n \"year_field\": \"year\",\n },\n \"Detmold\": {\n \"url\": \"https://abfuhrkalender.detmold.de/icsmaker.php\",\n \"method\": \"GET\",\n \"params\": {\"strid\": 338},\n \"year_field\": \"year\",\n },\n \"EAW Rheingau Taunus\": {\n \"url\": \"https://www.eaw-rheingau-taunus.de/abfallkalender/calendar.ics?streetid=1429\",\n \"split_at\": \",\",\n },\n \"Recollect, Ottawa\": {\n \"url\": \"https://recollect.a.ssl.fastly.net/api/places/BCCDF30E-578B-11E4-AD38-5839C200407A/services/208/events.en.ics\",\n \"split_at\": \"\\\\, [and ]*\",\n },\n \"Frankfurt am Main, Achenbachstrasse 3\": {\n \"url\": \"https://www.fes-frankfurt.de/abfallkalender/QWNoZW5iYWNoc3RyLnwzfDYwNTk2.ics\"\n },\n \"Erlensee, Am Haspel\": {\n \"url\": \"https://sperrmuell.erlensee.de/?type=reminder\",\n \"method\": \"POST\",\n \"params\": {\n \"street\": 8,\n \"eventType[]\": [27, 23, 19, 20, 21, 24, 22, 25, 26],\n \"timeframe\": 23,\n \"download\": \"ical\",\n },\n },\n}\n\n\nHEADERS = {\"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64)\"}\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(\n self,\n url=None,\n file=None,\n offset=None,\n params=None,\n year_field=None,\n method=\"GET\",\n split_at=None,\n version=2,\n verify_ssl=True,\n ):\n self._url = url\n self._file = file\n if bool(self._url is not None) == bool(self._file is not None):\n raise RuntimeError(\"Specify either url or file\")\n if version == 1:\n self._ics = ICS_v1(offset=offset, split_at=split_at)\n else:\n self._ics = ICS(offset=offset, split_at=split_at)\n self._params = params\n self._year_field = year_field # replace this field in params with current year\n self._method = method # The method to send the params\n self._verify_ssl = verify_ssl\n\n def fetch(self):\n if self._url is not None:\n if \"{%Y}\" in self._url or self._year_field is not None:\n # url contains wildcard or params contains year field\n now = datetime.datetime.now()\n\n # replace year in url\n url = self._url.replace(\"{%Y}\", str(now.year))\n\n # replace year in params\n if self._year_field is not None:\n if self._params is None:\n raise RuntimeError(\"year_field specified without params\")\n self._params[self._year_field] = str(now.year)\n\n entries = self.fetch_url(url, self._params)\n\n if now.month == 12:\n # also get data for next year if we are already in december\n url = self._url.replace(\"{%Y}\", str(now.year + 1))\n if self._year_field is not None:\n self._params[self._year_field] = str(now.year + 1)\n\n try:\n entries.extend(self.fetch_url(url, self._params))\n except Exception:\n # ignore if fetch for next year fails\n pass\n return entries\n else:\n return self.fetch_url(self._url, self._params)\n elif self._file is not None:\n return self.fetch_file(self._file)\n\n def fetch_url(self, url, params=None):\n # get ics file\n if self._method == \"GET\":\n r = requests.get(\n url, params=params, headers=HEADERS, verify=self._verify_ssl\n )\n elif self._method == \"POST\":\n r = requests.post(\n url, data=params, headers=HEADERS, verify=self._verify_ssl\n )\n else:\n raise RuntimeError(\n \"Error: unknown method to fetch URL, use GET or POST; got {self._method}\"\n )\n r.encoding = \"utf-8\" # requests doesn't guess the encoding correctly\n\n # check the return code\n if not r.ok:\n _LOGGER.error(\n \"Error: the 
response is not ok; need code 200, but got code %s\"\n % r.status_code\n )\n return []\n\n return self._convert(r.text)\n\n def fetch_file(self, file):\n f = open(file)\n return self._convert(f.read())\n\n def _convert(self, data):\n dates = self._ics.convert(data)\n\n entries = []\n for d in dates:\n entries.append(Collection(d[0], d[1]))\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/ics.py"}]}
3,805
365
gh_patches_debug_38419
rasdani/github-patches
git_diff
PaddlePaddle__models-1540
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- fluid/PaddleNLP/sequence_tagging_for_ner 预测问题 用ner模型在预测的时候发现,给预测数据随意标记的label会影响到最终的预测结果;当把数据减少一些(比如10w)后,这个问题就没有了,数据比较大的时候term的预测结果就会往这个随意标记的类别大量倾斜。。。 定义term的类别只有1和0, 预测是时候任意定义所有label=2,发现用较大预测数据集时候,大部分term的标签被预测为2,数据量较少时候,则是正常的 预测结果正常: ![image](https://user-images.githubusercontent.com/19464821/49934978-c2566d80-ff0a-11e8-8726-091f6b1a7549.png) 预测结果出现问题: ![image](https://user-images.githubusercontent.com/19464821/49934993-cf735c80-ff0a-11e8-8fb0-0a8bb720777c.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fluid/PaddleNLP/sequence_tagging_for_ner/train.py` Content: ``` 1 from __future__ import print_function 2 3 import os 4 import math 5 import time 6 import numpy as np 7 import six 8 9 import paddle 10 import paddle.fluid as fluid 11 12 import reader 13 from network_conf import ner_net 14 from utils import logger, load_dict 15 from utils_extend import to_lodtensor, get_embedding 16 17 18 def test(exe, chunk_evaluator, inference_program, test_data, test_fetch_list, 19 place): 20 chunk_evaluator.reset() 21 for data in test_data(): 22 word = to_lodtensor([x[0] for x in data], place) 23 mark = to_lodtensor([x[1] for x in data], place) 24 target = to_lodtensor([x[2] for x in data], place) 25 rets = exe.run(inference_program, 26 feed={"word": word, 27 "mark": mark, 28 "target": target}, 29 fetch_list=test_fetch_list) 30 num_infer = np.array(rets[0]) 31 num_label = np.array(rets[1]) 32 num_correct = np.array(rets[2]) 33 chunk_evaluator.update(num_infer[0], num_label[0], num_correct[0]) 34 return chunk_evaluator.eval() 35 36 37 def main(train_data_file, 38 test_data_file, 39 vocab_file, 40 target_file, 41 emb_file, 42 model_save_dir, 43 num_passes, 44 use_gpu, 45 parallel, 46 batch_size=200): 47 if not os.path.exists(model_save_dir): 48 os.mkdir(model_save_dir) 49 50 word_dict = load_dict(vocab_file) 51 label_dict = load_dict(target_file) 52 53 word_vector_values = get_embedding(emb_file) 54 55 word_dict_len = len(word_dict) 56 label_dict_len = len(label_dict) 57 58 if "CE_MODE_X" in os.environ: 59 fluid.default_startup_program().random_seed = 110 60 61 avg_cost, feature_out, word, mark, target = ner_net( 62 word_dict_len, label_dict_len, parallel) 63 64 sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3) 65 sgd_optimizer.minimize(avg_cost) 66 67 crf_decode = fluid.layers.crf_decoding( 68 input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) 69 70 (precision, recall, f1_score, num_infer_chunks, num_label_chunks, 71 num_correct_chunks) = fluid.layers.chunk_eval( 72 input=crf_decode, 73 label=target, 74 chunk_scheme="IOB", 75 num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) 76 chunk_evaluator = fluid.metrics.ChunkEvaluator() 77 78 inference_program = fluid.default_main_program().clone(for_test=True) 79 test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks] 80 81 if "CE_MODE_X" not in os.environ: 82 train_reader = paddle.batch( 83 paddle.reader.shuffle( 84 reader.data_reader(train_data_file, word_dict, label_dict), 85 buf_size=20000), 86 batch_size=batch_size) 87 test_reader = paddle.batch( 88 paddle.reader.shuffle( 89 reader.data_reader(test_data_file, word_dict, label_dict), 90 buf_size=20000), 91 batch_size=batch_size) 92 else: 93 train_reader = paddle.batch( 94 reader.data_reader(train_data_file, word_dict, label_dict), 95 
batch_size=batch_size) 96 test_reader = paddle.batch( 97 reader.data_reader(test_data_file, word_dict, label_dict), 98 batch_size=batch_size) 99 100 place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() 101 feeder = fluid.DataFeeder(feed_list=[word, mark, target], place=place) 102 exe = fluid.Executor(place) 103 104 exe.run(fluid.default_startup_program()) 105 106 embedding_name = 'emb' 107 embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor() 108 embedding_param.set(word_vector_values, place) 109 110 time_begin = time.time() 111 for pass_id in six.moves.xrange(num_passes): 112 chunk_evaluator.reset() 113 for batch_id, data in enumerate(train_reader()): 114 cost_var, nums_infer, nums_label, nums_correct = exe.run( 115 fluid.default_main_program(), 116 feed=feeder.feed(data), 117 fetch_list=[ 118 avg_cost, num_infer_chunks, num_label_chunks, 119 num_correct_chunks 120 ]) 121 if batch_id % 5 == 0: 122 print("Pass " + str(pass_id) + ", Batch " + str(batch_id) + 123 ", Cost " + str(cost_var[0])) 124 chunk_evaluator.update(nums_infer, nums_label, nums_correct) 125 pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval() 126 print("[TrainSet] pass_id:" + str(pass_id) + " pass_precision:" + str( 127 pass_precision) + " pass_recall:" + str(pass_recall) + 128 " pass_f1_score:" + str(pass_f1_score)) 129 130 test_pass_precision, test_pass_recall, test_pass_f1_score = test( 131 exe, chunk_evaluator, inference_program, test_reader, 132 test_fetch_list, place) 133 print("[TestSet] pass_id:" + str(pass_id) + " pass_precision:" + str( 134 test_pass_precision) + " pass_recall:" + str(test_pass_recall) + 135 " pass_f1_score:" + str(test_pass_f1_score)) 136 137 save_dirname = os.path.join(model_save_dir, "params_pass_%d" % pass_id) 138 fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'], 139 crf_decode, exe) 140 141 if "CE_MODE_X" in os.environ: 142 print("kpis train_precision %f" % pass_precision) 143 print("kpis test_precision %f" % test_pass_precision) 144 print("kpis train_duration %f" % (time.time() - time_begin)) 145 146 147 if __name__ == "__main__": 148 main( 149 train_data_file="data/train", 150 test_data_file="data/test", 151 vocab_file="data/vocab.txt", 152 target_file="data/target.txt", 153 emb_file="data/wordVectors.txt", 154 model_save_dir="models", 155 num_passes=2000, 156 use_gpu=False, 157 parallel=False) 158 ``` Path: `fluid/PaddleNLP/sequence_tagging_for_ner/infer.py` Content: ``` 1 from __future__ import print_function 2 3 import numpy as np 4 import six 5 6 import paddle 7 import paddle.fluid as fluid 8 9 from network_conf import ner_net 10 import reader 11 from utils import load_dict, load_reverse_dict 12 from utils_extend import to_lodtensor 13 14 15 def infer(model_path, batch_size, test_data_file, vocab_file, target_file, 16 use_gpu): 17 """ 18 use the model under model_path to predict the test data, the result will be printed on the screen 19 20 return nothing 21 """ 22 word_dict = load_dict(vocab_file) 23 word_reverse_dict = load_reverse_dict(vocab_file) 24 25 label_dict = load_dict(target_file) 26 label_reverse_dict = load_reverse_dict(target_file) 27 28 test_data = paddle.batch( 29 reader.data_reader(test_data_file, word_dict, label_dict), 30 batch_size=batch_size) 31 place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() 32 exe = fluid.Executor(place) 33 34 inference_scope = fluid.core.Scope() 35 with fluid.scope_guard(inference_scope): 36 [inference_program, feed_target_names, 37 fetch_targets] = 
fluid.io.load_inference_model(model_path, exe) 38 for data in test_data(): 39 word = to_lodtensor([x[0] for x in data], place) 40 mark = to_lodtensor([x[1] for x in data], place) 41 target = to_lodtensor([x[2] for x in data], place) 42 crf_decode = exe.run( 43 inference_program, 44 feed={"word": word, 45 "mark": mark, 46 "target": target}, 47 fetch_list=fetch_targets, 48 return_numpy=False) 49 lod_info = (crf_decode[0].lod())[0] 50 np_data = np.array(crf_decode[0]) 51 assert len(data) == len(lod_info) - 1 52 for sen_index in six.moves.xrange(len(data)): 53 assert len(data[sen_index][0]) == lod_info[ 54 sen_index + 1] - lod_info[sen_index] 55 word_index = 0 56 for tag_index in six.moves.xrange(lod_info[sen_index], 57 lod_info[sen_index + 1]): 58 word = word_reverse_dict[data[sen_index][0][word_index]] 59 gold_tag = label_reverse_dict[data[sen_index][2][ 60 word_index]] 61 tag = label_reverse_dict[np_data[tag_index][0]] 62 print(word + "\t" + gold_tag + "\t" + tag) 63 word_index += 1 64 print("") 65 66 67 if __name__ == "__main__": 68 infer( 69 model_path="models/params_pass_0", 70 batch_size=6, 71 test_data_file="data/test", 72 vocab_file="data/vocab.txt", 73 target_file="data/target.txt", 74 use_gpu=False) 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py --- a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py +++ b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py @@ -38,12 +38,10 @@ for data in test_data(): word = to_lodtensor([x[0] for x in data], place) mark = to_lodtensor([x[1] for x in data], place) - target = to_lodtensor([x[2] for x in data], place) crf_decode = exe.run( inference_program, feed={"word": word, - "mark": mark, - "target": target}, + "mark": mark}, fetch_list=fetch_targets, return_numpy=False) lod_info = (crf_decode[0].lod())[0] diff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py --- a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py +++ b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py @@ -61,22 +61,21 @@ avg_cost, feature_out, word, mark, target = ner_net( word_dict_len, label_dict_len, parallel) - sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3) - sgd_optimizer.minimize(avg_cost) - crf_decode = fluid.layers.crf_decoding( input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) (precision, recall, f1_score, num_infer_chunks, num_label_chunks, - num_correct_chunks) = fluid.layers.chunk_eval( - input=crf_decode, - label=target, - chunk_scheme="IOB", - num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) + num_correct_chunks) = fluid.layers.chunk_eval( + input=crf_decode, + label=target, + chunk_scheme="IOB", + num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) chunk_evaluator = fluid.metrics.ChunkEvaluator() inference_program = fluid.default_main_program().clone(for_test=True) test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks] + sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3) + sgd_optimizer.minimize(avg_cost) if "CE_MODE_X" not in os.environ: train_reader = paddle.batch( @@ -135,7 +134,7 @@ " pass_f1_score:" + str(test_pass_f1_score)) save_dirname = os.path.join(model_save_dir, "params_pass_%d" % pass_id) - fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'], + fluid.io.save_inference_model(save_dirname, ['word', 'mark'], crf_decode, exe) if "CE_MODE_X" in os.environ:
{"golden_diff": "diff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py\n--- a/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py\n+++ b/fluid/PaddleNLP/sequence_tagging_for_ner/infer.py\n@@ -38,12 +38,10 @@\n for data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n- target = to_lodtensor([x[2] for x in data], place)\n crf_decode = exe.run(\n inference_program,\n feed={\"word\": word,\n- \"mark\": mark,\n- \"target\": target},\n+ \"mark\": mark},\n fetch_list=fetch_targets,\n return_numpy=False)\n lod_info = (crf_decode[0].lod())[0]\ndiff --git a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py\n--- a/fluid/PaddleNLP/sequence_tagging_for_ner/train.py\n+++ b/fluid/PaddleNLP/sequence_tagging_for_ner/train.py\n@@ -61,22 +61,21 @@\n avg_cost, feature_out, word, mark, target = ner_net(\n word_dict_len, label_dict_len, parallel)\n \n- sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)\n- sgd_optimizer.minimize(avg_cost)\n-\n crf_decode = fluid.layers.crf_decoding(\n input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))\n \n (precision, recall, f1_score, num_infer_chunks, num_label_chunks,\n- num_correct_chunks) = fluid.layers.chunk_eval(\n- input=crf_decode,\n- label=target,\n- chunk_scheme=\"IOB\",\n- num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))\n+ num_correct_chunks) = fluid.layers.chunk_eval(\n+ input=crf_decode,\n+ label=target,\n+ chunk_scheme=\"IOB\",\n+ num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))\n chunk_evaluator = fluid.metrics.ChunkEvaluator()\n \n inference_program = fluid.default_main_program().clone(for_test=True)\n test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks]\n+ sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)\n+ sgd_optimizer.minimize(avg_cost)\n \n if \"CE_MODE_X\" not in os.environ:\n train_reader = paddle.batch(\n@@ -135,7 +134,7 @@\n \" pass_f1_score:\" + str(test_pass_f1_score))\n \n save_dirname = os.path.join(model_save_dir, \"params_pass_%d\" % pass_id)\n- fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'],\n+ fluid.io.save_inference_model(save_dirname, ['word', 'mark'],\n crf_decode, exe)\n \n if \"CE_MODE_X\" in os.environ:\n", "issue": "fluid/PaddleNLP/sequence_tagging_for_ner \u9884\u6d4b\u95ee\u9898\n\u7528ner\u6a21\u578b\u5728\u9884\u6d4b\u7684\u65f6\u5019\u53d1\u73b0\uff0c\u7ed9\u9884\u6d4b\u6570\u636e\u968f\u610f\u6807\u8bb0\u7684label\u4f1a\u5f71\u54cd\u5230\u6700\u7ec8\u7684\u9884\u6d4b\u7ed3\u679c\uff1b\u5f53\u628a\u6570\u636e\u51cf\u5c11\u4e00\u4e9b(\u6bd4\u598210w)\u540e\uff0c\u8fd9\u4e2a\u95ee\u9898\u5c31\u6ca1\u6709\u4e86\uff0c\u6570\u636e\u6bd4\u8f83\u5927\u7684\u65f6\u5019term\u7684\u9884\u6d4b\u7ed3\u679c\u5c31\u4f1a\u5f80\u8fd9\u4e2a\u968f\u610f\u6807\u8bb0\u7684\u7c7b\u522b\u5927\u91cf\u503e\u659c\u3002\u3002\u3002\r\n\r\n\u5b9a\u4e49term\u7684\u7c7b\u522b\u53ea\u67091\u548c0\uff0c 
\u9884\u6d4b\u662f\u65f6\u5019\u4efb\u610f\u5b9a\u4e49\u6240\u6709label=2\uff0c\u53d1\u73b0\u7528\u8f83\u5927\u9884\u6d4b\u6570\u636e\u96c6\u65f6\u5019\uff0c\u5927\u90e8\u5206term\u7684\u6807\u7b7e\u88ab\u9884\u6d4b\u4e3a2\uff0c\u6570\u636e\u91cf\u8f83\u5c11\u65f6\u5019\uff0c\u5219\u662f\u6b63\u5e38\u7684\r\n\r\n\u9884\u6d4b\u7ed3\u679c\u6b63\u5e38\uff1a\r\n![image](https://user-images.githubusercontent.com/19464821/49934978-c2566d80-ff0a-11e8-8726-091f6b1a7549.png)\r\n\r\n\u9884\u6d4b\u7ed3\u679c\u51fa\u73b0\u95ee\u9898\uff1a\r\n![image](https://user-images.githubusercontent.com/19464821/49934993-cf735c80-ff0a-11e8-8fb0-0a8bb720777c.png)\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport os\nimport math\nimport time\nimport numpy as np\nimport six\n\nimport paddle\nimport paddle.fluid as fluid\n\nimport reader\nfrom network_conf import ner_net\nfrom utils import logger, load_dict\nfrom utils_extend import to_lodtensor, get_embedding\n\n\ndef test(exe, chunk_evaluator, inference_program, test_data, test_fetch_list,\n place):\n chunk_evaluator.reset()\n for data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n target = to_lodtensor([x[2] for x in data], place)\n rets = exe.run(inference_program,\n feed={\"word\": word,\n \"mark\": mark,\n \"target\": target},\n fetch_list=test_fetch_list)\n num_infer = np.array(rets[0])\n num_label = np.array(rets[1])\n num_correct = np.array(rets[2])\n chunk_evaluator.update(num_infer[0], num_label[0], num_correct[0])\n return chunk_evaluator.eval()\n\n\ndef main(train_data_file,\n test_data_file,\n vocab_file,\n target_file,\n emb_file,\n model_save_dir,\n num_passes,\n use_gpu,\n parallel,\n batch_size=200):\n if not os.path.exists(model_save_dir):\n os.mkdir(model_save_dir)\n\n word_dict = load_dict(vocab_file)\n label_dict = load_dict(target_file)\n\n word_vector_values = get_embedding(emb_file)\n\n word_dict_len = len(word_dict)\n label_dict_len = len(label_dict)\n\n if \"CE_MODE_X\" in os.environ:\n fluid.default_startup_program().random_seed = 110\n\n avg_cost, feature_out, word, mark, target = ner_net(\n word_dict_len, label_dict_len, parallel)\n\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)\n sgd_optimizer.minimize(avg_cost)\n\n crf_decode = fluid.layers.crf_decoding(\n input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))\n\n (precision, recall, f1_score, num_infer_chunks, num_label_chunks,\n num_correct_chunks) = fluid.layers.chunk_eval(\n input=crf_decode,\n label=target,\n chunk_scheme=\"IOB\",\n num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))\n chunk_evaluator = fluid.metrics.ChunkEvaluator()\n\n inference_program = fluid.default_main_program().clone(for_test=True)\n test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks]\n\n if \"CE_MODE_X\" not in os.environ:\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.data_reader(train_data_file, word_dict, label_dict),\n buf_size=20000),\n batch_size=batch_size)\n test_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.data_reader(test_data_file, word_dict, label_dict),\n buf_size=20000),\n batch_size=batch_size)\n else:\n train_reader = paddle.batch(\n reader.data_reader(train_data_file, word_dict, label_dict),\n batch_size=batch_size)\n test_reader = paddle.batch(\n reader.data_reader(test_data_file, word_dict, label_dict),\n batch_size=batch_size)\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n feeder = 
fluid.DataFeeder(feed_list=[word, mark, target], place=place)\n exe = fluid.Executor(place)\n\n exe.run(fluid.default_startup_program())\n\n embedding_name = 'emb'\n embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor()\n embedding_param.set(word_vector_values, place)\n\n time_begin = time.time()\n for pass_id in six.moves.xrange(num_passes):\n chunk_evaluator.reset()\n for batch_id, data in enumerate(train_reader()):\n cost_var, nums_infer, nums_label, nums_correct = exe.run(\n fluid.default_main_program(),\n feed=feeder.feed(data),\n fetch_list=[\n avg_cost, num_infer_chunks, num_label_chunks,\n num_correct_chunks\n ])\n if batch_id % 5 == 0:\n print(\"Pass \" + str(pass_id) + \", Batch \" + str(batch_id) +\n \", Cost \" + str(cost_var[0]))\n chunk_evaluator.update(nums_infer, nums_label, nums_correct)\n pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval()\n print(\"[TrainSet] pass_id:\" + str(pass_id) + \" pass_precision:\" + str(\n pass_precision) + \" pass_recall:\" + str(pass_recall) +\n \" pass_f1_score:\" + str(pass_f1_score))\n\n test_pass_precision, test_pass_recall, test_pass_f1_score = test(\n exe, chunk_evaluator, inference_program, test_reader,\n test_fetch_list, place)\n print(\"[TestSet] pass_id:\" + str(pass_id) + \" pass_precision:\" + str(\n test_pass_precision) + \" pass_recall:\" + str(test_pass_recall) +\n \" pass_f1_score:\" + str(test_pass_f1_score))\n\n save_dirname = os.path.join(model_save_dir, \"params_pass_%d\" % pass_id)\n fluid.io.save_inference_model(save_dirname, ['word', 'mark', 'target'],\n crf_decode, exe)\n\n if \"CE_MODE_X\" in os.environ:\n print(\"kpis\ttrain_precision\t%f\" % pass_precision)\n print(\"kpis\ttest_precision\t%f\" % test_pass_precision)\n print(\"kpis\ttrain_duration\t%f\" % (time.time() - time_begin))\n\n\nif __name__ == \"__main__\":\n main(\n train_data_file=\"data/train\",\n test_data_file=\"data/test\",\n vocab_file=\"data/vocab.txt\",\n target_file=\"data/target.txt\",\n emb_file=\"data/wordVectors.txt\",\n model_save_dir=\"models\",\n num_passes=2000,\n use_gpu=False,\n parallel=False)\n", "path": "fluid/PaddleNLP/sequence_tagging_for_ner/train.py"}, {"content": "from __future__ import print_function\n\nimport numpy as np\nimport six\n\nimport paddle\nimport paddle.fluid as fluid\n\nfrom network_conf import ner_net\nimport reader\nfrom utils import load_dict, load_reverse_dict\nfrom utils_extend import to_lodtensor\n\n\ndef infer(model_path, batch_size, test_data_file, vocab_file, target_file,\n use_gpu):\n \"\"\"\n use the model under model_path to predict the test data, the result will be printed on the screen\n\n return nothing\n \"\"\"\n word_dict = load_dict(vocab_file)\n word_reverse_dict = load_reverse_dict(vocab_file)\n\n label_dict = load_dict(target_file)\n label_reverse_dict = load_reverse_dict(target_file)\n\n test_data = paddle.batch(\n reader.data_reader(test_data_file, word_dict, label_dict),\n batch_size=batch_size)\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n inference_scope = fluid.core.Scope()\n with fluid.scope_guard(inference_scope):\n [inference_program, feed_target_names,\n fetch_targets] = fluid.io.load_inference_model(model_path, exe)\n for data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n target = to_lodtensor([x[2] for x in data], place)\n crf_decode = exe.run(\n inference_program,\n feed={\"word\": word,\n \"mark\": mark,\n \"target\": 
target},\n fetch_list=fetch_targets,\n return_numpy=False)\n lod_info = (crf_decode[0].lod())[0]\n np_data = np.array(crf_decode[0])\n assert len(data) == len(lod_info) - 1\n for sen_index in six.moves.xrange(len(data)):\n assert len(data[sen_index][0]) == lod_info[\n sen_index + 1] - lod_info[sen_index]\n word_index = 0\n for tag_index in six.moves.xrange(lod_info[sen_index],\n lod_info[sen_index + 1]):\n word = word_reverse_dict[data[sen_index][0][word_index]]\n gold_tag = label_reverse_dict[data[sen_index][2][\n word_index]]\n tag = label_reverse_dict[np_data[tag_index][0]]\n print(word + \"\\t\" + gold_tag + \"\\t\" + tag)\n word_index += 1\n print(\"\")\n\n\nif __name__ == \"__main__\":\n infer(\n model_path=\"models/params_pass_0\",\n batch_size=6,\n test_data_file=\"data/test\",\n vocab_file=\"data/vocab.txt\",\n target_file=\"data/target.txt\",\n use_gpu=False)\n", "path": "fluid/PaddleNLP/sequence_tagging_for_ner/infer.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport os\nimport math\nimport time\nimport numpy as np\nimport six\n\nimport paddle\nimport paddle.fluid as fluid\n\nimport reader\nfrom network_conf import ner_net\nfrom utils import logger, load_dict\nfrom utils_extend import to_lodtensor, get_embedding\n\n\ndef test(exe, chunk_evaluator, inference_program, test_data, test_fetch_list,\n place):\n chunk_evaluator.reset()\n for data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n target = to_lodtensor([x[2] for x in data], place)\n rets = exe.run(inference_program,\n feed={\"word\": word,\n \"mark\": mark,\n \"target\": target},\n fetch_list=test_fetch_list)\n num_infer = np.array(rets[0])\n num_label = np.array(rets[1])\n num_correct = np.array(rets[2])\n chunk_evaluator.update(num_infer[0], num_label[0], num_correct[0])\n return chunk_evaluator.eval()\n\n\ndef main(train_data_file,\n test_data_file,\n vocab_file,\n target_file,\n emb_file,\n model_save_dir,\n num_passes,\n use_gpu,\n parallel,\n batch_size=200):\n if not os.path.exists(model_save_dir):\n os.mkdir(model_save_dir)\n\n word_dict = load_dict(vocab_file)\n label_dict = load_dict(target_file)\n\n word_vector_values = get_embedding(emb_file)\n\n word_dict_len = len(word_dict)\n label_dict_len = len(label_dict)\n\n if \"CE_MODE_X\" in os.environ:\n fluid.default_startup_program().random_seed = 110\n\n avg_cost, feature_out, word, mark, target = ner_net(\n word_dict_len, label_dict_len, parallel)\n\n crf_decode = fluid.layers.crf_decoding(\n input=feature_out, param_attr=fluid.ParamAttr(name='crfw'))\n\n (precision, recall, f1_score, num_infer_chunks, num_label_chunks,\n num_correct_chunks) = fluid.layers.chunk_eval(\n input=crf_decode,\n label=target,\n chunk_scheme=\"IOB\",\n num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))\n chunk_evaluator = fluid.metrics.ChunkEvaluator()\n\n inference_program = fluid.default_main_program().clone(for_test=True)\n test_fetch_list = [num_infer_chunks, num_label_chunks, num_correct_chunks]\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=1e-3)\n sgd_optimizer.minimize(avg_cost)\n\n if \"CE_MODE_X\" not in os.environ:\n train_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.data_reader(train_data_file, word_dict, label_dict),\n buf_size=20000),\n batch_size=batch_size)\n test_reader = paddle.batch(\n paddle.reader.shuffle(\n reader.data_reader(test_data_file, word_dict, label_dict),\n buf_size=20000),\n batch_size=batch_size)\n else:\n train_reader = 
paddle.batch(\n reader.data_reader(train_data_file, word_dict, label_dict),\n batch_size=batch_size)\n test_reader = paddle.batch(\n reader.data_reader(test_data_file, word_dict, label_dict),\n batch_size=batch_size)\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n feeder = fluid.DataFeeder(feed_list=[word, mark, target], place=place)\n exe = fluid.Executor(place)\n\n exe.run(fluid.default_startup_program())\n\n embedding_name = 'emb'\n embedding_param = fluid.global_scope().find_var(embedding_name).get_tensor()\n embedding_param.set(word_vector_values, place)\n\n time_begin = time.time()\n for pass_id in six.moves.xrange(num_passes):\n chunk_evaluator.reset()\n for batch_id, data in enumerate(train_reader()):\n cost_var, nums_infer, nums_label, nums_correct = exe.run(\n fluid.default_main_program(),\n feed=feeder.feed(data),\n fetch_list=[\n avg_cost, num_infer_chunks, num_label_chunks,\n num_correct_chunks\n ])\n if batch_id % 5 == 0:\n print(\"Pass \" + str(pass_id) + \", Batch \" + str(batch_id) +\n \", Cost \" + str(cost_var[0]))\n chunk_evaluator.update(nums_infer, nums_label, nums_correct)\n pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval()\n print(\"[TrainSet] pass_id:\" + str(pass_id) + \" pass_precision:\" + str(\n pass_precision) + \" pass_recall:\" + str(pass_recall) +\n \" pass_f1_score:\" + str(pass_f1_score))\n\n test_pass_precision, test_pass_recall, test_pass_f1_score = test(\n exe, chunk_evaluator, inference_program, test_reader,\n test_fetch_list, place)\n print(\"[TestSet] pass_id:\" + str(pass_id) + \" pass_precision:\" + str(\n test_pass_precision) + \" pass_recall:\" + str(test_pass_recall) +\n \" pass_f1_score:\" + str(test_pass_f1_score))\n\n save_dirname = os.path.join(model_save_dir, \"params_pass_%d\" % pass_id)\n fluid.io.save_inference_model(save_dirname, ['word', 'mark'],\n crf_decode, exe)\n\n if \"CE_MODE_X\" in os.environ:\n print(\"kpis\ttrain_precision\t%f\" % pass_precision)\n print(\"kpis\ttest_precision\t%f\" % test_pass_precision)\n print(\"kpis\ttrain_duration\t%f\" % (time.time() - time_begin))\n\n\nif __name__ == \"__main__\":\n main(\n train_data_file=\"data/train\",\n test_data_file=\"data/test\",\n vocab_file=\"data/vocab.txt\",\n target_file=\"data/target.txt\",\n emb_file=\"data/wordVectors.txt\",\n model_save_dir=\"models\",\n num_passes=2000,\n use_gpu=False,\n parallel=False)\n", "path": "fluid/PaddleNLP/sequence_tagging_for_ner/train.py"}, {"content": "from __future__ import print_function\n\nimport numpy as np\nimport six\n\nimport paddle\nimport paddle.fluid as fluid\n\nfrom network_conf import ner_net\nimport reader\nfrom utils import load_dict, load_reverse_dict\nfrom utils_extend import to_lodtensor\n\n\ndef infer(model_path, batch_size, test_data_file, vocab_file, target_file,\n use_gpu):\n \"\"\"\n use the model under model_path to predict the test data, the result will be printed on the screen\n\n return nothing\n \"\"\"\n word_dict = load_dict(vocab_file)\n word_reverse_dict = load_reverse_dict(vocab_file)\n\n label_dict = load_dict(target_file)\n label_reverse_dict = load_reverse_dict(target_file)\n\n test_data = paddle.batch(\n reader.data_reader(test_data_file, word_dict, label_dict),\n batch_size=batch_size)\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n inference_scope = fluid.core.Scope()\n with fluid.scope_guard(inference_scope):\n [inference_program, feed_target_names,\n fetch_targets] = fluid.io.load_inference_model(model_path, exe)\n for 
data in test_data():\n word = to_lodtensor([x[0] for x in data], place)\n mark = to_lodtensor([x[1] for x in data], place)\n crf_decode = exe.run(\n inference_program,\n feed={\"word\": word,\n \"mark\": mark},\n fetch_list=fetch_targets,\n return_numpy=False)\n lod_info = (crf_decode[0].lod())[0]\n np_data = np.array(crf_decode[0])\n assert len(data) == len(lod_info) - 1\n for sen_index in six.moves.xrange(len(data)):\n assert len(data[sen_index][0]) == lod_info[\n sen_index + 1] - lod_info[sen_index]\n word_index = 0\n for tag_index in six.moves.xrange(lod_info[sen_index],\n lod_info[sen_index + 1]):\n word = word_reverse_dict[data[sen_index][0][word_index]]\n gold_tag = label_reverse_dict[data[sen_index][2][\n word_index]]\n tag = label_reverse_dict[np_data[tag_index][0]]\n print(word + \"\\t\" + gold_tag + \"\\t\" + tag)\n word_index += 1\n print(\"\")\n\n\nif __name__ == \"__main__\":\n infer(\n model_path=\"models/params_pass_0\",\n batch_size=6,\n test_data_file=\"data/test\",\n vocab_file=\"data/vocab.txt\",\n target_file=\"data/target.txt\",\n use_gpu=False)\n", "path": "fluid/PaddleNLP/sequence_tagging_for_ner/infer.py"}]}
num_tokens: 3,027
num_tokens_diff: 729
problem_id: gh_patches_debug_21223
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataDog__dd-trace-py-2060
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add dogpile.cache to support libraries We need to update https://github.com/DataDog/dd-trace-py/blob/master/docs/index.rst after #1123 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/dogpile_cache/__init__.py` Content: ``` 1 """ 2 Instrument dogpile.cache__ to report all cached lookups. 3 4 This will add spans around the calls to your cache backend (eg. redis, memory, 5 etc). The spans will also include the following tags: 6 7 - key/keys: The key(s) dogpile passed to your backend. Note that this will be 8 the output of the region's ``function_key_generator``, but before any key 9 mangling is applied (ie. the region's ``key_mangler``). 10 - region: Name of the region. 11 - backend: Name of the backend class. 12 - hit: If the key was found in the cache. 13 - expired: If the key is expired. This is only relevant if the key was found. 14 15 While cache tracing will generally already have keys in tags, some caching 16 setups will not have useful tag values - such as when you're using consistent 17 hashing with memcached - the key(s) will appear as a mangled hash. 18 :: 19 20 # Patch before importing dogpile.cache 21 from ddtrace import patch 22 patch(dogpile_cache=True) 23 24 from dogpile.cache import make_region 25 26 region = make_region().configure( 27 "dogpile.cache.pylibmc", 28 expiration_time=3600, 29 arguments={"url": ["127.0.0.1"]}, 30 ) 31 32 @region.cache_on_arguments() 33 def hello(name): 34 # Some complicated, slow calculation 35 return "Hello, {}".format(name) 36 37 .. __: https://dogpilecache.sqlalchemy.org/ 38 """ 39 from ...utils.importlib import require_modules 40 41 42 required_modules = ["dogpile.cache"] 43 44 with require_modules(required_modules) as missing_modules: 45 if not missing_modules: 46 from .patch import patch 47 from .patch import unpatch 48 49 __all__ = ["patch", "unpatch"] 50 ``` Path: `ddtrace/monkey.py` Content: ``` 1 """Patch libraries to be automatically instrumented. 2 3 It can monkey patch supported standard libraries and third party modules. 4 A patched module will automatically report spans with its default configuration. 5 6 A library instrumentation can be configured (for instance, to report as another service) 7 using Pin. For that, check its documentation. 
8 """ 9 import importlib 10 import os 11 import sys 12 import threading 13 14 from ddtrace.vendor.wrapt.importer import when_imported 15 16 from .internal.logger import get_logger 17 from .settings import config 18 from .utils import formats 19 20 21 log = get_logger(__name__) 22 23 # Default set of modules to automatically patch or not 24 PATCH_MODULES = { 25 "asyncio": True, 26 "boto": True, 27 "botocore": True, 28 "bottle": False, 29 "cassandra": True, 30 "celery": True, 31 "consul": True, 32 "django": True, 33 "elasticsearch": True, 34 "algoliasearch": True, 35 "futures": False, # experimental propagation 36 "grpc": True, 37 "mongoengine": True, 38 "mysql": True, 39 "mysqldb": True, 40 "pymysql": True, 41 "psycopg": True, 42 "pylibmc": True, 43 "pymemcache": True, 44 "pymongo": True, 45 "redis": True, 46 "rediscluster": True, 47 "requests": True, 48 "sanic": True, 49 "sqlalchemy": False, # Prefer DB client instrumentation 50 "sqlite3": True, 51 "aiohttp": True, # requires asyncio (Python 3.4+) 52 "aiopg": True, 53 "aiobotocore": False, 54 "httplib": False, 55 "vertica": True, 56 "molten": True, 57 "jinja2": True, 58 "mako": True, 59 "flask": True, 60 "kombu": False, 61 "starlette": True, 62 # Ignore some web framework integrations that might be configured explicitly in code 63 "falcon": False, 64 "pylons": False, 65 "pyramid": False, 66 # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true 67 "logging": config.logs_injection, 68 "pynamodb": True, 69 "pyodbc": True, 70 "fastapi": True, 71 } 72 73 _LOCK = threading.Lock() 74 _PATCHED_MODULES = set() 75 76 # Modules which are patched on first use 77 # DEV: These modules are patched when the user first imports them, rather than 78 # explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)` 79 # DEV: This ensures we do not patch a module until it is needed 80 # DEV: <contrib name> => <list of module names that trigger a patch> 81 _PATCH_ON_IMPORT = { 82 "aiohttp": ("aiohttp",), 83 "aiobotocore": ("aiobotocore",), 84 "celery": ("celery",), 85 "flask": ("flask, "), 86 "gevent": ("gevent",), 87 "requests": ("requests",), 88 "botocore": ("botocore",), 89 "elasticsearch": ("elasticsearch",), 90 "pynamodb": ("pynamodb",), 91 } 92 93 94 class PatchException(Exception): 95 """Wraps regular `Exception` class when patching modules""" 96 97 pass 98 99 100 class ModuleNotFoundException(PatchException): 101 pass 102 103 104 def _on_import_factory(module, raise_errors=True): 105 """Factory to create an import hook for the provided module name""" 106 107 def on_import(hook): 108 # Import and patch module 109 path = "ddtrace.contrib.%s" % module 110 imported_module = importlib.import_module(path) 111 imported_module.patch() 112 113 return on_import 114 115 116 def patch_all(**patch_modules): 117 """Automatically patches all available modules. 118 119 In addition to ``patch_modules``, an override can be specified via an 120 environment variable, ``DD_TRACE_<module>_ENABLED`` for each module. 121 122 ``patch_modules`` have the highest precedence for overriding. 123 124 :param dict patch_modules: Override whether particular modules are patched or not. 
125 126 >>> patch_all(redis=False, cassandra=False) 127 """ 128 modules = PATCH_MODULES.copy() 129 130 # The enabled setting can be overridden by environment variables 131 for module, enabled in modules.items(): 132 env_var = "DD_TRACE_%s_ENABLED" % module.upper() 133 if env_var not in os.environ: 134 continue 135 136 override_enabled = formats.asbool(os.environ[env_var]) 137 modules[module] = override_enabled 138 139 # Arguments take precedence over the environment and the defaults. 140 modules.update(patch_modules) 141 142 patch(raise_errors=False, **modules) 143 144 145 def patch(raise_errors=True, **patch_modules): 146 """Patch only a set of given modules. 147 148 :param bool raise_errors: Raise error if one patch fail. 149 :param dict patch_modules: List of modules to patch. 150 151 >>> patch(psycopg=True, elasticsearch=True) 152 """ 153 modules = [m for (m, should_patch) in patch_modules.items() if should_patch] 154 for module in modules: 155 if module in _PATCH_ON_IMPORT: 156 # If the module has already been imported then patch immediately 157 if module in sys.modules: 158 patch_module(module, raise_errors=raise_errors) 159 160 # Otherwise, add a hook to patch when it is imported for the first time 161 else: 162 # Use factory to create handler to close over `module` and `raise_errors` values from this loop 163 when_imported(module)(_on_import_factory(module, raise_errors)) 164 165 # manually add module to patched modules 166 with _LOCK: 167 _PATCHED_MODULES.add(module) 168 else: 169 patch_module(module, raise_errors=raise_errors) 170 171 patched_modules = get_patched_modules() 172 log.info( 173 "patched %s/%s modules (%s)", 174 len(patched_modules), 175 len(modules), 176 ",".join(patched_modules), 177 ) 178 179 180 def patch_module(module, raise_errors=True): 181 """Patch a single module 182 183 Returns if the module got properly patched. 184 """ 185 try: 186 return _patch_module(module) 187 except ModuleNotFoundException: 188 if raise_errors: 189 raise 190 return False 191 except Exception: 192 if raise_errors: 193 raise 194 log.debug("failed to patch %s", module, exc_info=True) 195 return False 196 197 198 def get_patched_modules(): 199 """Get the list of patched modules""" 200 with _LOCK: 201 return sorted(_PATCHED_MODULES) 202 203 204 def _patch_module(module): 205 """_patch_module will attempt to monkey patch the module. 206 207 Returns if the module got patched. 208 Can also raise errors if it fails. 209 """ 210 path = "ddtrace.contrib.%s" % module 211 with _LOCK: 212 if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT: 213 log.debug("already patched: %s", path) 214 return False 215 216 try: 217 imported_module = importlib.import_module(path) 218 except ImportError: 219 # if the import fails, the integration is not available 220 raise PatchException("integration '%s' not available" % path) 221 else: 222 # if patch() is not available in the module, it means 223 # that the library is not installed in the environment 224 if not hasattr(imported_module, "patch"): 225 raise ModuleNotFoundException("module '%s' not installed" % module) 226 227 imported_module.patch() 228 _PATCHED_MODULES.add(module) 229 return True 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/dogpile_cache/__init__.py b/ddtrace/contrib/dogpile_cache/__init__.py --- a/ddtrace/contrib/dogpile_cache/__init__.py +++ b/ddtrace/contrib/dogpile_cache/__init__.py @@ -1,12 +1,12 @@ """ Instrument dogpile.cache__ to report all cached lookups. -This will add spans around the calls to your cache backend (eg. redis, memory, +This will add spans around the calls to your cache backend (e.g. redis, memory, etc). The spans will also include the following tags: - key/keys: The key(s) dogpile passed to your backend. Note that this will be the output of the region's ``function_key_generator``, but before any key - mangling is applied (ie. the region's ``key_mangler``). + mangling is applied (i.e. the region's ``key_mangler``). - region: Name of the region. - backend: Name of the backend class. - hit: If the key was found in the cache. diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py --- a/ddtrace/monkey.py +++ b/ddtrace/monkey.py @@ -68,6 +68,7 @@ "pynamodb": True, "pyodbc": True, "fastapi": True, + "dogpile_cache": True, } _LOCK = threading.Lock()
{"golden_diff": "diff --git a/ddtrace/contrib/dogpile_cache/__init__.py b/ddtrace/contrib/dogpile_cache/__init__.py\n--- a/ddtrace/contrib/dogpile_cache/__init__.py\n+++ b/ddtrace/contrib/dogpile_cache/__init__.py\n@@ -1,12 +1,12 @@\n \"\"\"\n Instrument dogpile.cache__ to report all cached lookups.\n \n-This will add spans around the calls to your cache backend (eg. redis, memory,\n+This will add spans around the calls to your cache backend (e.g. redis, memory,\n etc). The spans will also include the following tags:\n \n - key/keys: The key(s) dogpile passed to your backend. Note that this will be\n the output of the region's ``function_key_generator``, but before any key\n- mangling is applied (ie. the region's ``key_mangler``).\n+ mangling is applied (i.e. the region's ``key_mangler``).\n - region: Name of the region.\n - backend: Name of the backend class.\n - hit: If the key was found in the cache.\ndiff --git a/ddtrace/monkey.py b/ddtrace/monkey.py\n--- a/ddtrace/monkey.py\n+++ b/ddtrace/monkey.py\n@@ -68,6 +68,7 @@\n \"pynamodb\": True,\n \"pyodbc\": True,\n \"fastapi\": True,\n+ \"dogpile_cache\": True,\n }\n \n _LOCK = threading.Lock()\n", "issue": "Add dogpile.cache to support libraries\nWe need to update https://github.com/DataDog/dd-trace-py/blob/master/docs/index.rst after #1123 \n", "before_files": [{"content": "\"\"\"\nInstrument dogpile.cache__ to report all cached lookups.\n\nThis will add spans around the calls to your cache backend (eg. redis, memory,\netc). The spans will also include the following tags:\n\n- key/keys: The key(s) dogpile passed to your backend. Note that this will be\n the output of the region's ``function_key_generator``, but before any key\n mangling is applied (ie. the region's ``key_mangler``).\n- region: Name of the region.\n- backend: Name of the backend class.\n- hit: If the key was found in the cache.\n- expired: If the key is expired. This is only relevant if the key was found.\n\nWhile cache tracing will generally already have keys in tags, some caching\nsetups will not have useful tag values - such as when you're using consistent\nhashing with memcached - the key(s) will appear as a mangled hash.\n::\n\n # Patch before importing dogpile.cache\n from ddtrace import patch\n patch(dogpile_cache=True)\n\n from dogpile.cache import make_region\n\n region = make_region().configure(\n \"dogpile.cache.pylibmc\",\n expiration_time=3600,\n arguments={\"url\": [\"127.0.0.1\"]},\n )\n\n @region.cache_on_arguments()\n def hello(name):\n # Some complicated, slow calculation\n return \"Hello, {}\".format(name)\n\n.. __: https://dogpilecache.sqlalchemy.org/\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = [\"dogpile.cache\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/dogpile_cache/__init__.py"}, {"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. 
For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import config\nfrom .utils import formats\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": False, # experimental propagation\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n \"pyodbc\": True,\n \"fastapi\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n \"flask\": (\"flask, \"),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\"elasticsearch\",),\n \"pynamodb\": (\"pynamodb\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n imported_module = importlib.import_module(path)\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n \"\"\"\n modules = PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = 
formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n # If the module has already been imported then patch immediately\n if module in sys.modules:\n patch_module(module, raise_errors=raise_errors)\n\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(module)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n patch_module(module, raise_errors=raise_errors)\n\n patched_modules = get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch()\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}], "after_files": [{"content": "\"\"\"\nInstrument dogpile.cache__ to report all cached lookups.\n\nThis will add spans around the calls to your cache backend (e.g. redis, memory,\netc). The spans will also include the following tags:\n\n- key/keys: The key(s) dogpile passed to your backend. Note that this will be\n the output of the region's ``function_key_generator``, but before any key\n mangling is applied (i.e. the region's ``key_mangler``).\n- region: Name of the region.\n- backend: Name of the backend class.\n- hit: If the key was found in the cache.\n- expired: If the key is expired. 
This is only relevant if the key was found.\n\nWhile cache tracing will generally already have keys in tags, some caching\nsetups will not have useful tag values - such as when you're using consistent\nhashing with memcached - the key(s) will appear as a mangled hash.\n::\n\n # Patch before importing dogpile.cache\n from ddtrace import patch\n patch(dogpile_cache=True)\n\n from dogpile.cache import make_region\n\n region = make_region().configure(\n \"dogpile.cache.pylibmc\",\n expiration_time=3600,\n arguments={\"url\": [\"127.0.0.1\"]},\n )\n\n @region.cache_on_arguments()\n def hello(name):\n # Some complicated, slow calculation\n return \"Hello, {}\".format(name)\n\n.. __: https://dogpilecache.sqlalchemy.org/\n\"\"\"\nfrom ...utils.importlib import require_modules\n\n\nrequired_modules = [\"dogpile.cache\"]\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch\n from .patch import unpatch\n\n __all__ = [\"patch\", \"unpatch\"]\n", "path": "ddtrace/contrib/dogpile_cache/__init__.py"}, {"content": "\"\"\"Patch libraries to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. For that, check its documentation.\n\"\"\"\nimport importlib\nimport os\nimport sys\nimport threading\n\nfrom ddtrace.vendor.wrapt.importer import when_imported\n\nfrom .internal.logger import get_logger\nfrom .settings import config\nfrom .utils import formats\n\n\nlog = get_logger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n \"asyncio\": True,\n \"boto\": True,\n \"botocore\": True,\n \"bottle\": False,\n \"cassandra\": True,\n \"celery\": True,\n \"consul\": True,\n \"django\": True,\n \"elasticsearch\": True,\n \"algoliasearch\": True,\n \"futures\": False, # experimental propagation\n \"grpc\": True,\n \"mongoengine\": True,\n \"mysql\": True,\n \"mysqldb\": True,\n \"pymysql\": True,\n \"psycopg\": True,\n \"pylibmc\": True,\n \"pymemcache\": True,\n \"pymongo\": True,\n \"redis\": True,\n \"rediscluster\": True,\n \"requests\": True,\n \"sanic\": True,\n \"sqlalchemy\": False, # Prefer DB client instrumentation\n \"sqlite3\": True,\n \"aiohttp\": True, # requires asyncio (Python 3.4+)\n \"aiopg\": True,\n \"aiobotocore\": False,\n \"httplib\": False,\n \"vertica\": True,\n \"molten\": True,\n \"jinja2\": True,\n \"mako\": True,\n \"flask\": True,\n \"kombu\": False,\n \"starlette\": True,\n # Ignore some web framework integrations that might be configured explicitly in code\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n # Auto-enable logging if the environment variable DD_LOGS_INJECTION is true\n \"logging\": config.logs_injection,\n \"pynamodb\": True,\n \"pyodbc\": True,\n \"fastapi\": True,\n \"dogpile_cache\": True,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n# Modules which are patched on first use\n# DEV: These modules are patched when the user first imports them, rather than\n# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)`\n# DEV: This ensures we do not patch a module until it is needed\n# DEV: <contrib name> => <list of module names that trigger a patch>\n_PATCH_ON_IMPORT = {\n \"aiohttp\": (\"aiohttp\",),\n \"aiobotocore\": (\"aiobotocore\",),\n \"celery\": (\"celery\",),\n 
\"flask\": (\"flask, \"),\n \"gevent\": (\"gevent\",),\n \"requests\": (\"requests\",),\n \"botocore\": (\"botocore\",),\n \"elasticsearch\": (\"elasticsearch\",),\n \"pynamodb\": (\"pynamodb\",),\n}\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n\n pass\n\n\nclass ModuleNotFoundException(PatchException):\n pass\n\n\ndef _on_import_factory(module, raise_errors=True):\n \"\"\"Factory to create an import hook for the provided module name\"\"\"\n\n def on_import(hook):\n # Import and patch module\n path = \"ddtrace.contrib.%s\" % module\n imported_module = importlib.import_module(path)\n imported_module.patch()\n\n return on_import\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n In addition to ``patch_modules``, an override can be specified via an\n environment variable, ``DD_TRACE_<module>_ENABLED`` for each module.\n\n ``patch_modules`` have the highest precedence for overriding.\n\n :param dict patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all(redis=False, cassandra=False)\n \"\"\"\n modules = PATCH_MODULES.copy()\n\n # The enabled setting can be overridden by environment variables\n for module, enabled in modules.items():\n env_var = \"DD_TRACE_%s_ENABLED\" % module.upper()\n if env_var not in os.environ:\n continue\n\n override_enabled = formats.asbool(os.environ[env_var])\n modules[module] = override_enabled\n\n # Arguments take precedence over the environment and the defaults.\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict patch_modules: List of modules to patch.\n\n >>> patch(psycopg=True, elasticsearch=True)\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n for module in modules:\n if module in _PATCH_ON_IMPORT:\n # If the module has already been imported then patch immediately\n if module in sys.modules:\n patch_module(module, raise_errors=raise_errors)\n\n # Otherwise, add a hook to patch when it is imported for the first time\n else:\n # Use factory to create handler to close over `module` and `raise_errors` values from this loop\n when_imported(module)(_on_import_factory(module, raise_errors))\n\n # manually add module to patched modules\n with _LOCK:\n _PATCHED_MODULES.add(module)\n else:\n patch_module(module, raise_errors=raise_errors)\n\n patched_modules = get_patched_modules()\n log.info(\n \"patched %s/%s modules (%s)\",\n len(patched_modules),\n len(modules),\n \",\".join(patched_modules),\n )\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except ModuleNotFoundException:\n if raise_errors:\n raise\n return False\n except Exception:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s\", module, exc_info=True)\n return False\n\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = \"ddtrace.contrib.%s\" % module\n with _LOCK:\n if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT:\n 
log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException(\"integration '%s' not available\" % path)\n else:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n if not hasattr(imported_module, \"patch\"):\n raise ModuleNotFoundException(\"module '%s' not installed\" % module)\n\n imported_module.patch()\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}]}
num_tokens: 3,013
num_tokens_diff: 325
problem_id: gh_patches_debug_19767
source: rasdani/github-patches
task_type: git_diff
in_source_id: SeldonIO__MLServer-624
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support older versions of grpcio Some attributes, like `trailing_metadata()` don't seem to exist in older versions of grpcio (e.g. `1.34.0`). Ideally we should also provide support for these ones to avoid clashes with older versions of TF (e.g. `2.5.x`). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlserver/grpc/utils.py` Content: ``` 1 import grpc 2 3 from typing import Callable, Dict, Tuple 4 from fastapi import status 5 6 from grpc import ServicerContext 7 8 from .logging import logger 9 from ..errors import MLServerError 10 11 12 STATUS_CODE_MAPPING = { 13 status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT, 14 status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND, 15 status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION, 16 status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL, 17 } 18 19 20 def to_headers(context: ServicerContext) -> Dict[str, str]: 21 metadata = context.invocation_metadata() + context.trailing_metadata() 22 headers = {} 23 for metadatum in metadata: 24 headers[metadatum.key] = metadatum.value 25 26 return headers 27 28 29 def to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]: 30 return tuple(headers.items()) 31 32 33 def _grpc_status_code(err: MLServerError): 34 return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN) 35 36 37 def handle_mlserver_error(f: Callable): 38 async def _inner(self, request, context): 39 try: 40 return await f(self, request, context) 41 except MLServerError as err: 42 logger.error(err) 43 await context.abort(code=_grpc_status_code(err), details=str(err)) 44 45 return _inner 46 ``` Path: `mlserver/grpc/interceptors.py` Content: ``` 1 from typing import Awaitable, Callable, Tuple 2 from functools import partial 3 from timeit import default_timer 4 5 from grpc.aio import ServerInterceptor, ServicerContext 6 from grpc import HandlerCallDetails, RpcMethodHandler, RpcError, StatusCode 7 from py_grpc_prometheus.prometheus_server_interceptor import ( 8 grpc_utils, 9 PromServerInterceptor as _PromServerInterceptor, 10 ) 11 12 from .logging import logger 13 14 15 class LoggingInterceptor(ServerInterceptor): 16 def _get_log_message(self, handler_call_details: HandlerCallDetails) -> str: 17 return handler_call_details.method 18 19 async def intercept_service( 20 self, 21 continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]], 22 handler_call_details: HandlerCallDetails, 23 ) -> RpcMethodHandler: 24 logger.info(self._get_log_message(handler_call_details)) 25 handler = await continuation(handler_call_details) 26 return handler 27 28 29 class PromServerInterceptor(ServerInterceptor): 30 """ 31 Simple wrapper around `py_grpc_prometheus` to support `grpc.aio`. 32 33 TODO: Open PR to add support upstream for AsyncIO. 
34 """ 35 36 def __init__(self, *args, **kwargs): 37 self._interceptor = _PromServerInterceptor(*args, **kwargs) 38 # We need a status code mapping to ensure we can convert from an int: 39 # https://groups.google.com/g/grpc-io/c/EdIXjMEaOyw/m/d3DeqmrJAAAJ 40 self._status_codes = {code.value[0]: code for code in StatusCode} 41 42 async def intercept_service( 43 self, 44 continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]], 45 handler_call_details: HandlerCallDetails, 46 ) -> RpcMethodHandler: 47 method_call = grpc_utils.split_method_call(handler_call_details) 48 handler = await continuation(handler_call_details) 49 50 metrics_wrapper = partial(self._metrics_wrapper, method_call) 51 return self._interceptor._wrap_rpc_behavior(handler, metrics_wrapper) 52 53 def _compute_status_code(self, servicer_context: ServicerContext) -> StatusCode: 54 """ 55 This method is mostly copied from `py-grpc-prometheus`, with a couple 56 minor changes to avoid using private APIs from ServicerContext which 57 don't exist anymore in `grpc.aio`. 58 To see the original implementation, please check: 59 60 https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L127-L134 61 """ 62 # Backwards compatibility for non-aio. 63 # TODO: It's not clear yet how to check whether the context has been 64 # cancelled with aio. 65 if hasattr(servicer_context, "_state"): 66 if servicer_context._state.client == "cancelled": 67 return StatusCode.CANCELLED 68 69 code = servicer_context.code() 70 if code is None: 71 return StatusCode.OK 72 73 # NOTE: With gRPC AIO, the `code` can be a plain integer that needs to 74 # be converted to an actual `StatusCode` entry 75 if isinstance(code, int): 76 if code not in self._status_codes: 77 return StatusCode.UNKNOWN 78 79 return self._status_codes[code] 80 81 return code 82 83 def _metrics_wrapper( 84 self, 85 method_call: Tuple[str, str, str], 86 old_handler: RpcMethodHandler, 87 request_streaming: bool, 88 response_streaming: bool, 89 ): 90 """ 91 Port of `py-grpc-prometheus` metrics_wrapper method to work with gRPC's 92 AsyncIO support. 93 To see the original implementation, please check: 94 95 https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L46-L120 96 """ 97 grpc_service_name, grpc_method_name, _ = method_call 98 99 async def _new_handler(request_or_iterator, servicer_context: ServicerContext): 100 response_or_iterator = None 101 try: 102 start = default_timer() 103 grpc_type = grpc_utils.get_method_type( 104 request_streaming, response_streaming 105 ) 106 try: 107 if request_streaming: 108 request_or_iterator = grpc_utils.wrap_iterator_inc_counter( 109 request_or_iterator, 110 self._interceptor._metrics[ 111 "grpc_server_stream_msg_received" 112 ], 113 grpc_type, 114 grpc_service_name, 115 grpc_method_name, 116 ) 117 else: 118 self._interceptor._metrics[ 119 "grpc_server_started_counter" 120 ].labels( 121 grpc_type=grpc_type, 122 grpc_service=grpc_service_name, 123 grpc_method=grpc_method_name, 124 ).inc() 125 126 # Invoke the original rpc behavior. 127 # NOTE: This is the main change required with respect to 128 # the original implementation in `py-grpc-prometheus`. 
129 response_or_iterator = await old_handler( 130 request_or_iterator, servicer_context 131 ) 132 133 if response_streaming: 134 sent_metric = self._interceptor._metrics[ 135 "grpc_server_stream_msg_sent" 136 ] 137 response_or_iterator = grpc_utils.wrap_iterator_inc_counter( 138 response_or_iterator, 139 sent_metric, 140 grpc_type, 141 grpc_service_name, 142 grpc_method_name, 143 ) 144 145 else: 146 self._interceptor.increase_grpc_server_handled_total_counter( 147 grpc_type, 148 grpc_service_name, 149 grpc_method_name, 150 self._compute_status_code(servicer_context).name, 151 ) 152 return response_or_iterator 153 except RpcError as e: 154 self._interceptor.increase_grpc_server_handled_total_counter( 155 grpc_type, 156 grpc_service_name, 157 grpc_method_name, 158 self._interceptor._compute_error_code(e).name, 159 ) 160 raise e 161 162 finally: 163 164 if not response_streaming: 165 if self._interceptor._legacy: 166 self._interceptor._metrics[ 167 "legacy_grpc_server_handled_latency_seconds" 168 ].labels( 169 grpc_type=grpc_type, 170 grpc_service=grpc_service_name, 171 grpc_method=grpc_method_name, 172 ).observe( 173 max(default_timer() - start, 0) 174 ) 175 elif self._interceptor._enable_handling_time_histogram: 176 self._interceptor._metrics[ 177 "grpc_server_handled_histogram" 178 ].labels( 179 grpc_type=grpc_type, 180 grpc_service=grpc_service_name, 181 grpc_method=grpc_method_name, 182 ).observe( 183 max(default_timer() - start, 0) 184 ) 185 except Exception as e: # pylint: disable=broad-except 186 # Allow user to skip the exceptions in order to maintain 187 # the basic functionality in the server 188 # The logging function in exception can be toggled with log_exceptions 189 # in order to suppress the noise in logging 190 if self._interceptor._skip_exceptions: 191 if self._interceptor._log_exceptions: 192 logger.error(e) 193 if response_or_iterator is None: 194 return response_or_iterator 195 return old_handler(request_or_iterator, servicer_context) 196 raise e 197 198 return _new_handler 199 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlserver/grpc/interceptors.py b/mlserver/grpc/interceptors.py --- a/mlserver/grpc/interceptors.py +++ b/mlserver/grpc/interceptors.py @@ -66,6 +66,9 @@ if servicer_context._state.client == "cancelled": return StatusCode.CANCELLED + if not hasattr(servicer_context, "code"): + return StatusCode.OK + code = servicer_context.code() if code is None: return StatusCode.OK diff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py --- a/mlserver/grpc/utils.py +++ b/mlserver/grpc/utils.py @@ -18,7 +18,11 @@ def to_headers(context: ServicerContext) -> Dict[str, str]: - metadata = context.invocation_metadata() + context.trailing_metadata() + metadata = context.invocation_metadata() + if hasattr(context, "trailing_metadata"): + # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose + # access to the trailing metadata on the service side + metadata += context.trailing_metadata() headers = {} for metadatum in metadata: headers[metadatum.key] = metadatum.value
{"golden_diff": "diff --git a/mlserver/grpc/interceptors.py b/mlserver/grpc/interceptors.py\n--- a/mlserver/grpc/interceptors.py\n+++ b/mlserver/grpc/interceptors.py\n@@ -66,6 +66,9 @@\n if servicer_context._state.client == \"cancelled\":\n return StatusCode.CANCELLED\n \n+ if not hasattr(servicer_context, \"code\"):\n+ return StatusCode.OK\n+\n code = servicer_context.code()\n if code is None:\n return StatusCode.OK\ndiff --git a/mlserver/grpc/utils.py b/mlserver/grpc/utils.py\n--- a/mlserver/grpc/utils.py\n+++ b/mlserver/grpc/utils.py\n@@ -18,7 +18,11 @@\n \n \n def to_headers(context: ServicerContext) -> Dict[str, str]:\n- metadata = context.invocation_metadata() + context.trailing_metadata()\n+ metadata = context.invocation_metadata()\n+ if hasattr(context, \"trailing_metadata\"):\n+ # NOTE: Older versions of `grpcio` (e.g. `grpcio==1.34.0`) don't expose\n+ # access to the trailing metadata on the service side\n+ metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n", "issue": "Support older versions of grpcio\nSome attributes, like `trailing_metadata()` don't seem to exist in older versions of grpcio (e.g. `1.34.0`). Ideally we should also provide support for these ones to avoid clashes with older versions of TF (e.g. `2.5.x`). \n", "before_files": [{"content": "import grpc\n\nfrom typing import Callable, Dict, Tuple\nfrom fastapi import status\n\nfrom grpc import ServicerContext\n\nfrom .logging import logger\nfrom ..errors import MLServerError\n\n\nSTATUS_CODE_MAPPING = {\n status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,\n status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,\n status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,\n status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,\n}\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata() + context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:\n return tuple(headers.items())\n\n\ndef _grpc_status_code(err: MLServerError):\n return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)\n\n\ndef handle_mlserver_error(f: Callable):\n async def _inner(self, request, context):\n try:\n return await f(self, request, context)\n except MLServerError as err:\n logger.error(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n\n return _inner\n", "path": "mlserver/grpc/utils.py"}, {"content": "from typing import Awaitable, Callable, Tuple\nfrom functools import partial\nfrom timeit import default_timer\n\nfrom grpc.aio import ServerInterceptor, ServicerContext\nfrom grpc import HandlerCallDetails, RpcMethodHandler, RpcError, StatusCode\nfrom py_grpc_prometheus.prometheus_server_interceptor import (\n grpc_utils,\n PromServerInterceptor as _PromServerInterceptor,\n)\n\nfrom .logging import logger\n\n\nclass LoggingInterceptor(ServerInterceptor):\n def _get_log_message(self, handler_call_details: HandlerCallDetails) -> str:\n return handler_call_details.method\n\n async def intercept_service(\n self,\n continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],\n handler_call_details: HandlerCallDetails,\n ) -> RpcMethodHandler:\n logger.info(self._get_log_message(handler_call_details))\n handler = await continuation(handler_call_details)\n return handler\n\n\nclass 
PromServerInterceptor(ServerInterceptor):\n \"\"\"\n Simple wrapper around `py_grpc_prometheus` to support `grpc.aio`.\n\n TODO: Open PR to add support upstream for AsyncIO.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._interceptor = _PromServerInterceptor(*args, **kwargs)\n # We need a status code mapping to ensure we can convert from an int:\n # https://groups.google.com/g/grpc-io/c/EdIXjMEaOyw/m/d3DeqmrJAAAJ\n self._status_codes = {code.value[0]: code for code in StatusCode}\n\n async def intercept_service(\n self,\n continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],\n handler_call_details: HandlerCallDetails,\n ) -> RpcMethodHandler:\n method_call = grpc_utils.split_method_call(handler_call_details)\n handler = await continuation(handler_call_details)\n\n metrics_wrapper = partial(self._metrics_wrapper, method_call)\n return self._interceptor._wrap_rpc_behavior(handler, metrics_wrapper)\n\n def _compute_status_code(self, servicer_context: ServicerContext) -> StatusCode:\n \"\"\"\n This method is mostly copied from `py-grpc-prometheus`, with a couple\n minor changes to avoid using private APIs from ServicerContext which\n don't exist anymore in `grpc.aio`.\n To see the original implementation, please check:\n\n https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L127-L134\n \"\"\"\n # Backwards compatibility for non-aio.\n # TODO: It's not clear yet how to check whether the context has been\n # cancelled with aio.\n if hasattr(servicer_context, \"_state\"):\n if servicer_context._state.client == \"cancelled\":\n return StatusCode.CANCELLED\n\n code = servicer_context.code()\n if code is None:\n return StatusCode.OK\n\n # NOTE: With gRPC AIO, the `code` can be a plain integer that needs to\n # be converted to an actual `StatusCode` entry\n if isinstance(code, int):\n if code not in self._status_codes:\n return StatusCode.UNKNOWN\n\n return self._status_codes[code]\n\n return code\n\n def _metrics_wrapper(\n self,\n method_call: Tuple[str, str, str],\n old_handler: RpcMethodHandler,\n request_streaming: bool,\n response_streaming: bool,\n ):\n \"\"\"\n Port of `py-grpc-prometheus` metrics_wrapper method to work with gRPC's\n AsyncIO support.\n To see the original implementation, please check:\n\n https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L46-L120\n \"\"\"\n grpc_service_name, grpc_method_name, _ = method_call\n\n async def _new_handler(request_or_iterator, servicer_context: ServicerContext):\n response_or_iterator = None\n try:\n start = default_timer()\n grpc_type = grpc_utils.get_method_type(\n request_streaming, response_streaming\n )\n try:\n if request_streaming:\n request_or_iterator = grpc_utils.wrap_iterator_inc_counter(\n request_or_iterator,\n self._interceptor._metrics[\n \"grpc_server_stream_msg_received\"\n ],\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n )\n else:\n self._interceptor._metrics[\n \"grpc_server_started_counter\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).inc()\n\n # Invoke the original rpc behavior.\n # NOTE: This is the main change required with respect to\n # the original implementation in `py-grpc-prometheus`.\n response_or_iterator = await old_handler(\n request_or_iterator, servicer_context\n )\n\n if response_streaming:\n sent_metric = 
self._interceptor._metrics[\n \"grpc_server_stream_msg_sent\"\n ]\n response_or_iterator = grpc_utils.wrap_iterator_inc_counter(\n response_or_iterator,\n sent_metric,\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n )\n\n else:\n self._interceptor.increase_grpc_server_handled_total_counter(\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n self._compute_status_code(servicer_context).name,\n )\n return response_or_iterator\n except RpcError as e:\n self._interceptor.increase_grpc_server_handled_total_counter(\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n self._interceptor._compute_error_code(e).name,\n )\n raise e\n\n finally:\n\n if not response_streaming:\n if self._interceptor._legacy:\n self._interceptor._metrics[\n \"legacy_grpc_server_handled_latency_seconds\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).observe(\n max(default_timer() - start, 0)\n )\n elif self._interceptor._enable_handling_time_histogram:\n self._interceptor._metrics[\n \"grpc_server_handled_histogram\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).observe(\n max(default_timer() - start, 0)\n )\n except Exception as e: # pylint: disable=broad-except\n # Allow user to skip the exceptions in order to maintain\n # the basic functionality in the server\n # The logging function in exception can be toggled with log_exceptions\n # in order to suppress the noise in logging\n if self._interceptor._skip_exceptions:\n if self._interceptor._log_exceptions:\n logger.error(e)\n if response_or_iterator is None:\n return response_or_iterator\n return old_handler(request_or_iterator, servicer_context)\n raise e\n\n return _new_handler\n", "path": "mlserver/grpc/interceptors.py"}], "after_files": [{"content": "import grpc\n\nfrom typing import Callable, Dict, Tuple\nfrom fastapi import status\n\nfrom grpc import ServicerContext\n\nfrom .logging import logger\nfrom ..errors import MLServerError\n\n\nSTATUS_CODE_MAPPING = {\n status.HTTP_400_BAD_REQUEST: grpc.StatusCode.INVALID_ARGUMENT,\n status.HTTP_404_NOT_FOUND: grpc.StatusCode.NOT_FOUND,\n status.HTTP_422_UNPROCESSABLE_ENTITY: grpc.StatusCode.FAILED_PRECONDITION,\n status.HTTP_500_INTERNAL_SERVER_ERROR: grpc.StatusCode.INTERNAL,\n}\n\n\ndef to_headers(context: ServicerContext) -> Dict[str, str]:\n metadata = context.invocation_metadata()\n if hasattr(context, \"trailing_metadata\"):\n # NOTE: Older versions of `grpcio` (e.g. 
`grpcio==1.34.0`) don't expose\n # access to the trailing metadata on the service side\n metadata += context.trailing_metadata()\n headers = {}\n for metadatum in metadata:\n headers[metadatum.key] = metadatum.value\n\n return headers\n\n\ndef to_metadata(headers: Dict[str, str]) -> Tuple[Tuple[str, str], ...]:\n return tuple(headers.items())\n\n\ndef _grpc_status_code(err: MLServerError):\n return STATUS_CODE_MAPPING.get(err.status_code, grpc.StatusCode.UNKNOWN)\n\n\ndef handle_mlserver_error(f: Callable):\n async def _inner(self, request, context):\n try:\n return await f(self, request, context)\n except MLServerError as err:\n logger.error(err)\n await context.abort(code=_grpc_status_code(err), details=str(err))\n\n return _inner\n", "path": "mlserver/grpc/utils.py"}, {"content": "from typing import Awaitable, Callable, Tuple\nfrom functools import partial\nfrom timeit import default_timer\n\nfrom grpc.aio import ServerInterceptor, ServicerContext\nfrom grpc import HandlerCallDetails, RpcMethodHandler, RpcError, StatusCode\nfrom py_grpc_prometheus.prometheus_server_interceptor import (\n grpc_utils,\n PromServerInterceptor as _PromServerInterceptor,\n)\n\nfrom .logging import logger\n\n\nclass LoggingInterceptor(ServerInterceptor):\n def _get_log_message(self, handler_call_details: HandlerCallDetails) -> str:\n return handler_call_details.method\n\n async def intercept_service(\n self,\n continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],\n handler_call_details: HandlerCallDetails,\n ) -> RpcMethodHandler:\n logger.info(self._get_log_message(handler_call_details))\n handler = await continuation(handler_call_details)\n return handler\n\n\nclass PromServerInterceptor(ServerInterceptor):\n \"\"\"\n Simple wrapper around `py_grpc_prometheus` to support `grpc.aio`.\n\n TODO: Open PR to add support upstream for AsyncIO.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self._interceptor = _PromServerInterceptor(*args, **kwargs)\n # We need a status code mapping to ensure we can convert from an int:\n # https://groups.google.com/g/grpc-io/c/EdIXjMEaOyw/m/d3DeqmrJAAAJ\n self._status_codes = {code.value[0]: code for code in StatusCode}\n\n async def intercept_service(\n self,\n continuation: Callable[[HandlerCallDetails], Awaitable[RpcMethodHandler]],\n handler_call_details: HandlerCallDetails,\n ) -> RpcMethodHandler:\n method_call = grpc_utils.split_method_call(handler_call_details)\n handler = await continuation(handler_call_details)\n\n metrics_wrapper = partial(self._metrics_wrapper, method_call)\n return self._interceptor._wrap_rpc_behavior(handler, metrics_wrapper)\n\n def _compute_status_code(self, servicer_context: ServicerContext) -> StatusCode:\n \"\"\"\n This method is mostly copied from `py-grpc-prometheus`, with a couple\n minor changes to avoid using private APIs from ServicerContext which\n don't exist anymore in `grpc.aio`.\n To see the original implementation, please check:\n\n https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L127-L134\n \"\"\"\n # Backwards compatibility for non-aio.\n # TODO: It's not clear yet how to check whether the context has been\n # cancelled with aio.\n if hasattr(servicer_context, \"_state\"):\n if servicer_context._state.client == \"cancelled\":\n return StatusCode.CANCELLED\n\n if not hasattr(servicer_context, \"code\"):\n return StatusCode.OK\n\n code = servicer_context.code()\n if code is None:\n return StatusCode.OK\n\n # NOTE: 
With gRPC AIO, the `code` can be a plain integer that needs to\n # be converted to an actual `StatusCode` entry\n if isinstance(code, int):\n if code not in self._status_codes:\n return StatusCode.UNKNOWN\n\n return self._status_codes[code]\n\n return code\n\n def _metrics_wrapper(\n self,\n method_call: Tuple[str, str, str],\n old_handler: RpcMethodHandler,\n request_streaming: bool,\n response_streaming: bool,\n ):\n \"\"\"\n Port of `py-grpc-prometheus` metrics_wrapper method to work with gRPC's\n AsyncIO support.\n To see the original implementation, please check:\n\n https://github.com/lchenn/py-grpc-prometheus/blob/eb9dee1f0a4e57cef220193ee48021dc9a9f3d82/py_grpc_prometheus/prometheus_server_interceptor.py#L46-L120\n \"\"\"\n grpc_service_name, grpc_method_name, _ = method_call\n\n async def _new_handler(request_or_iterator, servicer_context: ServicerContext):\n response_or_iterator = None\n try:\n start = default_timer()\n grpc_type = grpc_utils.get_method_type(\n request_streaming, response_streaming\n )\n try:\n if request_streaming:\n request_or_iterator = grpc_utils.wrap_iterator_inc_counter(\n request_or_iterator,\n self._interceptor._metrics[\n \"grpc_server_stream_msg_received\"\n ],\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n )\n else:\n self._interceptor._metrics[\n \"grpc_server_started_counter\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).inc()\n\n # Invoke the original rpc behavior.\n # NOTE: This is the main change required with respect to\n # the original implementation in `py-grpc-prometheus`.\n response_or_iterator = await old_handler(\n request_or_iterator, servicer_context\n )\n\n if response_streaming:\n sent_metric = self._interceptor._metrics[\n \"grpc_server_stream_msg_sent\"\n ]\n response_or_iterator = grpc_utils.wrap_iterator_inc_counter(\n response_or_iterator,\n sent_metric,\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n )\n\n else:\n self._interceptor.increase_grpc_server_handled_total_counter(\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n self._compute_status_code(servicer_context).name,\n )\n return response_or_iterator\n except RpcError as e:\n self._interceptor.increase_grpc_server_handled_total_counter(\n grpc_type,\n grpc_service_name,\n grpc_method_name,\n self._interceptor._compute_error_code(e).name,\n )\n raise e\n\n finally:\n\n if not response_streaming:\n if self._interceptor._legacy:\n self._interceptor._metrics[\n \"legacy_grpc_server_handled_latency_seconds\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).observe(\n max(default_timer() - start, 0)\n )\n elif self._interceptor._enable_handling_time_histogram:\n self._interceptor._metrics[\n \"grpc_server_handled_histogram\"\n ].labels(\n grpc_type=grpc_type,\n grpc_service=grpc_service_name,\n grpc_method=grpc_method_name,\n ).observe(\n max(default_timer() - start, 0)\n )\n except Exception as e: # pylint: disable=broad-except\n # Allow user to skip the exceptions in order to maintain\n # the basic functionality in the server\n # The logging function in exception can be toggled with log_exceptions\n # in order to suppress the noise in logging\n if self._interceptor._skip_exceptions:\n if self._interceptor._log_exceptions:\n logger.error(e)\n if response_or_iterator is None:\n return response_or_iterator\n return old_handler(request_or_iterator, servicer_context)\n raise e\n\n return _new_handler\n", "path": "mlserver/grpc/interceptors.py"}]}
num_tokens: 2,777
num_tokens_diff: 282
problem_id: gh_patches_debug_988
source: rasdani/github-patches
task_type: git_diff
in_source_id: cowrie__cowrie-1030
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials Fresh install. I tried to exclude 'password' or 'abc123' from valid passwords for the user root Now file looks like ``` root:x:!root root:x:!123456 root:x:!password root:x:* ``` Retarted cowrie, but no way to deny login with root/password credentials Maybe, some sort of problem with new regexp checking? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cowrie/core/auth.py` Content: ``` 1 # Copyright (c) 2009-2014 Upi Tamminen <[email protected]> 2 # See the COPYRIGHT file for more information 3 4 """ 5 This module contains authentication code 6 """ 7 8 from __future__ import absolute_import, division 9 10 import json 11 import re 12 from collections import OrderedDict 13 from os import path 14 from random import randint 15 16 from twisted.python import log 17 18 from cowrie.core.config import CONFIG 19 20 _USERDB_DEFAULTS = [ 21 'root:x:!root', 22 'root:x:!123456', 23 'root:x:!/honeypot/i', 24 'root:x:*', 25 'richard:x:*', 26 'richard:x:fout', 27 ] 28 29 30 class UserDB(object): 31 """ 32 By Walter de Jong <[email protected]> 33 """ 34 35 def __init__(self): 36 self.userdb = OrderedDict() 37 self.load() 38 39 def load(self): 40 """ 41 load the user db 42 """ 43 44 try: 45 with open('{}/userdb.txt'.format(CONFIG.get('honeypot', 'etc_path')), 'r') as db: 46 userdb = db.readlines() 47 except IOError: 48 log.msg("Could not read etc/userdb.txt, default database activated") 49 userdb = _USERDB_DEFAULTS 50 51 for user in userdb: 52 if not user.startswith('#'): 53 try: 54 login = user.split(':')[0].encode('utf8') 55 password = user.split(':')[2].strip().encode('utf8') 56 except IndexError: 57 continue 58 else: 59 self.adduser(login, password) 60 61 def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'): 62 for credentials, policy in self.userdb.items(): 63 login, passwd = credentials 64 65 if self.match_rule(login, thelogin): 66 if self.match_rule(passwd, thepasswd): 67 return policy 68 69 return False 70 71 def match_rule(self, rule, input): 72 if type(rule) is bytes: 73 return rule in [b'*', input] 74 else: 75 return bool(rule.search(input)) 76 77 def re_or_str(self, rule): 78 """ 79 Convert a /.../ type rule to a regex, otherwise return the string as-is 80 81 @param login: rule 82 @type login: bytes 83 """ 84 res = re.match(br'/(.+)/(i)?$', rule) 85 if res: 86 return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0) 87 88 return rule 89 90 def adduser(self, login, passwd): 91 """ 92 All arguments are bytes 93 94 @param login: user id 95 @type login: bytes 96 @param passwd: password 97 @type passwd: bytes 98 """ 99 login = self.re_or_str(login) 100 101 if passwd[0] == b'!': 102 policy = False 103 passwd = passwd[1:] 104 else: 105 policy = True 106 107 passwd = self.re_or_str(passwd) 108 self.userdb[(login, passwd)] = policy 109 110 111 class AuthRandom(object): 112 """ 113 Alternative class that defines the checklogin() method. 114 Users will be authenticated after a random number of attempts. 115 """ 116 117 def __init__(self): 118 # Default values 119 self.mintry, self.maxtry, self.maxcache = 2, 5, 10 120 121 # Are there auth_class parameters? 
122 if CONFIG.has_option('honeypot', 'auth_class_parameters'): 123 parameters = CONFIG.get('honeypot', 'auth_class_parameters') 124 parlist = parameters.split(',') 125 if len(parlist) == 3: 126 self.mintry = int(parlist[0]) 127 self.maxtry = int(parlist[1]) 128 self.maxcache = int(parlist[2]) 129 130 if self.maxtry < self.mintry: 131 self.maxtry = self.mintry + 1 132 log.msg("maxtry < mintry, adjusting maxtry to: {}".format(self.maxtry)) 133 self.uservar = {} 134 self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path')) 135 self.loadvars() 136 137 def loadvars(self): 138 """ 139 Load user vars from json file 140 """ 141 if path.isfile(self.uservar_file): 142 with open(self.uservar_file, 'r') as fp: 143 try: 144 self.uservar = json.load(fp) 145 except Exception: 146 self.uservar = {} 147 148 def savevars(self): 149 """ 150 Save the user vars to json file 151 """ 152 data = self.uservar 153 # Note: this is subject to races between cowrie logins 154 with open(self.uservar_file, 'w') as fp: 155 json.dump(data, fp) 156 157 def checklogin(self, thelogin, thepasswd, src_ip): 158 """ 159 Every new source IP will have to try a random number of times between 160 'mintry' and 'maxtry' before succeeding to login. 161 All username/password combinations must be different. 162 The successful login combination is stored with the IP address. 163 Successful username/passwords pairs are also cached for 'maxcache' times. 164 This is to allow access for returns from different IP addresses. 165 Variables are saved in 'uservar.json' in the data directory. 166 """ 167 168 auth = False 169 userpass = str(thelogin) + ':' + str(thepasswd) 170 171 if 'cache' not in self.uservar: 172 self.uservar['cache'] = [] 173 cache = self.uservar['cache'] 174 175 # Check if it is the first visit from src_ip 176 if src_ip not in self.uservar: 177 self.uservar[src_ip] = {} 178 ipinfo = self.uservar[src_ip] 179 ipinfo['try'] = 0 180 if userpass in cache: 181 log.msg("first time for {}, found cached: {}".format(src_ip, userpass)) 182 ipinfo['max'] = 1 183 ipinfo['user'] = str(thelogin) 184 ipinfo['pw'] = str(thepasswd) 185 auth = True 186 self.savevars() 187 return auth 188 else: 189 ipinfo['max'] = randint(self.mintry, self.maxtry) 190 log.msg("first time for {}, need: {}".format(src_ip, ipinfo['max'])) 191 else: 192 if userpass in cache: 193 ipinfo = self.uservar[src_ip] 194 log.msg("Found cached: {}".format(userpass)) 195 ipinfo['max'] = 1 196 ipinfo['user'] = str(thelogin) 197 ipinfo['pw'] = str(thepasswd) 198 auth = True 199 self.savevars() 200 return auth 201 202 ipinfo = self.uservar[src_ip] 203 204 # Fill in missing variables 205 if 'max' not in ipinfo: 206 ipinfo['max'] = randint(self.mintry, self.maxtry) 207 if 'try' not in ipinfo: 208 ipinfo['try'] = 0 209 if 'tried' not in ipinfo: 210 ipinfo['tried'] = [] 211 212 # Don't count repeated username/password combinations 213 if userpass in ipinfo['tried']: 214 log.msg('already tried this combination') 215 self.savevars() 216 return auth 217 218 ipinfo['try'] += 1 219 attempts = ipinfo['try'] 220 need = ipinfo['max'] 221 log.msg("login attempt: {}".format(attempts)) 222 223 # Check if enough login attempts are tried 224 if attempts < need: 225 self.uservar[src_ip]['tried'].append(userpass) 226 elif attempts == need: 227 ipinfo['user'] = str(thelogin) 228 ipinfo['pw'] = str(thepasswd) 229 cache.append(userpass) 230 if len(cache) > self.maxcache: 231 cache.pop(0) 232 auth = True 233 # Returning after successful login 234 elif attempts > need: 235 if 
'user' not in ipinfo or 'pw' not in ipinfo: 236 log.msg('return, but username or password not set!!!') 237 ipinfo['tried'].append(userpass) 238 ipinfo['try'] = 1 239 else: 240 log.msg("login return, expect: [{}/{}]".format(ipinfo['user'], ipinfo['pw'])) 241 if thelogin == ipinfo['user'] and str(thepasswd) == ipinfo['pw']: 242 auth = True 243 self.savevars() 244 return auth 245 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cowrie/core/auth.py b/src/cowrie/core/auth.py --- a/src/cowrie/core/auth.py +++ b/src/cowrie/core/auth.py @@ -98,7 +98,7 @@ """ login = self.re_or_str(login) - if passwd[0] == b'!': + if passwd[0] == ord("!"): policy = False passwd = passwd[1:] else:
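The diff above hinges on a Python 3 detail rather than on the regex handling the reporter suspected: indexing a `bytes` object yields an `int`, so the original check `passwd[0] == b'!'` can never be true and every `!`-prefixed deny rule in userdb.txt is silently treated as an ordinary password. A minimal sketch of the comparison (plain Python, no cowrie imports needed):

```
passwd = b'!password'
print(passwd[0] == b'!')       # False: passwd[0] is the int 33, not the bytes b'!'
print(passwd[0] == ord('!'))   # True: the comparison the patched adduser() now makes
print(passwd[1:])              # b'password' -- the deny rule with the '!' stripped off
```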
{"golden_diff": "diff --git a/src/cowrie/core/auth.py b/src/cowrie/core/auth.py\n--- a/src/cowrie/core/auth.py\n+++ b/src/cowrie/core/auth.py\n@@ -98,7 +98,7 @@\n \"\"\"\n login = self.re_or_str(login)\n \n- if passwd[0] == b'!':\n+ if passwd[0] == ord(\"!\"):\n policy = False\n passwd = passwd[1:]\n else:\n", "issue": "adding root:x:!password to userdb.txt doesn't exclude root/password as valid credentials\nFresh install.\r\n\r\nI tried to exclude 'password' or 'abc123' from valid passwords for the user root\r\n\r\nNow file looks like \r\n```\r\nroot:x:!root\r\nroot:x:!123456\r\nroot:x:!password\r\nroot:x:*\r\n```\r\n\r\nRetarted cowrie, but no way to deny login with root/password credentials\r\n\r\nMaybe, some sort of problem with new regexp checking?\n", "before_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains authentication code\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport json\nimport re\nfrom collections import OrderedDict\nfrom os import path\nfrom random import randint\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\n_USERDB_DEFAULTS = [\n 'root:x:!root',\n 'root:x:!123456',\n 'root:x:!/honeypot/i',\n 'root:x:*',\n 'richard:x:*',\n 'richard:x:fout',\n]\n\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <[email protected]>\n \"\"\"\n\n def __init__(self):\n self.userdb = OrderedDict()\n self.load()\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n try:\n with open('{}/userdb.txt'.format(CONFIG.get('honeypot', 'etc_path')), 'r') as db:\n userdb = db.readlines()\n except IOError:\n log.msg(\"Could not read etc/userdb.txt, default database activated\")\n userdb = _USERDB_DEFAULTS\n\n for user in userdb:\n if not user.startswith('#'):\n try:\n login = user.split(':')[0].encode('utf8')\n password = user.split(':')[2].strip().encode('utf8')\n except IndexError:\n continue\n else:\n self.adduser(login, password)\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n\n @param login: rule\n @type login: bytes\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n def adduser(self, login, passwd):\n \"\"\"\n All arguments are bytes\n\n @param login: user id\n @type login: bytes\n @param passwd: password\n @type passwd: bytes\n \"\"\"\n login = self.re_or_str(login)\n\n if passwd[0] == b'!':\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = 
parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'r') as fp:\n try:\n self.uservar = json.load(fp)\n except Exception:\n self.uservar = {}\n\n def savevars(self):\n \"\"\"\n Save the user vars to json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between cowrie logins\n with open(self.uservar_file, 'w') as fp:\n json.dump(data, fp)\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = str(thelogin) + ':' + str(thepasswd)\n\n if 'cache' not in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n else:\n if userpass in cache:\n ipinfo = self.uservar[src_ip]\n log.msg(\"Found cached: {}\".format(userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if 'max' not in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if 'try' not in ipinfo:\n ipinfo['try'] = 0\n if 'tried' not in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if 'user' not in ipinfo or 'pw' not in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and str(thepasswd) == ipinfo['pw']:\n auth = True\n 
self.savevars()\n return auth\n", "path": "src/cowrie/core/auth.py"}], "after_files": [{"content": "# Copyright (c) 2009-2014 Upi Tamminen <[email protected]>\n# See the COPYRIGHT file for more information\n\n\"\"\"\nThis module contains authentication code\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport json\nimport re\nfrom collections import OrderedDict\nfrom os import path\nfrom random import randint\n\nfrom twisted.python import log\n\nfrom cowrie.core.config import CONFIG\n\n_USERDB_DEFAULTS = [\n 'root:x:!root',\n 'root:x:!123456',\n 'root:x:!/honeypot/i',\n 'root:x:*',\n 'richard:x:*',\n 'richard:x:fout',\n]\n\n\nclass UserDB(object):\n \"\"\"\n By Walter de Jong <[email protected]>\n \"\"\"\n\n def __init__(self):\n self.userdb = OrderedDict()\n self.load()\n\n def load(self):\n \"\"\"\n load the user db\n \"\"\"\n\n try:\n with open('{}/userdb.txt'.format(CONFIG.get('honeypot', 'etc_path')), 'r') as db:\n userdb = db.readlines()\n except IOError:\n log.msg(\"Could not read etc/userdb.txt, default database activated\")\n userdb = _USERDB_DEFAULTS\n\n for user in userdb:\n if not user.startswith('#'):\n try:\n login = user.split(':')[0].encode('utf8')\n password = user.split(':')[2].strip().encode('utf8')\n except IndexError:\n continue\n else:\n self.adduser(login, password)\n\n def checklogin(self, thelogin, thepasswd, src_ip='0.0.0.0'):\n for credentials, policy in self.userdb.items():\n login, passwd = credentials\n\n if self.match_rule(login, thelogin):\n if self.match_rule(passwd, thepasswd):\n return policy\n\n return False\n\n def match_rule(self, rule, input):\n if type(rule) is bytes:\n return rule in [b'*', input]\n else:\n return bool(rule.search(input))\n\n def re_or_str(self, rule):\n \"\"\"\n Convert a /.../ type rule to a regex, otherwise return the string as-is\n\n @param login: rule\n @type login: bytes\n \"\"\"\n res = re.match(br'/(.+)/(i)?$', rule)\n if res:\n return re.compile(res.group(1), re.IGNORECASE if res.group(2) else 0)\n\n return rule\n\n def adduser(self, login, passwd):\n \"\"\"\n All arguments are bytes\n\n @param login: user id\n @type login: bytes\n @param passwd: password\n @type passwd: bytes\n \"\"\"\n login = self.re_or_str(login)\n\n if passwd[0] == ord(\"!\"):\n policy = False\n passwd = passwd[1:]\n else:\n policy = True\n\n passwd = self.re_or_str(passwd)\n self.userdb[(login, passwd)] = policy\n\n\nclass AuthRandom(object):\n \"\"\"\n Alternative class that defines the checklogin() method.\n Users will be authenticated after a random number of attempts.\n \"\"\"\n\n def __init__(self):\n # Default values\n self.mintry, self.maxtry, self.maxcache = 2, 5, 10\n\n # Are there auth_class parameters?\n if CONFIG.has_option('honeypot', 'auth_class_parameters'):\n parameters = CONFIG.get('honeypot', 'auth_class_parameters')\n parlist = parameters.split(',')\n if len(parlist) == 3:\n self.mintry = int(parlist[0])\n self.maxtry = int(parlist[1])\n self.maxcache = int(parlist[2])\n\n if self.maxtry < self.mintry:\n self.maxtry = self.mintry + 1\n log.msg(\"maxtry < mintry, adjusting maxtry to: {}\".format(self.maxtry))\n self.uservar = {}\n self.uservar_file = '{}/auth_random.json'.format(CONFIG.get('honeypot', 'state_path'))\n self.loadvars()\n\n def loadvars(self):\n \"\"\"\n Load user vars from json file\n \"\"\"\n if path.isfile(self.uservar_file):\n with open(self.uservar_file, 'r') as fp:\n try:\n self.uservar = json.load(fp)\n except Exception:\n self.uservar = {}\n\n def savevars(self):\n \"\"\"\n Save the user vars to 
json file\n \"\"\"\n data = self.uservar\n # Note: this is subject to races between cowrie logins\n with open(self.uservar_file, 'w') as fp:\n json.dump(data, fp)\n\n def checklogin(self, thelogin, thepasswd, src_ip):\n \"\"\"\n Every new source IP will have to try a random number of times between\n 'mintry' and 'maxtry' before succeeding to login.\n All username/password combinations must be different.\n The successful login combination is stored with the IP address.\n Successful username/passwords pairs are also cached for 'maxcache' times.\n This is to allow access for returns from different IP addresses.\n Variables are saved in 'uservar.json' in the data directory.\n \"\"\"\n\n auth = False\n userpass = str(thelogin) + ':' + str(thepasswd)\n\n if 'cache' not in self.uservar:\n self.uservar['cache'] = []\n cache = self.uservar['cache']\n\n # Check if it is the first visit from src_ip\n if src_ip not in self.uservar:\n self.uservar[src_ip] = {}\n ipinfo = self.uservar[src_ip]\n ipinfo['try'] = 0\n if userpass in cache:\n log.msg(\"first time for {}, found cached: {}\".format(src_ip, userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n else:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n log.msg(\"first time for {}, need: {}\".format(src_ip, ipinfo['max']))\n else:\n if userpass in cache:\n ipinfo = self.uservar[src_ip]\n log.msg(\"Found cached: {}\".format(userpass))\n ipinfo['max'] = 1\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n auth = True\n self.savevars()\n return auth\n\n ipinfo = self.uservar[src_ip]\n\n # Fill in missing variables\n if 'max' not in ipinfo:\n ipinfo['max'] = randint(self.mintry, self.maxtry)\n if 'try' not in ipinfo:\n ipinfo['try'] = 0\n if 'tried' not in ipinfo:\n ipinfo['tried'] = []\n\n # Don't count repeated username/password combinations\n if userpass in ipinfo['tried']:\n log.msg('already tried this combination')\n self.savevars()\n return auth\n\n ipinfo['try'] += 1\n attempts = ipinfo['try']\n need = ipinfo['max']\n log.msg(\"login attempt: {}\".format(attempts))\n\n # Check if enough login attempts are tried\n if attempts < need:\n self.uservar[src_ip]['tried'].append(userpass)\n elif attempts == need:\n ipinfo['user'] = str(thelogin)\n ipinfo['pw'] = str(thepasswd)\n cache.append(userpass)\n if len(cache) > self.maxcache:\n cache.pop(0)\n auth = True\n # Returning after successful login\n elif attempts > need:\n if 'user' not in ipinfo or 'pw' not in ipinfo:\n log.msg('return, but username or password not set!!!')\n ipinfo['tried'].append(userpass)\n ipinfo['try'] = 1\n else:\n log.msg(\"login return, expect: [{}/{}]\".format(ipinfo['user'], ipinfo['pw']))\n if thelogin == ipinfo['user'] and str(thepasswd) == ipinfo['pw']:\n auth = True\n self.savevars()\n return auth\n", "path": "src/cowrie/core/auth.py"}]}
2,841
102
gh_patches_debug_11025
rasdani/github-patches
git_diff
Qiskit__qiskit-3555
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can't invert gate created from QuantumCircuit.to_gate <!-- ⚠️ If you do not respect this template, your issue will be closed --> <!-- ⚠️ Make sure to browse the opened and closed issues --> ### Information - **Qiskit Terra version**: - **Python version**: - **Operating system**: ### What is the current behavior? When inverting a gate created from QuantumCircuit.to_gate the following exception is raised: `ValueError: not enough values to unpack (expected 3, got 2)` ### Steps to reproduce the problem ``` qc = QuantumCircuit(1) qc.x(0) gate = qc.to_gate() gate.inverse() ``` ### What is the expected behavior? ### Suggested solutions --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qiskit/converters/circuit_to_gate.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # This code is part of Qiskit. 4 # 5 # (C) Copyright IBM 2017, 2019. 6 # 7 # This code is licensed under the Apache License, Version 2.0. You may 8 # obtain a copy of this license in the LICENSE.txt file in the root directory 9 # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. 10 # 11 # Any modifications or derivative works of this code must retain this 12 # copyright notice, and modified files need to carry a notice indicating 13 # that they have been altered from the originals. 14 15 """Helper function for converting a circuit to a gate""" 16 17 from qiskit.circuit.gate import Gate 18 from qiskit.circuit.quantumregister import QuantumRegister, Qubit 19 from qiskit.exceptions import QiskitError 20 21 22 def circuit_to_gate(circuit, parameter_map=None): 23 """Build a ``Gate`` object from a ``QuantumCircuit``. 24 25 The gate is anonymous (not tied to a named quantum register), 26 and so can be inserted into another circuit. The gate will 27 have the same string name as the circuit. 28 29 Args: 30 circuit (QuantumCircuit): the input circuit. 31 parameter_map (dict): For parameterized circuits, a mapping from 32 parameters in the circuit to parameters to be used in the gate. 33 If None, existing circuit parameters will also parameterize the 34 Gate. 35 36 Raises: 37 QiskitError: if circuit is non-unitary or if 38 parameter_map is not compatible with circuit 39 40 Return: 41 Gate: a Gate equivalent to the action of the 42 input circuit. Upon decomposition, this gate will 43 yield the components comprising the original circuit. 44 """ 45 for inst, _, _ in circuit.data: 46 if not isinstance(inst, Gate): 47 raise QiskitError('One or more instructions in this instruction ' 48 'cannot be converted to a gate') 49 50 if parameter_map is None: 51 parameter_dict = {p: p for p in circuit.parameters} 52 else: 53 parameter_dict = circuit._unroll_param_dict(parameter_map) 54 55 if parameter_dict.keys() != circuit.parameters: 56 raise QiskitError(('parameter_map should map all circuit parameters. 
' 57 'Circuit parameters: {}, parameter_map: {}').format( 58 circuit.parameters, parameter_dict)) 59 60 gate = Gate(name=circuit.name, 61 num_qubits=sum([qreg.size for qreg in circuit.qregs]), 62 params=sorted(parameter_dict.values(), key=lambda p: p.name)) 63 gate.condition = None 64 65 def find_bit_position(bit): 66 """find the index of a given bit (Register, int) within 67 a flat ordered list of bits of the circuit 68 """ 69 if isinstance(bit, Qubit): 70 ordered_regs = circuit.qregs 71 else: 72 ordered_regs = circuit.cregs 73 reg_index = ordered_regs.index(bit.register) 74 return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index 75 76 target = circuit.copy() 77 target._substitute_parameters(parameter_dict) 78 79 definition = target.data 80 81 if gate.num_qubits > 0: 82 q = QuantumRegister(gate.num_qubits, 'q') 83 84 definition = list(map(lambda x: 85 (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))), 86 definition)) 87 gate.definition = definition 88 89 return gate 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py --- a/qiskit/converters/circuit_to_gate.py +++ b/qiskit/converters/circuit_to_gate.py @@ -81,9 +81,14 @@ if gate.num_qubits > 0: q = QuantumRegister(gate.num_qubits, 'q') - definition = list(map(lambda x: - (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))), - definition)) + # The 3rd parameter in the output tuple) is hard coded to [] because + # Gate objects do not have cregs set and we've verified that all + # instructions are gates + definition = list(map( + lambda x: (x[0], + list(map(lambda y: q[find_bit_position(y)], x[1])), + []), + definition)) gate.definition = definition return gate
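The traceback in the issue ("not enough values to unpack (expected 3, got 2)") comes from the 3-way unpack that gate decomposition/inversion performs on each definition entry, while `circuit_to_gate` was building 2-tuples of `(instruction, qubits)`. The patch hard-codes an empty `cargs` list as the third element. A minimal, qiskit-free sketch of the failure and the fix; the strings `'XGate'` and `'q0'` are placeholders, not real qiskit objects:

```
entry = ('XGate', ['q0'])            # 2-tuple: the shape the old list(map(...)) produced
try:
    inst, qargs, cargs = entry       # the 3-way unpack done when the gate is inverted
except ValueError as exc:
    print(exc)                       # not enough values to unpack (expected 3, got 2)

entry = ('XGate', ['q0'], [])        # 3-tuple with the hard-coded empty cargs, as in the patch
inst, qargs, cargs = entry           # unpacks cleanly; cargs is just []
```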
{"golden_diff": "diff --git a/qiskit/converters/circuit_to_gate.py b/qiskit/converters/circuit_to_gate.py\n--- a/qiskit/converters/circuit_to_gate.py\n+++ b/qiskit/converters/circuit_to_gate.py\n@@ -81,9 +81,14 @@\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n \n- definition = list(map(lambda x:\n- (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),\n- definition))\n+ # The 3rd parameter in the output tuple) is hard coded to [] because\n+ # Gate objects do not have cregs set and we've verified that all\n+ # instructions are gates\n+ definition = list(map(\n+ lambda x: (x[0],\n+ list(map(lambda y: q[find_bit_position(y)], x[1])),\n+ []),\n+ definition))\n gate.definition = definition\n \n return gate\n", "issue": "Can't invert gate created from QuantumCircuit.to_gate\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues -->\r\n\r\n### Information\r\n\r\n- **Qiskit Terra version**:\r\n- **Python version**:\r\n- **Operating system**:\r\n\r\n### What is the current behavior?\r\nWhen inverting a gate created from QuantumCircuit.to_gate the following exception is raised:\r\n\r\n`ValueError: not enough values to unpack (expected 3, got 2)`\r\n\r\n\r\n### Steps to reproduce the problem\r\n```\r\nqc = QuantumCircuit(1)\r\nqc.x(0)\r\ngate = qc.to_gate()\r\ngate.inverse()\r\n```\r\n\r\n### What is the expected behavior?\r\n\r\n\r\n\r\n### Suggested solutions\r\n\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting a circuit to a gate\"\"\"\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister, Qubit\nfrom qiskit.exceptions import QiskitError\n\n\ndef circuit_to_gate(circuit, parameter_map=None):\n \"\"\"Build a ``Gate`` object from a ``QuantumCircuit``.\n\n The gate is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The gate will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n parameter_map (dict): For parameterized circuits, a mapping from\n parameters in the circuit to parameters to be used in the gate.\n If None, existing circuit parameters will also parameterize the\n Gate.\n\n Raises:\n QiskitError: if circuit is non-unitary or if\n parameter_map is not compatible with circuit\n\n Return:\n Gate: a Gate equivalent to the action of the\n input circuit. 
Upon decomposition, this gate will\n yield the components comprising the original circuit.\n \"\"\"\n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n raise QiskitError('One or more instructions in this instruction '\n 'cannot be converted to a gate')\n\n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n else:\n parameter_dict = circuit._unroll_param_dict(parameter_map)\n\n if parameter_dict.keys() != circuit.parameters:\n raise QiskitError(('parameter_map should map all circuit parameters. '\n 'Circuit parameters: {}, parameter_map: {}').format(\n circuit.parameters, parameter_dict))\n\n gate = Gate(name=circuit.name,\n num_qubits=sum([qreg.size for qreg in circuit.qregs]),\n params=sorted(parameter_dict.values(), key=lambda p: p.name))\n gate.condition = None\n\n def find_bit_position(bit):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(bit, Qubit):\n ordered_regs = circuit.qregs\n else:\n ordered_regs = circuit.cregs\n reg_index = ordered_regs.index(bit.register)\n return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index\n\n target = circuit.copy()\n target._substitute_parameters(parameter_dict)\n\n definition = target.data\n\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n\n definition = list(map(lambda x:\n (x[0], list(map(lambda y: q[find_bit_position(y)], x[1]))),\n definition))\n gate.definition = definition\n\n return gate\n", "path": "qiskit/converters/circuit_to_gate.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# This code is part of Qiskit.\n#\n# (C) Copyright IBM 2017, 2019.\n#\n# This code is licensed under the Apache License, Version 2.0. You may\n# obtain a copy of this license in the LICENSE.txt file in the root directory\n# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Any modifications or derivative works of this code must retain this\n# copyright notice, and modified files need to carry a notice indicating\n# that they have been altered from the originals.\n\n\"\"\"Helper function for converting a circuit to a gate\"\"\"\n\nfrom qiskit.circuit.gate import Gate\nfrom qiskit.circuit.quantumregister import QuantumRegister, Qubit\nfrom qiskit.exceptions import QiskitError\n\n\ndef circuit_to_gate(circuit, parameter_map=None):\n \"\"\"Build a ``Gate`` object from a ``QuantumCircuit``.\n\n The gate is anonymous (not tied to a named quantum register),\n and so can be inserted into another circuit. The gate will\n have the same string name as the circuit.\n\n Args:\n circuit (QuantumCircuit): the input circuit.\n parameter_map (dict): For parameterized circuits, a mapping from\n parameters in the circuit to parameters to be used in the gate.\n If None, existing circuit parameters will also parameterize the\n Gate.\n\n Raises:\n QiskitError: if circuit is non-unitary or if\n parameter_map is not compatible with circuit\n\n Return:\n Gate: a Gate equivalent to the action of the\n input circuit. 
Upon decomposition, this gate will\n yield the components comprising the original circuit.\n \"\"\"\n for inst, _, _ in circuit.data:\n if not isinstance(inst, Gate):\n raise QiskitError('One or more instructions in this instruction '\n 'cannot be converted to a gate')\n\n if parameter_map is None:\n parameter_dict = {p: p for p in circuit.parameters}\n else:\n parameter_dict = circuit._unroll_param_dict(parameter_map)\n\n if parameter_dict.keys() != circuit.parameters:\n raise QiskitError(('parameter_map should map all circuit parameters. '\n 'Circuit parameters: {}, parameter_map: {}').format(\n circuit.parameters, parameter_dict))\n\n gate = Gate(name=circuit.name,\n num_qubits=sum([qreg.size for qreg in circuit.qregs]),\n params=sorted(parameter_dict.values(), key=lambda p: p.name))\n gate.condition = None\n\n def find_bit_position(bit):\n \"\"\"find the index of a given bit (Register, int) within\n a flat ordered list of bits of the circuit\n \"\"\"\n if isinstance(bit, Qubit):\n ordered_regs = circuit.qregs\n else:\n ordered_regs = circuit.cregs\n reg_index = ordered_regs.index(bit.register)\n return sum([reg.size for reg in ordered_regs[:reg_index]]) + bit.index\n\n target = circuit.copy()\n target._substitute_parameters(parameter_dict)\n\n definition = target.data\n\n if gate.num_qubits > 0:\n q = QuantumRegister(gate.num_qubits, 'q')\n\n # The 3rd parameter in the output tuple) is hard coded to [] because\n # Gate objects do not have cregs set and we've verified that all\n # instructions are gates\n definition = list(map(\n lambda x: (x[0],\n list(map(lambda y: q[find_bit_position(y)], x[1])),\n []),\n definition))\n gate.definition = definition\n\n return gate\n", "path": "qiskit/converters/circuit_to_gate.py"}]}
1,340
228
gh_patches_debug_4796
rasdani/github-patches
git_diff
saleor__saleor-2791
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- menuCreateItem input issue Why `menuCreateItem` does not require `menu` argument on the schema level, but returns an error if not given? It doesn't look right. Besides, do we really need to specify `menu` argument, if `parent` is given? It's not like child could be pinned to different menu than its parent. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/graphql/menu/mutations.py` Content: ``` 1 import graphene 2 from graphql_jwt.decorators import permission_required 3 4 from ...menu import models 5 from ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation 6 from ..product.types import Category, Collection 7 from ..page.types import Page 8 from .types import Menu 9 10 11 class MenuItemInput(graphene.InputObjectType): 12 name = graphene.String(description='Name of the menu item.') 13 url = graphene.String(description='URL of the pointed item.') 14 category = graphene.ID( 15 description='Category to which item points.', name='category') 16 collection = graphene.ID( 17 description='Collection to which item points.', name='collection') 18 page = graphene.ID( 19 description='Page to which item points.', name='page') 20 21 22 class MenuItemCreateInput(MenuItemInput): 23 menu = graphene.ID( 24 description='Menu to which item belongs to.', name='menu') 25 parent = graphene.ID( 26 description=''' 27 ID of the parent menu. If empty, menu will be top level 28 menu.''', 29 name='parent') 30 31 32 class MenuInput(graphene.InputObjectType): 33 name = graphene.String(description='Name of the menu.') 34 35 36 class MenuCreateInput(MenuInput): 37 items = graphene.List( 38 MenuItemInput, description='List of menu items.') 39 40 41 class MenuCreate(ModelMutation): 42 class Arguments: 43 input = MenuCreateInput( 44 required=True, 45 description='Fields required to create a menu.') 46 47 class Meta: 48 description = 'Creates a new Menu' 49 model = models.Menu 50 51 @classmethod 52 def user_is_allowed(cls, user, input): 53 return user.has_perm('menu.manage_menus') 54 55 @classmethod 56 def clean_input(cls, info, instance, input, errors): 57 cleaned_input = super().clean_input(info, instance, input, errors) 58 items = [] 59 for item in cleaned_input.get('items', []): 60 category = item.get('category') 61 collection = item.get('collection') 62 page = item.get('page') 63 url = item.get('url') 64 if len([i for i in [category, collection, page, url] if i]) > 1: 65 cls.add_error( 66 errors, 'items', 'More than one item provided.') 67 else: 68 if category: 69 category = cls.get_node_or_error( 70 info, category, errors, 'items', only_type=Category) 71 item['category'] = category 72 elif collection: 73 collection = cls.get_node_or_error( 74 info, collection, errors, 'items', 75 only_type=Collection) 76 item['collection'] = collection 77 elif page: 78 page = cls.get_node_or_error( 79 info, page, errors, 'items', only_type=Page) 80 item['page'] = page 81 elif not url: 82 cls.add_error(errors, 'items', 'No menu item provided.') 83 items.append(item) 84 cleaned_input['items'] = items 85 return cleaned_input 86 87 @classmethod 88 def _save_m2m(cls, info, instance, cleaned_data): 89 super()._save_m2m(info, instance, cleaned_data) 90 items = cleaned_data.get('items', []) 91 for item in items: 92 instance.items.create(**item) 93 94 95 class MenuUpdate(ModelMutation): 96 class Arguments: 97 id = 
graphene.ID( 98 required=True, description='ID of a menu to update.') 99 input = MenuInput( 100 required=True, 101 description='Fields required to update a menu.') 102 103 class Meta: 104 description = 'Updates a menu.' 105 model = models.Menu 106 107 @classmethod 108 def user_is_allowed(cls, user, input): 109 return user.has_perm('menu.manage_menus') 110 111 112 class MenuDelete(ModelDeleteMutation): 113 class Arguments: 114 id = graphene.ID( 115 required=True, description='ID of a menu to delete.') 116 117 class Meta: 118 description = 'Deletes a menu.' 119 model = models.Menu 120 121 @classmethod 122 def user_is_allowed(cls, user, input): 123 return user.has_perm('menu.manage_menus') 124 125 126 class MenuItemCreate(ModelMutation): 127 class Arguments: 128 input = MenuItemCreateInput( 129 required=True, 130 description="""Fields required to update a menu item. 131 Only one of 'url', 'category', 'page', 'collection' is allowed 132 per item""") 133 134 class Meta: 135 description = 'Creates a new Menu' 136 model = models.MenuItem 137 138 @classmethod 139 def user_is_allowed(cls, user, input): 140 return user.has_perm('menu.manage_menus') 141 142 @classmethod 143 def clean_input(cls, info, instance, input, errors): 144 cleaned_input = super().clean_input(info, instance, input, errors) 145 items = [ 146 cleaned_input.get('page'), cleaned_input.get('collection'), 147 cleaned_input.get('url'), cleaned_input.get('category')] 148 items = [item for item in items if item is not None] 149 if len(items) > 1: 150 cls.add_error( 151 errors=errors, 152 field='items', message='More than one item provided.') 153 return cleaned_input 154 155 156 class MenuItemUpdate(MenuItemCreate): 157 class Arguments: 158 id = graphene.ID( 159 required=True, description='ID of a menu item to update.') 160 input = MenuItemInput( 161 required=True, 162 description="""Fields required to update a menu item. 163 Only one of 'url', 'category', 'page', 'collection' is allowed 164 per item""") 165 166 class Meta: 167 description = 'Updates a menu item.' 168 model = models.MenuItem 169 170 @classmethod 171 def user_is_allowed(cls, user, input): 172 return user.has_perm('menu.manage_menus') 173 174 @classmethod 175 def construct_instance(cls, instance, cleaned_data): 176 # Only one item can be assigned per menu item 177 instance.page = None 178 instance.collection = None 179 instance.category = None 180 instance.url = None 181 return super().construct_instance(instance, cleaned_data) 182 183 184 class MenuItemDelete(ModelDeleteMutation): 185 class Arguments: 186 id = graphene.ID( 187 required=True, description='ID of a menu item to delete.') 188 189 class Meta: 190 description = 'Deletes a menu item.' 191 model = models.MenuItem 192 193 @classmethod 194 def user_is_allowed(cls, user, input): 195 return user.has_perm('menu.manage_menus') 196 197 198 class NavigationType(graphene.Enum): 199 MAIN = 'main' 200 SECONDARY = 'secondary' 201 202 @property 203 def description(self): 204 if self == NavigationType.MAIN: 205 return 'Main storefront\'s navigation.' 206 return 'Secondary storefront\'s navigation.' 207 208 209 class AssignNavigation(BaseMutation): 210 menu = graphene.Field(Menu, description='Assigned navigation menu.') 211 212 class Arguments: 213 menu = graphene.ID( 214 description='ID of the menu.') 215 navigation_type = NavigationType( 216 description='Type of the navigation bar to assign the menu to.', 217 required=True) 218 219 class Meta: 220 description = 'Assigns storefront\'s navigation menus.' 
221 222 @classmethod 223 @permission_required(['menu.manage_menus', 'site.manage_settings']) 224 def mutate(cls, root, info, navigation_type, menu=None): 225 errors = [] 226 site_settings = info.context.site.settings 227 if menu is not None: 228 menu = cls.get_node_or_error( 229 info, menu, errors=errors, field='menu') 230 if not errors: 231 if navigation_type == NavigationType.MAIN: 232 site_settings.top_menu = menu 233 site_settings.save(update_fields=['top_menu']) 234 elif navigation_type == NavigationType.SECONDARY: 235 site_settings.bottom_menu = menu 236 site_settings.save(update_fields=['bottom_menu']) 237 else: 238 raise AssertionError( 239 'Unknown navigation type: %s' % navigation_type) 240 return AssignNavigation(menu=menu, errors=errors) 241 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py --- a/saleor/graphql/menu/mutations.py +++ b/saleor/graphql/menu/mutations.py @@ -21,7 +21,8 @@ class MenuItemCreateInput(MenuItemInput): menu = graphene.ID( - description='Menu to which item belongs to.', name='menu') + description='Menu to which item belongs to.', name='menu', + required=True) parent = graphene.ID( description=''' ID of the parent menu. If empty, menu will be top level
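The golden diff answers only the first half of the issue: instead of letting a missing `menu` surface as a resolver-side error, it marks the argument as required so GraphQL validation rejects the request up front (the second suggestion, inferring the menu from `parent`, is not part of this patch). A short sketch of the patched input type, assuming graphene is installed; `required=True` wraps the field in NonNull, i.e. `menu: ID!` in the generated schema:

```
import graphene

class MenuItemCreateInput(graphene.InputObjectType):
    menu = graphene.ID(
        description='Menu to which item belongs to.', name='menu',
        required=True)                       # non-nullable: requests without it fail validation
    parent = graphene.ID(name='parent')      # still optional
```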
{"golden_diff": "diff --git a/saleor/graphql/menu/mutations.py b/saleor/graphql/menu/mutations.py\n--- a/saleor/graphql/menu/mutations.py\n+++ b/saleor/graphql/menu/mutations.py\n@@ -21,7 +21,8 @@\n \n class MenuItemCreateInput(MenuItemInput):\n menu = graphene.ID(\n- description='Menu to which item belongs to.', name='menu')\n+ description='Menu to which item belongs to.', name='menu',\n+ required=True)\n parent = graphene.ID(\n description='''\n ID of the parent menu. If empty, menu will be top level\n", "issue": "menuCreateItem input issue\nWhy `menuCreateItem` does not require `menu` argument on the schema level, but returns an error if not given? It doesn't look right. \r\nBesides, do we really need to specify `menu` argument, if `parent` is given? It's not like child could be pinned to different menu than its parent.\n", "before_files": [{"content": "import graphene\nfrom graphql_jwt.decorators import permission_required\n\nfrom ...menu import models\nfrom ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\nfrom ..product.types import Category, Collection\nfrom ..page.types import Page\nfrom .types import Menu\n\n\nclass MenuItemInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu item.')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n collection = graphene.ID(\n description='Collection to which item points.', name='collection')\n page = graphene.ID(\n description='Page to which item points.', name='page')\n\n\nclass MenuItemCreateInput(MenuItemInput):\n menu = graphene.ID(\n description='Menu to which item belongs to.', name='menu')\n parent = graphene.ID(\n description='''\n ID of the parent menu. 
If empty, menu will be top level\n menu.''',\n name='parent')\n\n\nclass MenuInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu.')\n\n\nclass MenuCreateInput(MenuInput):\n items = graphene.List(\n MenuItemInput, description='List of menu items.')\n\n\nclass MenuCreate(ModelMutation):\n class Arguments:\n input = MenuCreateInput(\n required=True,\n description='Fields required to create a menu.')\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = []\n for item in cleaned_input.get('items', []):\n category = item.get('category')\n collection = item.get('collection')\n page = item.get('page')\n url = item.get('url')\n if len([i for i in [category, collection, page, url] if i]) > 1:\n cls.add_error(\n errors, 'items', 'More than one item provided.')\n else:\n if category:\n category = cls.get_node_or_error(\n info, category, errors, 'items', only_type=Category)\n item['category'] = category\n elif collection:\n collection = cls.get_node_or_error(\n info, collection, errors, 'items',\n only_type=Collection)\n item['collection'] = collection\n elif page:\n page = cls.get_node_or_error(\n info, page, errors, 'items', only_type=Page)\n item['page'] = page\n elif not url:\n cls.add_error(errors, 'items', 'No menu item provided.')\n items.append(item)\n cleaned_input['items'] = items\n return cleaned_input\n\n @classmethod\n def _save_m2m(cls, info, instance, cleaned_data):\n super()._save_m2m(info, instance, cleaned_data)\n items = cleaned_data.get('items', [])\n for item in items:\n instance.items.create(**item)\n\n\nclass MenuUpdate(ModelMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to update.')\n input = MenuInput(\n required=True,\n description='Fields required to update a menu.')\n\n class Meta:\n description = 'Updates a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to delete.')\n\n class Meta:\n description = 'Deletes a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuItemCreate(ModelMutation):\n class Arguments:\n input = MenuItemCreateInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = [\n cleaned_input.get('page'), cleaned_input.get('collection'),\n cleaned_input.get('url'), cleaned_input.get('category')]\n items = [item for item in items if item is not None]\n if len(items) > 1:\n cls.add_error(\n errors=errors,\n field='items', message='More than one item provided.')\n return cleaned_input\n\n\nclass MenuItemUpdate(MenuItemCreate):\n class Arguments:\n id = graphene.ID(\n required=True, 
description='ID of a menu item to update.')\n input = MenuItemInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Updates a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n # Only one item can be assigned per menu item\n instance.page = None\n instance.collection = None\n instance.category = None\n instance.url = None\n return super().construct_instance(instance, cleaned_data)\n\n\nclass MenuItemDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to delete.')\n\n class Meta:\n description = 'Deletes a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass NavigationType(graphene.Enum):\n MAIN = 'main'\n SECONDARY = 'secondary'\n\n @property\n def description(self):\n if self == NavigationType.MAIN:\n return 'Main storefront\\'s navigation.'\n return 'Secondary storefront\\'s navigation.'\n\n\nclass AssignNavigation(BaseMutation):\n menu = graphene.Field(Menu, description='Assigned navigation menu.')\n\n class Arguments:\n menu = graphene.ID(\n description='ID of the menu.')\n navigation_type = NavigationType(\n description='Type of the navigation bar to assign the menu to.',\n required=True)\n\n class Meta:\n description = 'Assigns storefront\\'s navigation menus.'\n\n @classmethod\n @permission_required(['menu.manage_menus', 'site.manage_settings'])\n def mutate(cls, root, info, navigation_type, menu=None):\n errors = []\n site_settings = info.context.site.settings\n if menu is not None:\n menu = cls.get_node_or_error(\n info, menu, errors=errors, field='menu')\n if not errors:\n if navigation_type == NavigationType.MAIN:\n site_settings.top_menu = menu\n site_settings.save(update_fields=['top_menu'])\n elif navigation_type == NavigationType.SECONDARY:\n site_settings.bottom_menu = menu\n site_settings.save(update_fields=['bottom_menu'])\n else:\n raise AssertionError(\n 'Unknown navigation type: %s' % navigation_type)\n return AssignNavigation(menu=menu, errors=errors)\n", "path": "saleor/graphql/menu/mutations.py"}], "after_files": [{"content": "import graphene\nfrom graphql_jwt.decorators import permission_required\n\nfrom ...menu import models\nfrom ..core.mutations import BaseMutation, ModelDeleteMutation, ModelMutation\nfrom ..product.types import Category, Collection\nfrom ..page.types import Page\nfrom .types import Menu\n\n\nclass MenuItemInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu item.')\n url = graphene.String(description='URL of the pointed item.')\n category = graphene.ID(\n description='Category to which item points.', name='category')\n collection = graphene.ID(\n description='Collection to which item points.', name='collection')\n page = graphene.ID(\n description='Page to which item points.', name='page')\n\n\nclass MenuItemCreateInput(MenuItemInput):\n menu = graphene.ID(\n description='Menu to which item belongs to.', name='menu',\n required=True)\n parent = graphene.ID(\n description='''\n ID of the parent menu. 
If empty, menu will be top level\n menu.''',\n name='parent')\n\n\nclass MenuInput(graphene.InputObjectType):\n name = graphene.String(description='Name of the menu.')\n\n\nclass MenuCreateInput(MenuInput):\n items = graphene.List(\n MenuItemInput, description='List of menu items.')\n\n\nclass MenuCreate(ModelMutation):\n class Arguments:\n input = MenuCreateInput(\n required=True,\n description='Fields required to create a menu.')\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = []\n for item in cleaned_input.get('items', []):\n category = item.get('category')\n collection = item.get('collection')\n page = item.get('page')\n url = item.get('url')\n if len([i for i in [category, collection, page, url] if i]) > 1:\n cls.add_error(\n errors, 'items', 'More than one item provided.')\n else:\n if category:\n category = cls.get_node_or_error(\n info, category, errors, 'items', only_type=Category)\n item['category'] = category\n elif collection:\n collection = cls.get_node_or_error(\n info, collection, errors, 'items',\n only_type=Collection)\n item['collection'] = collection\n elif page:\n page = cls.get_node_or_error(\n info, page, errors, 'items', only_type=Page)\n item['page'] = page\n elif not url:\n cls.add_error(errors, 'items', 'No menu item provided.')\n items.append(item)\n cleaned_input['items'] = items\n return cleaned_input\n\n @classmethod\n def _save_m2m(cls, info, instance, cleaned_data):\n super()._save_m2m(info, instance, cleaned_data)\n items = cleaned_data.get('items', [])\n for item in items:\n instance.items.create(**item)\n\n\nclass MenuUpdate(ModelMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to update.')\n input = MenuInput(\n required=True,\n description='Fields required to update a menu.')\n\n class Meta:\n description = 'Updates a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu to delete.')\n\n class Meta:\n description = 'Deletes a menu.'\n model = models.Menu\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass MenuItemCreate(ModelMutation):\n class Arguments:\n input = MenuItemCreateInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Creates a new Menu'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def clean_input(cls, info, instance, input, errors):\n cleaned_input = super().clean_input(info, instance, input, errors)\n items = [\n cleaned_input.get('page'), cleaned_input.get('collection'),\n cleaned_input.get('url'), cleaned_input.get('category')]\n items = [item for item in items if item is not None]\n if len(items) > 1:\n cls.add_error(\n errors=errors,\n field='items', message='More than one item provided.')\n return cleaned_input\n\n\nclass MenuItemUpdate(MenuItemCreate):\n class Arguments:\n id = graphene.ID(\n required=True, 
description='ID of a menu item to update.')\n input = MenuItemInput(\n required=True,\n description=\"\"\"Fields required to update a menu item.\n Only one of 'url', 'category', 'page', 'collection' is allowed\n per item\"\"\")\n\n class Meta:\n description = 'Updates a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n @classmethod\n def construct_instance(cls, instance, cleaned_data):\n # Only one item can be assigned per menu item\n instance.page = None\n instance.collection = None\n instance.category = None\n instance.url = None\n return super().construct_instance(instance, cleaned_data)\n\n\nclass MenuItemDelete(ModelDeleteMutation):\n class Arguments:\n id = graphene.ID(\n required=True, description='ID of a menu item to delete.')\n\n class Meta:\n description = 'Deletes a menu item.'\n model = models.MenuItem\n\n @classmethod\n def user_is_allowed(cls, user, input):\n return user.has_perm('menu.manage_menus')\n\n\nclass NavigationType(graphene.Enum):\n MAIN = 'main'\n SECONDARY = 'secondary'\n\n @property\n def description(self):\n if self == NavigationType.MAIN:\n return 'Main storefront\\'s navigation.'\n return 'Secondary storefront\\'s navigation.'\n\n\nclass AssignNavigation(BaseMutation):\n menu = graphene.Field(Menu, description='Assigned navigation menu.')\n\n class Arguments:\n menu = graphene.ID(\n description='ID of the menu.')\n navigation_type = NavigationType(\n description='Type of the navigation bar to assign the menu to.',\n required=True)\n\n class Meta:\n description = 'Assigns storefront\\'s navigation menus.'\n\n @classmethod\n @permission_required(['menu.manage_menus', 'site.manage_settings'])\n def mutate(cls, root, info, navigation_type, menu=None):\n errors = []\n site_settings = info.context.site.settings\n if menu is not None:\n menu = cls.get_node_or_error(\n info, menu, errors=errors, field='menu')\n if not errors:\n if navigation_type == NavigationType.MAIN:\n site_settings.top_menu = menu\n site_settings.save(update_fields=['top_menu'])\n elif navigation_type == NavigationType.SECONDARY:\n site_settings.bottom_menu = menu\n site_settings.save(update_fields=['bottom_menu'])\n else:\n raise AssertionError(\n 'Unknown navigation type: %s' % navigation_type)\n return AssignNavigation(menu=menu, errors=errors)\n", "path": "saleor/graphql/menu/mutations.py"}]}
2,590
131
gh_patches_debug_33408
rasdani/github-patches
git_diff
pre-commit__pre-commit-1107
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `pre-commit init-templatedir` should expanduser before checking value to warn on https://github.com/pre-commit/pre-commit/issues/1084#issuecomment-513744333 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/commands/init_templatedir.py` Content: ``` 1 import logging 2 import os.path 3 4 from pre_commit.commands.install_uninstall import install 5 from pre_commit.util import cmd_output 6 7 logger = logging.getLogger('pre_commit') 8 9 10 def init_templatedir(config_file, store, directory, hook_type): 11 install( 12 config_file, store, overwrite=True, hook_type=hook_type, 13 skip_on_missing_config=True, git_dir=directory, 14 ) 15 _, out, _ = cmd_output('git', 'config', 'init.templateDir', retcode=None) 16 dest = os.path.realpath(directory) 17 if os.path.realpath(out.strip()) != dest: 18 logger.warning('`init.templateDir` not set to the target directory') 19 logger.warning( 20 'maybe `git config --global init.templateDir {}`?'.format(dest), 21 ) 22 ``` Path: `pre_commit/main.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import argparse 4 import logging 5 import os 6 import sys 7 8 import pre_commit.constants as C 9 from pre_commit import color 10 from pre_commit import five 11 from pre_commit import git 12 from pre_commit.commands.autoupdate import autoupdate 13 from pre_commit.commands.clean import clean 14 from pre_commit.commands.gc import gc 15 from pre_commit.commands.init_templatedir import init_templatedir 16 from pre_commit.commands.install_uninstall import install 17 from pre_commit.commands.install_uninstall import install_hooks 18 from pre_commit.commands.install_uninstall import uninstall 19 from pre_commit.commands.migrate_config import migrate_config 20 from pre_commit.commands.run import run 21 from pre_commit.commands.sample_config import sample_config 22 from pre_commit.commands.try_repo import try_repo 23 from pre_commit.error_handler import error_handler 24 from pre_commit.error_handler import FatalError 25 from pre_commit.logging_handler import logging_handler 26 from pre_commit.store import Store 27 from pre_commit.util import CalledProcessError 28 29 30 logger = logging.getLogger('pre_commit') 31 32 # https://github.com/pre-commit/pre-commit/issues/217 33 # On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip` 34 # to install packages to the wrong place. We don't want anything to deal with 35 # pyvenv 36 os.environ.pop('__PYVENV_LAUNCHER__', None) 37 38 39 def _add_color_option(parser): 40 parser.add_argument( 41 '--color', default=os.environ.get('PRE_COMMIT_COLOR', 'auto'), 42 type=color.use_color, 43 metavar='{' + ','.join(color.COLOR_CHOICES) + '}', 44 help='Whether to use color in output. 
Defaults to `%(default)s`.', 45 ) 46 47 48 def _add_config_option(parser): 49 parser.add_argument( 50 '-c', '--config', default=C.CONFIG_FILE, 51 help='Path to alternate config file', 52 ) 53 54 55 def _add_hook_type_option(parser): 56 parser.add_argument( 57 '-t', '--hook-type', choices=( 58 'pre-commit', 'pre-push', 'prepare-commit-msg', 'commit-msg', 59 ), 60 default='pre-commit', 61 ) 62 63 64 def _add_run_options(parser): 65 parser.add_argument('hook', nargs='?', help='A single hook-id to run') 66 parser.add_argument('--verbose', '-v', action='store_true', default=False) 67 parser.add_argument( 68 '--origin', '-o', 69 help="The origin branch's commit_id when using `git push`.", 70 ) 71 parser.add_argument( 72 '--source', '-s', 73 help="The remote branch's commit_id when using `git push`.", 74 ) 75 parser.add_argument( 76 '--commit-msg-filename', 77 help='Filename to check when running during `commit-msg`', 78 ) 79 parser.add_argument( 80 '--hook-stage', choices=C.STAGES, default='commit', 81 help='The stage during which the hook is fired. One of %(choices)s', 82 ) 83 parser.add_argument( 84 '--show-diff-on-failure', action='store_true', 85 help='When hooks fail, run `git diff` directly afterward.', 86 ) 87 mutex_group = parser.add_mutually_exclusive_group(required=False) 88 mutex_group.add_argument( 89 '--all-files', '-a', action='store_true', default=False, 90 help='Run on all the files in the repo.', 91 ) 92 mutex_group.add_argument( 93 '--files', nargs='*', default=[], 94 help='Specific filenames to run hooks on.', 95 ) 96 97 98 def _adjust_args_and_chdir(args): 99 # `--config` was specified relative to the non-root working directory 100 if os.path.exists(args.config): 101 args.config = os.path.abspath(args.config) 102 if args.command in {'run', 'try-repo'}: 103 args.files = [os.path.abspath(filename) for filename in args.files] 104 if args.command == 'try-repo' and os.path.exists(args.repo): 105 args.repo = os.path.abspath(args.repo) 106 107 try: 108 os.chdir(git.get_root()) 109 except CalledProcessError: 110 raise FatalError( 111 'git failed. Is it installed, and are you in a Git repository ' 112 'directory?', 113 ) 114 115 args.config = os.path.relpath(args.config) 116 if args.command in {'run', 'try-repo'}: 117 args.files = [os.path.relpath(filename) for filename in args.files] 118 if args.command == 'try-repo' and os.path.exists(args.repo): 119 args.repo = os.path.relpath(args.repo) 120 121 122 def main(argv=None): 123 argv = argv if argv is not None else sys.argv[1:] 124 argv = [five.to_text(arg) for arg in argv] 125 parser = argparse.ArgumentParser() 126 127 # https://stackoverflow.com/a/8521644/812183 128 parser.add_argument( 129 '-V', '--version', 130 action='version', 131 version='%(prog)s {}'.format(C.VERSION), 132 ) 133 134 subparsers = parser.add_subparsers(dest='command') 135 136 autoupdate_parser = subparsers.add_parser( 137 'autoupdate', 138 help="Auto-update pre-commit config to the latest repos' versions.", 139 ) 140 _add_color_option(autoupdate_parser) 141 _add_config_option(autoupdate_parser) 142 autoupdate_parser.add_argument( 143 '--tags-only', action='store_true', help='LEGACY: for compatibility', 144 ) 145 autoupdate_parser.add_argument( 146 '--bleeding-edge', action='store_true', 147 help=( 148 'Update to the bleeding edge of `master` instead of the latest ' 149 'tagged version (the default behavior).' 
150 ), 151 ) 152 autoupdate_parser.add_argument( 153 '--repo', dest='repos', action='append', metavar='REPO', 154 help='Only update this repository -- may be specified multiple times.', 155 ) 156 157 clean_parser = subparsers.add_parser( 158 'clean', help='Clean out pre-commit files.', 159 ) 160 _add_color_option(clean_parser) 161 _add_config_option(clean_parser) 162 163 gc_parser = subparsers.add_parser('gc', help='Clean unused cached repos.') 164 _add_color_option(gc_parser) 165 _add_config_option(gc_parser) 166 167 init_templatedir_parser = subparsers.add_parser( 168 'init-templatedir', 169 help=( 170 'Install hook script in a directory intended for use with ' 171 '`git config init.templateDir`.' 172 ), 173 ) 174 _add_color_option(init_templatedir_parser) 175 _add_config_option(init_templatedir_parser) 176 init_templatedir_parser.add_argument( 177 'directory', help='The directory in which to write the hook script.', 178 ) 179 _add_hook_type_option(init_templatedir_parser) 180 181 install_parser = subparsers.add_parser( 182 'install', help='Install the pre-commit script.', 183 ) 184 _add_color_option(install_parser) 185 _add_config_option(install_parser) 186 install_parser.add_argument( 187 '-f', '--overwrite', action='store_true', 188 help='Overwrite existing hooks / remove migration mode.', 189 ) 190 install_parser.add_argument( 191 '--install-hooks', action='store_true', 192 help=( 193 'Whether to install hook environments for all environments ' 194 'in the config file.' 195 ), 196 ) 197 _add_hook_type_option(install_parser) 198 install_parser.add_argument( 199 '--allow-missing-config', action='store_true', default=False, 200 help=( 201 'Whether to allow a missing `pre-commit` configuration file ' 202 'or exit with a failure code.' 203 ), 204 ) 205 206 install_hooks_parser = subparsers.add_parser( 207 'install-hooks', 208 help=( 209 'Install hook environments for all environments in the config ' 210 'file. You may find `pre-commit install --install-hooks` more ' 211 'useful.' 212 ), 213 ) 214 _add_color_option(install_hooks_parser) 215 _add_config_option(install_hooks_parser) 216 217 migrate_config_parser = subparsers.add_parser( 218 'migrate-config', 219 help='Migrate list configuration to new map configuration.', 220 ) 221 _add_color_option(migrate_config_parser) 222 _add_config_option(migrate_config_parser) 223 224 run_parser = subparsers.add_parser('run', help='Run hooks.') 225 _add_color_option(run_parser) 226 _add_config_option(run_parser) 227 _add_run_options(run_parser) 228 229 sample_config_parser = subparsers.add_parser( 230 'sample-config', help='Produce a sample {} file'.format(C.CONFIG_FILE), 231 ) 232 _add_color_option(sample_config_parser) 233 _add_config_option(sample_config_parser) 234 235 try_repo_parser = subparsers.add_parser( 236 'try-repo', 237 help='Try the hooks in a repository, useful for developing new hooks.', 238 ) 239 _add_color_option(try_repo_parser) 240 _add_config_option(try_repo_parser) 241 try_repo_parser.add_argument( 242 'repo', help='Repository to source hooks from.', 243 ) 244 try_repo_parser.add_argument( 245 '--ref', '--rev', 246 help=( 247 'Manually select a rev to run against, otherwise the `HEAD` ' 248 'revision will be used.' 
249 ), 250 ) 251 _add_run_options(try_repo_parser) 252 253 uninstall_parser = subparsers.add_parser( 254 'uninstall', help='Uninstall the pre-commit script.', 255 ) 256 _add_color_option(uninstall_parser) 257 _add_config_option(uninstall_parser) 258 _add_hook_type_option(uninstall_parser) 259 260 help = subparsers.add_parser( 261 'help', help='Show help for a specific command.', 262 ) 263 help.add_argument('help_cmd', nargs='?', help='Command to show help for.') 264 265 # argparse doesn't really provide a way to use a `default` subparser 266 if len(argv) == 0: 267 argv = ['run'] 268 args = parser.parse_args(argv) 269 270 if args.command == 'help' and args.help_cmd: 271 parser.parse_args([args.help_cmd, '--help']) 272 elif args.command == 'help': 273 parser.parse_args(['--help']) 274 275 with error_handler(), logging_handler(args.color): 276 if args.command not in {'clean', 'gc', 'sample-config'}: 277 _adjust_args_and_chdir(args) 278 279 git.check_for_cygwin_mismatch() 280 281 store = Store() 282 store.mark_config_used(args.config) 283 284 if args.command == 'autoupdate': 285 if args.tags_only: 286 logger.warning('--tags-only is the default') 287 return autoupdate( 288 args.config, store, 289 tags_only=not args.bleeding_edge, 290 repos=args.repos, 291 ) 292 elif args.command == 'clean': 293 return clean(store) 294 elif args.command == 'gc': 295 return gc(store) 296 elif args.command == 'install': 297 return install( 298 args.config, store, 299 overwrite=args.overwrite, hooks=args.install_hooks, 300 hook_type=args.hook_type, 301 skip_on_missing_config=args.allow_missing_config, 302 ) 303 elif args.command == 'init-templatedir': 304 return init_templatedir( 305 args.config, store, 306 args.directory, hook_type=args.hook_type, 307 ) 308 elif args.command == 'install-hooks': 309 return install_hooks(args.config, store) 310 elif args.command == 'migrate-config': 311 return migrate_config(args.config) 312 elif args.command == 'run': 313 return run(args.config, store, args) 314 elif args.command == 'sample-config': 315 return sample_config() 316 elif args.command == 'try-repo': 317 return try_repo(args) 318 elif args.command == 'uninstall': 319 return uninstall(hook_type=args.hook_type) 320 else: 321 raise NotImplementedError( 322 'Command {} not implemented.'.format(args.command), 323 ) 324 325 raise AssertionError( 326 'Command {} failed to exit with a returncode'.format(args.command), 327 ) 328 329 330 if __name__ == '__main__': 331 exit(main()) 332 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/commands/init_templatedir.py b/pre_commit/commands/init_templatedir.py --- a/pre_commit/commands/init_templatedir.py +++ b/pre_commit/commands/init_templatedir.py @@ -2,6 +2,7 @@ import os.path from pre_commit.commands.install_uninstall import install +from pre_commit.util import CalledProcessError from pre_commit.util import cmd_output logger = logging.getLogger('pre_commit') @@ -12,9 +13,14 @@ config_file, store, overwrite=True, hook_type=hook_type, skip_on_missing_config=True, git_dir=directory, ) - _, out, _ = cmd_output('git', 'config', 'init.templateDir', retcode=None) + try: + _, out, _ = cmd_output('git', 'config', 'init.templateDir') + except CalledProcessError: + configured_path = None + else: + configured_path = os.path.realpath(os.path.expanduser(out.strip())) dest = os.path.realpath(directory) - if os.path.realpath(out.strip()) != dest: + if configured_path != dest: logger.warning('`init.templateDir` not set to the target directory') logger.warning( 'maybe `git config --global init.templateDir {}`?'.format(dest), diff --git a/pre_commit/main.py b/pre_commit/main.py --- a/pre_commit/main.py +++ b/pre_commit/main.py @@ -36,6 +36,9 @@ os.environ.pop('__PYVENV_LAUNCHER__', None) +COMMANDS_NO_GIT = {'clean', 'gc', 'init-templatedir', 'sample-config'} + + def _add_color_option(parser): parser.add_argument( '--color', default=os.environ.get('PRE_COMMIT_COLOR', 'auto'), @@ -273,7 +276,7 @@ parser.parse_args(['--help']) with error_handler(), logging_handler(args.color): - if args.command not in {'clean', 'gc', 'sample-config'}: + if args.command not in COMMANDS_NO_GIT: _adjust_args_and_chdir(args) git.check_for_cygwin_mismatch()
{"golden_diff": "diff --git a/pre_commit/commands/init_templatedir.py b/pre_commit/commands/init_templatedir.py\n--- a/pre_commit/commands/init_templatedir.py\n+++ b/pre_commit/commands/init_templatedir.py\n@@ -2,6 +2,7 @@\n import os.path\n \n from pre_commit.commands.install_uninstall import install\n+from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n \n logger = logging.getLogger('pre_commit')\n@@ -12,9 +13,14 @@\n config_file, store, overwrite=True, hook_type=hook_type,\n skip_on_missing_config=True, git_dir=directory,\n )\n- _, out, _ = cmd_output('git', 'config', 'init.templateDir', retcode=None)\n+ try:\n+ _, out, _ = cmd_output('git', 'config', 'init.templateDir')\n+ except CalledProcessError:\n+ configured_path = None\n+ else:\n+ configured_path = os.path.realpath(os.path.expanduser(out.strip()))\n dest = os.path.realpath(directory)\n- if os.path.realpath(out.strip()) != dest:\n+ if configured_path != dest:\n logger.warning('`init.templateDir` not set to the target directory')\n logger.warning(\n 'maybe `git config --global init.templateDir {}`?'.format(dest),\ndiff --git a/pre_commit/main.py b/pre_commit/main.py\n--- a/pre_commit/main.py\n+++ b/pre_commit/main.py\n@@ -36,6 +36,9 @@\n os.environ.pop('__PYVENV_LAUNCHER__', None)\n \n \n+COMMANDS_NO_GIT = {'clean', 'gc', 'init-templatedir', 'sample-config'}\n+\n+\n def _add_color_option(parser):\n parser.add_argument(\n '--color', default=os.environ.get('PRE_COMMIT_COLOR', 'auto'),\n@@ -273,7 +276,7 @@\n parser.parse_args(['--help'])\n \n with error_handler(), logging_handler(args.color):\n- if args.command not in {'clean', 'gc', 'sample-config'}:\n+ if args.command not in COMMANDS_NO_GIT:\n _adjust_args_and_chdir(args)\n \n git.check_for_cygwin_mismatch()\n", "issue": "`pre-commit init-templatedir` should expanduser before checking value to warn on\nhttps://github.com/pre-commit/pre-commit/issues/1084#issuecomment-513744333\n", "before_files": [{"content": "import logging\nimport os.path\n\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.util import cmd_output\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef init_templatedir(config_file, store, directory, hook_type):\n install(\n config_file, store, overwrite=True, hook_type=hook_type,\n skip_on_missing_config=True, git_dir=directory,\n )\n _, out, _ = cmd_output('git', 'config', 'init.templateDir', retcode=None)\n dest = os.path.realpath(directory)\n if os.path.realpath(out.strip()) != dest:\n logger.warning('`init.templateDir` not set to the target directory')\n logger.warning(\n 'maybe `git config --global init.templateDir {}`?'.format(dest),\n )\n", "path": "pre_commit/commands/init_templatedir.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.gc import gc\nfrom pre_commit.commands.init_templatedir import init_templatedir\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import install_hooks\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.commands.run import run\nfrom pre_commit.commands.sample_config import sample_config\nfrom pre_commit.commands.try_repo 
import try_repo\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.error_handler import FatalError\nfrom pre_commit.logging_handler import logging_handler\nfrom pre_commit.store import Store\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default=os.environ.get('PRE_COMMIT_COLOR', 'auto'),\n type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. Defaults to `%(default)s`.',\n )\n\n\ndef _add_config_option(parser):\n parser.add_argument(\n '-c', '--config', default=C.CONFIG_FILE,\n help='Path to alternate config file',\n )\n\n\ndef _add_hook_type_option(parser):\n parser.add_argument(\n '-t', '--hook-type', choices=(\n 'pre-commit', 'pre-push', 'prepare-commit-msg', 'commit-msg',\n ),\n default='pre-commit',\n )\n\n\ndef _add_run_options(parser):\n parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n parser.add_argument('--verbose', '-v', action='store_true', default=False)\n parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n parser.add_argument(\n '--commit-msg-filename',\n help='Filename to check when running during `commit-msg`',\n )\n parser.add_argument(\n '--hook-stage', choices=C.STAGES, default='commit',\n help='The stage during which the hook is fired. One of %(choices)s',\n )\n parser.add_argument(\n '--show-diff-on-failure', action='store_true',\n help='When hooks fail, run `git diff` directly afterward.',\n )\n mutex_group = parser.add_mutually_exclusive_group(required=False)\n mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo.',\n )\n mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n\ndef _adjust_args_and_chdir(args):\n # `--config` was specified relative to the non-root working directory\n if os.path.exists(args.config):\n args.config = os.path.abspath(args.config)\n if args.command in {'run', 'try-repo'}:\n args.files = [os.path.abspath(filename) for filename in args.files]\n if args.command == 'try-repo' and os.path.exists(args.repo):\n args.repo = os.path.abspath(args.repo)\n\n try:\n os.chdir(git.get_root())\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?',\n )\n\n args.config = os.path.relpath(args.config)\n if args.command in {'run', 'try-repo'}:\n args.files = [os.path.relpath(filename) for filename in args.files]\n if args.command == 'try-repo' and os.path.exists(args.repo):\n args.repo = os.path.relpath(args.repo)\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # https://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(C.VERSION),\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n _add_config_option(autoupdate_parser)\n autoupdate_parser.add_argument(\n '--tags-only', action='store_true', help='LEGACY: for compatibility',\n )\n autoupdate_parser.add_argument(\n '--bleeding-edge', action='store_true',\n help=(\n 'Update to the bleeding edge of `master` instead of the latest '\n 'tagged version (the default behavior).'\n ),\n )\n autoupdate_parser.add_argument(\n '--repo', dest='repos', action='append', metavar='REPO',\n help='Only update this repository -- may be specified multiple times.',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n _add_config_option(clean_parser)\n\n gc_parser = subparsers.add_parser('gc', help='Clean unused cached repos.')\n _add_color_option(gc_parser)\n _add_config_option(gc_parser)\n\n init_templatedir_parser = subparsers.add_parser(\n 'init-templatedir',\n help=(\n 'Install hook script in a directory intended for use with '\n '`git config init.templateDir`.'\n ),\n )\n _add_color_option(init_templatedir_parser)\n _add_config_option(init_templatedir_parser)\n init_templatedir_parser.add_argument(\n 'directory', help='The directory in which to write the hook script.',\n )\n _add_hook_type_option(init_templatedir_parser)\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n _add_config_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n _add_hook_type_option(install_parser)\n install_parser.add_argument(\n '--allow-missing-config', action='store_true', default=False,\n help=(\n 'Whether to allow a missing `pre-commit` configuration file '\n 'or exit with a failure code.'\n ),\n )\n\n install_hooks_parser = subparsers.add_parser(\n 'install-hooks',\n help=(\n 'Install hook environments for all environments in the config '\n 'file. 
You may find `pre-commit install --install-hooks` more '\n 'useful.'\n ),\n )\n _add_color_option(install_hooks_parser)\n _add_config_option(install_hooks_parser)\n\n migrate_config_parser = subparsers.add_parser(\n 'migrate-config',\n help='Migrate list configuration to new map configuration.',\n )\n _add_color_option(migrate_config_parser)\n _add_config_option(migrate_config_parser)\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n _add_config_option(run_parser)\n _add_run_options(run_parser)\n\n sample_config_parser = subparsers.add_parser(\n 'sample-config', help='Produce a sample {} file'.format(C.CONFIG_FILE),\n )\n _add_color_option(sample_config_parser)\n _add_config_option(sample_config_parser)\n\n try_repo_parser = subparsers.add_parser(\n 'try-repo',\n help='Try the hooks in a repository, useful for developing new hooks.',\n )\n _add_color_option(try_repo_parser)\n _add_config_option(try_repo_parser)\n try_repo_parser.add_argument(\n 'repo', help='Repository to source hooks from.',\n )\n try_repo_parser.add_argument(\n '--ref', '--rev',\n help=(\n 'Manually select a rev to run against, otherwise the `HEAD` '\n 'revision will be used.'\n ),\n )\n _add_run_options(try_repo_parser)\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n _add_config_option(uninstall_parser)\n _add_hook_type_option(uninstall_parser)\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n\n if args.command == 'help' and args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n elif args.command == 'help':\n parser.parse_args(['--help'])\n\n with error_handler(), logging_handler(args.color):\n if args.command not in {'clean', 'gc', 'sample-config'}:\n _adjust_args_and_chdir(args)\n\n git.check_for_cygwin_mismatch()\n\n store = Store()\n store.mark_config_used(args.config)\n\n if args.command == 'autoupdate':\n if args.tags_only:\n logger.warning('--tags-only is the default')\n return autoupdate(\n args.config, store,\n tags_only=not args.bleeding_edge,\n repos=args.repos,\n )\n elif args.command == 'clean':\n return clean(store)\n elif args.command == 'gc':\n return gc(store)\n elif args.command == 'install':\n return install(\n args.config, store,\n overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n skip_on_missing_config=args.allow_missing_config,\n )\n elif args.command == 'init-templatedir':\n return init_templatedir(\n args.config, store,\n args.directory, hook_type=args.hook_type,\n )\n elif args.command == 'install-hooks':\n return install_hooks(args.config, store)\n elif args.command == 'migrate-config':\n return migrate_config(args.config)\n elif args.command == 'run':\n return run(args.config, store, args)\n elif args.command == 'sample-config':\n return sample_config()\n elif args.command == 'try-repo':\n return try_repo(args)\n elif args.command == 'uninstall':\n return uninstall(hook_type=args.hook_type)\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command),\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command),\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": 
"pre_commit/main.py"}], "after_files": [{"content": "import logging\nimport os.path\n\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef init_templatedir(config_file, store, directory, hook_type):\n install(\n config_file, store, overwrite=True, hook_type=hook_type,\n skip_on_missing_config=True, git_dir=directory,\n )\n try:\n _, out, _ = cmd_output('git', 'config', 'init.templateDir')\n except CalledProcessError:\n configured_path = None\n else:\n configured_path = os.path.realpath(os.path.expanduser(out.strip()))\n dest = os.path.realpath(directory)\n if configured_path != dest:\n logger.warning('`init.templateDir` not set to the target directory')\n logger.warning(\n 'maybe `git config --global init.templateDir {}`?'.format(dest),\n )\n", "path": "pre_commit/commands/init_templatedir.py"}, {"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\nimport os\nimport sys\n\nimport pre_commit.constants as C\nfrom pre_commit import color\nfrom pre_commit import five\nfrom pre_commit import git\nfrom pre_commit.commands.autoupdate import autoupdate\nfrom pre_commit.commands.clean import clean\nfrom pre_commit.commands.gc import gc\nfrom pre_commit.commands.init_templatedir import init_templatedir\nfrom pre_commit.commands.install_uninstall import install\nfrom pre_commit.commands.install_uninstall import install_hooks\nfrom pre_commit.commands.install_uninstall import uninstall\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.commands.run import run\nfrom pre_commit.commands.sample_config import sample_config\nfrom pre_commit.commands.try_repo import try_repo\nfrom pre_commit.error_handler import error_handler\nfrom pre_commit.error_handler import FatalError\nfrom pre_commit.logging_handler import logging_handler\nfrom pre_commit.store import Store\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n# https://github.com/pre-commit/pre-commit/issues/217\n# On OSX, making a virtualenv using pyvenv at . causes `virtualenv` and `pip`\n# to install packages to the wrong place. We don't want anything to deal with\n# pyvenv\nos.environ.pop('__PYVENV_LAUNCHER__', None)\n\n\nCOMMANDS_NO_GIT = {'clean', 'gc', 'init-templatedir', 'sample-config'}\n\n\ndef _add_color_option(parser):\n parser.add_argument(\n '--color', default=os.environ.get('PRE_COMMIT_COLOR', 'auto'),\n type=color.use_color,\n metavar='{' + ','.join(color.COLOR_CHOICES) + '}',\n help='Whether to use color in output. 
Defaults to `%(default)s`.',\n )\n\n\ndef _add_config_option(parser):\n parser.add_argument(\n '-c', '--config', default=C.CONFIG_FILE,\n help='Path to alternate config file',\n )\n\n\ndef _add_hook_type_option(parser):\n parser.add_argument(\n '-t', '--hook-type', choices=(\n 'pre-commit', 'pre-push', 'prepare-commit-msg', 'commit-msg',\n ),\n default='pre-commit',\n )\n\n\ndef _add_run_options(parser):\n parser.add_argument('hook', nargs='?', help='A single hook-id to run')\n parser.add_argument('--verbose', '-v', action='store_true', default=False)\n parser.add_argument(\n '--origin', '-o',\n help=\"The origin branch's commit_id when using `git push`.\",\n )\n parser.add_argument(\n '--source', '-s',\n help=\"The remote branch's commit_id when using `git push`.\",\n )\n parser.add_argument(\n '--commit-msg-filename',\n help='Filename to check when running during `commit-msg`',\n )\n parser.add_argument(\n '--hook-stage', choices=C.STAGES, default='commit',\n help='The stage during which the hook is fired. One of %(choices)s',\n )\n parser.add_argument(\n '--show-diff-on-failure', action='store_true',\n help='When hooks fail, run `git diff` directly afterward.',\n )\n mutex_group = parser.add_mutually_exclusive_group(required=False)\n mutex_group.add_argument(\n '--all-files', '-a', action='store_true', default=False,\n help='Run on all the files in the repo.',\n )\n mutex_group.add_argument(\n '--files', nargs='*', default=[],\n help='Specific filenames to run hooks on.',\n )\n\n\ndef _adjust_args_and_chdir(args):\n # `--config` was specified relative to the non-root working directory\n if os.path.exists(args.config):\n args.config = os.path.abspath(args.config)\n if args.command in {'run', 'try-repo'}:\n args.files = [os.path.abspath(filename) for filename in args.files]\n if args.command == 'try-repo' and os.path.exists(args.repo):\n args.repo = os.path.abspath(args.repo)\n\n try:\n os.chdir(git.get_root())\n except CalledProcessError:\n raise FatalError(\n 'git failed. 
Is it installed, and are you in a Git repository '\n 'directory?',\n )\n\n args.config = os.path.relpath(args.config)\n if args.command in {'run', 'try-repo'}:\n args.files = [os.path.relpath(filename) for filename in args.files]\n if args.command == 'try-repo' and os.path.exists(args.repo):\n args.repo = os.path.relpath(args.repo)\n\n\ndef main(argv=None):\n argv = argv if argv is not None else sys.argv[1:]\n argv = [five.to_text(arg) for arg in argv]\n parser = argparse.ArgumentParser()\n\n # https://stackoverflow.com/a/8521644/812183\n parser.add_argument(\n '-V', '--version',\n action='version',\n version='%(prog)s {}'.format(C.VERSION),\n )\n\n subparsers = parser.add_subparsers(dest='command')\n\n autoupdate_parser = subparsers.add_parser(\n 'autoupdate',\n help=\"Auto-update pre-commit config to the latest repos' versions.\",\n )\n _add_color_option(autoupdate_parser)\n _add_config_option(autoupdate_parser)\n autoupdate_parser.add_argument(\n '--tags-only', action='store_true', help='LEGACY: for compatibility',\n )\n autoupdate_parser.add_argument(\n '--bleeding-edge', action='store_true',\n help=(\n 'Update to the bleeding edge of `master` instead of the latest '\n 'tagged version (the default behavior).'\n ),\n )\n autoupdate_parser.add_argument(\n '--repo', dest='repos', action='append', metavar='REPO',\n help='Only update this repository -- may be specified multiple times.',\n )\n\n clean_parser = subparsers.add_parser(\n 'clean', help='Clean out pre-commit files.',\n )\n _add_color_option(clean_parser)\n _add_config_option(clean_parser)\n\n gc_parser = subparsers.add_parser('gc', help='Clean unused cached repos.')\n _add_color_option(gc_parser)\n _add_config_option(gc_parser)\n\n init_templatedir_parser = subparsers.add_parser(\n 'init-templatedir',\n help=(\n 'Install hook script in a directory intended for use with '\n '`git config init.templateDir`.'\n ),\n )\n _add_color_option(init_templatedir_parser)\n _add_config_option(init_templatedir_parser)\n init_templatedir_parser.add_argument(\n 'directory', help='The directory in which to write the hook script.',\n )\n _add_hook_type_option(init_templatedir_parser)\n\n install_parser = subparsers.add_parser(\n 'install', help='Install the pre-commit script.',\n )\n _add_color_option(install_parser)\n _add_config_option(install_parser)\n install_parser.add_argument(\n '-f', '--overwrite', action='store_true',\n help='Overwrite existing hooks / remove migration mode.',\n )\n install_parser.add_argument(\n '--install-hooks', action='store_true',\n help=(\n 'Whether to install hook environments for all environments '\n 'in the config file.'\n ),\n )\n _add_hook_type_option(install_parser)\n install_parser.add_argument(\n '--allow-missing-config', action='store_true', default=False,\n help=(\n 'Whether to allow a missing `pre-commit` configuration file '\n 'or exit with a failure code.'\n ),\n )\n\n install_hooks_parser = subparsers.add_parser(\n 'install-hooks',\n help=(\n 'Install hook environments for all environments in the config '\n 'file. 
You may find `pre-commit install --install-hooks` more '\n 'useful.'\n ),\n )\n _add_color_option(install_hooks_parser)\n _add_config_option(install_hooks_parser)\n\n migrate_config_parser = subparsers.add_parser(\n 'migrate-config',\n help='Migrate list configuration to new map configuration.',\n )\n _add_color_option(migrate_config_parser)\n _add_config_option(migrate_config_parser)\n\n run_parser = subparsers.add_parser('run', help='Run hooks.')\n _add_color_option(run_parser)\n _add_config_option(run_parser)\n _add_run_options(run_parser)\n\n sample_config_parser = subparsers.add_parser(\n 'sample-config', help='Produce a sample {} file'.format(C.CONFIG_FILE),\n )\n _add_color_option(sample_config_parser)\n _add_config_option(sample_config_parser)\n\n try_repo_parser = subparsers.add_parser(\n 'try-repo',\n help='Try the hooks in a repository, useful for developing new hooks.',\n )\n _add_color_option(try_repo_parser)\n _add_config_option(try_repo_parser)\n try_repo_parser.add_argument(\n 'repo', help='Repository to source hooks from.',\n )\n try_repo_parser.add_argument(\n '--ref', '--rev',\n help=(\n 'Manually select a rev to run against, otherwise the `HEAD` '\n 'revision will be used.'\n ),\n )\n _add_run_options(try_repo_parser)\n\n uninstall_parser = subparsers.add_parser(\n 'uninstall', help='Uninstall the pre-commit script.',\n )\n _add_color_option(uninstall_parser)\n _add_config_option(uninstall_parser)\n _add_hook_type_option(uninstall_parser)\n\n help = subparsers.add_parser(\n 'help', help='Show help for a specific command.',\n )\n help.add_argument('help_cmd', nargs='?', help='Command to show help for.')\n\n # argparse doesn't really provide a way to use a `default` subparser\n if len(argv) == 0:\n argv = ['run']\n args = parser.parse_args(argv)\n\n if args.command == 'help' and args.help_cmd:\n parser.parse_args([args.help_cmd, '--help'])\n elif args.command == 'help':\n parser.parse_args(['--help'])\n\n with error_handler(), logging_handler(args.color):\n if args.command not in COMMANDS_NO_GIT:\n _adjust_args_and_chdir(args)\n\n git.check_for_cygwin_mismatch()\n\n store = Store()\n store.mark_config_used(args.config)\n\n if args.command == 'autoupdate':\n if args.tags_only:\n logger.warning('--tags-only is the default')\n return autoupdate(\n args.config, store,\n tags_only=not args.bleeding_edge,\n repos=args.repos,\n )\n elif args.command == 'clean':\n return clean(store)\n elif args.command == 'gc':\n return gc(store)\n elif args.command == 'install':\n return install(\n args.config, store,\n overwrite=args.overwrite, hooks=args.install_hooks,\n hook_type=args.hook_type,\n skip_on_missing_config=args.allow_missing_config,\n )\n elif args.command == 'init-templatedir':\n return init_templatedir(\n args.config, store,\n args.directory, hook_type=args.hook_type,\n )\n elif args.command == 'install-hooks':\n return install_hooks(args.config, store)\n elif args.command == 'migrate-config':\n return migrate_config(args.config)\n elif args.command == 'run':\n return run(args.config, store, args)\n elif args.command == 'sample-config':\n return sample_config()\n elif args.command == 'try-repo':\n return try_repo(args)\n elif args.command == 'uninstall':\n return uninstall(hook_type=args.hook_type)\n else:\n raise NotImplementedError(\n 'Command {} not implemented.'.format(args.command),\n )\n\n raise AssertionError(\n 'Command {} failed to exit with a returncode'.format(args.command),\n )\n\n\nif __name__ == '__main__':\n exit(main())\n", "path": "pre_commit/main.py"}]}
3,955
483
gh_patches_debug_62393
rasdani/github-patches
git_diff
AUTOMATIC1111__stable-diffusion-webui-6772
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug]: New SHA256 hash takes extremely long time up to a point of of model load being unusable ### Is there an existing issue for this? - [X] I have searched the existing issues and checked the recent builds/commits ### What happened? Newly added sha-256 hash takes extremely long time to calculate on model load up to a point where loading appears to hang (i've restarted server twice before i even let it run until completion) Previously switching to a new model was sub 10 sec, now switching to a new model (that does not have hash stored already) takes 100-150 sec (and this is a high end system)! And to make it worse, messages about hash calculation are only printed **after** it has been calculated, there is no progress info or anything to indicate system is actually doing anything for 2 min! ### Steps to reproduce the problem 1. Switch to a new model and wait for completion - it takes forever ### What should have happened? Model load should **never** take over 2 minutes to complete. ### Commit where the problem happens f8c512478568293155539f616dce26c5e4495055 ### What platforms do you use to access UI ? Windows, Linux ### What browsers do you use to access the UI ? Google Chrome, Microsoft Edge ### Command Line Arguments ```Shell --api --xformers ``` ### Additional information, context and logs Console log showing model load taking 142 seconds! ```text Calculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef Loading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt Couldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead Applying xformers cross attention optimization. Weights loaded in 142.6s. ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `modules/hashes.py` Content: ``` 1 import hashlib 2 import json 3 import os.path 4 5 import filelock 6 7 8 cache_filename = "cache.json" 9 cache_data = None 10 11 12 def dump_cache(): 13 with filelock.FileLock(cache_filename+".lock"): 14 with open(cache_filename, "w", encoding="utf8") as file: 15 json.dump(cache_data, file, indent=4) 16 17 18 def cache(subsection): 19 global cache_data 20 21 if cache_data is None: 22 with filelock.FileLock(cache_filename+".lock"): 23 if not os.path.isfile(cache_filename): 24 cache_data = {} 25 else: 26 with open(cache_filename, "r", encoding="utf8") as file: 27 cache_data = json.load(file) 28 29 s = cache_data.get(subsection, {}) 30 cache_data[subsection] = s 31 32 return s 33 34 35 def calculate_sha256(filename): 36 hash_sha256 = hashlib.sha256() 37 38 with open(filename, "rb") as f: 39 for chunk in iter(lambda: f.read(4096), b""): 40 hash_sha256.update(chunk) 41 42 return hash_sha256.hexdigest() 43 44 45 def sha256_from_cache(filename, title): 46 hashes = cache("hashes") 47 ondisk_mtime = os.path.getmtime(filename) 48 49 if title not in hashes: 50 return None 51 52 cached_sha256 = hashes[title].get("sha256", None) 53 cached_mtime = hashes[title].get("mtime", 0) 54 55 if ondisk_mtime > cached_mtime or cached_sha256 is None: 56 return None 57 58 return cached_sha256 59 60 61 def sha256(filename, title): 62 hashes = cache("hashes") 63 64 sha256_value = sha256_from_cache(filename, title) 65 if sha256_value is not None: 66 return sha256_value 67 68 print(f"Calculating sha256 for {filename}: ", end='') 69 sha256_value = calculate_sha256(filename) 70 print(f"{sha256_value}") 71 72 hashes[title] = { 73 "mtime": os.path.getmtime(filename), 74 "sha256": sha256_value, 75 } 76 77 dump_cache() 78 79 return sha256_value 80 81 82 83 84 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/modules/hashes.py b/modules/hashes.py --- a/modules/hashes.py +++ b/modules/hashes.py @@ -34,9 +34,10 @@ def calculate_sha256(filename): hash_sha256 = hashlib.sha256() + blksize = 1024 * 1024 with open(filename, "rb") as f: - for chunk in iter(lambda: f.read(4096), b""): + for chunk in iter(lambda: f.read(blksize), b""): hash_sha256.update(chunk) return hash_sha256.hexdigest()
{"golden_diff": "diff --git a/modules/hashes.py b/modules/hashes.py\n--- a/modules/hashes.py\n+++ b/modules/hashes.py\n@@ -34,9 +34,10 @@\n \r\n def calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n+ blksize = 1024 * 1024\r\n \r\n with open(filename, \"rb\") as f:\r\n- for chunk in iter(lambda: f.read(4096), b\"\"):\r\n+ for chunk in iter(lambda: f.read(blksize), b\"\"):\r\n hash_sha256.update(chunk)\r\n \r\n return hash_sha256.hexdigest()\n", "issue": "[Bug]: New SHA256 hash takes extremely long time up to a point of of model load being unusable\n### Is there an existing issue for this?\r\n\r\n- [X] I have searched the existing issues and checked the recent builds/commits\r\n\r\n### What happened?\r\n\r\nNewly added sha-256 hash takes extremely long time to calculate on model load up to a point where loading appears to hang (i've restarted server twice before i even let it run until completion) \r\n\r\nPreviously switching to a new model was sub 10 sec, now switching to a new model (that does not have hash stored already) takes 100-150 sec (and this is a high end system)!\r\n\r\nAnd to make it worse, messages about hash calculation are only printed **after** it has been calculated, there is no progress info or anything to indicate system is actually doing anything for 2 min!\r\n\r\n\r\n### Steps to reproduce the problem\r\n\r\n1. Switch to a new model and wait for completion - it takes forever\r\n\r\n\r\n### What should have happened?\r\n\r\nModel load should **never** take over 2 minutes to complete.\r\n\r\n### Commit where the problem happens\r\n\r\nf8c512478568293155539f616dce26c5e4495055\r\n\r\n### What platforms do you use to access UI ?\r\n\r\nWindows, Linux\r\n\r\n### What browsers do you use to access the UI ?\r\n\r\nGoogle Chrome, Microsoft Edge\r\n\r\n### Command Line Arguments\r\n\r\n```Shell\r\n--api --xformers\r\n```\r\n\r\n\r\n### Additional information, context and logs\r\n\r\nConsole log showing model load taking 142 seconds!\r\n\r\n```text\r\nCalculating sha256 for /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt: bcc0afd3b264ea028928187f56f70840f8d87ccf283b020982beba35d9c7e4ef\r\nLoading weights [bcc0afd3b2] from /home/vlado/dev/automatic/models/Stable-diffusion/mood-beautyreal-v01.ckpt\r\nCouldn't find VAE named vae-ft-mse-840000-ema-pruned; using None instead\r\nApplying xformers cross attention optimization.\r\nWeights loaded in 142.6s.\r\n```\r\n\n", "before_files": [{"content": "import hashlib\r\nimport json\r\nimport os.path\r\n\r\nimport filelock\r\n\r\n\r\ncache_filename = \"cache.json\"\r\ncache_data = None\r\n\r\n\r\ndef dump_cache():\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n with open(cache_filename, \"w\", encoding=\"utf8\") as file:\r\n json.dump(cache_data, file, indent=4)\r\n\r\n\r\ndef cache(subsection):\r\n global cache_data\r\n\r\n if cache_data is None:\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n if not os.path.isfile(cache_filename):\r\n cache_data = {}\r\n else:\r\n with open(cache_filename, \"r\", encoding=\"utf8\") as file:\r\n cache_data = json.load(file)\r\n\r\n s = cache_data.get(subsection, {})\r\n cache_data[subsection] = s\r\n\r\n return s\r\n\r\n\r\ndef calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n\r\n with open(filename, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(4096), b\"\"):\r\n hash_sha256.update(chunk)\r\n\r\n return hash_sha256.hexdigest()\r\n\r\n\r\ndef sha256_from_cache(filename, title):\r\n hashes = cache(\"hashes\")\r\n 
ondisk_mtime = os.path.getmtime(filename)\r\n\r\n if title not in hashes:\r\n return None\r\n\r\n cached_sha256 = hashes[title].get(\"sha256\", None)\r\n cached_mtime = hashes[title].get(\"mtime\", 0)\r\n\r\n if ondisk_mtime > cached_mtime or cached_sha256 is None:\r\n return None\r\n\r\n return cached_sha256\r\n\r\n\r\ndef sha256(filename, title):\r\n hashes = cache(\"hashes\")\r\n\r\n sha256_value = sha256_from_cache(filename, title)\r\n if sha256_value is not None:\r\n return sha256_value\r\n\r\n print(f\"Calculating sha256 for {filename}: \", end='')\r\n sha256_value = calculate_sha256(filename)\r\n print(f\"{sha256_value}\")\r\n\r\n hashes[title] = {\r\n \"mtime\": os.path.getmtime(filename),\r\n \"sha256\": sha256_value,\r\n }\r\n\r\n dump_cache()\r\n\r\n return sha256_value\r\n\r\n\r\n\r\n\r\n\r\n", "path": "modules/hashes.py"}], "after_files": [{"content": "import hashlib\r\nimport json\r\nimport os.path\r\n\r\nimport filelock\r\n\r\n\r\ncache_filename = \"cache.json\"\r\ncache_data = None\r\n\r\n\r\ndef dump_cache():\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n with open(cache_filename, \"w\", encoding=\"utf8\") as file:\r\n json.dump(cache_data, file, indent=4)\r\n\r\n\r\ndef cache(subsection):\r\n global cache_data\r\n\r\n if cache_data is None:\r\n with filelock.FileLock(cache_filename+\".lock\"):\r\n if not os.path.isfile(cache_filename):\r\n cache_data = {}\r\n else:\r\n with open(cache_filename, \"r\", encoding=\"utf8\") as file:\r\n cache_data = json.load(file)\r\n\r\n s = cache_data.get(subsection, {})\r\n cache_data[subsection] = s\r\n\r\n return s\r\n\r\n\r\ndef calculate_sha256(filename):\r\n hash_sha256 = hashlib.sha256()\r\n blksize = 1024 * 1024\r\n\r\n with open(filename, \"rb\") as f:\r\n for chunk in iter(lambda: f.read(blksize), b\"\"):\r\n hash_sha256.update(chunk)\r\n\r\n return hash_sha256.hexdigest()\r\n\r\n\r\ndef sha256_from_cache(filename, title):\r\n hashes = cache(\"hashes\")\r\n ondisk_mtime = os.path.getmtime(filename)\r\n\r\n if title not in hashes:\r\n return None\r\n\r\n cached_sha256 = hashes[title].get(\"sha256\", None)\r\n cached_mtime = hashes[title].get(\"mtime\", 0)\r\n\r\n if ondisk_mtime > cached_mtime or cached_sha256 is None:\r\n return None\r\n\r\n return cached_sha256\r\n\r\n\r\ndef sha256(filename, title):\r\n hashes = cache(\"hashes\")\r\n\r\n sha256_value = sha256_from_cache(filename, title)\r\n if sha256_value is not None:\r\n return sha256_value\r\n\r\n print(f\"Calculating sha256 for {filename}: \", end='')\r\n sha256_value = calculate_sha256(filename)\r\n print(f\"{sha256_value}\")\r\n\r\n hashes[title] = {\r\n \"mtime\": os.path.getmtime(filename),\r\n \"sha256\": sha256_value,\r\n }\r\n\r\n dump_cache()\r\n\r\n return sha256_value\r\n\r\n\r\n\r\n\r\n\r\n", "path": "modules/hashes.py"}]}
1,459
148
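The fix in this record is a one-line change of the hashing read size from 4 KiB to 1 MiB. Below is a self-contained sketch of the patched loop from `modules/hashes.py`, with comments on why the larger block size matters for multi-gigabyte checkpoint files; the progress/printing behaviour complained about in the issue is unchanged by this patch.

```python
import hashlib


def calculate_sha256(filename, blksize=1024 * 1024):
    # A 1 MiB read size keeps the number of Python-level f.read() and
    # hash.update() calls low; with the old 4096-byte chunks a 4 GB .ckpt
    # needed roughly a million loop iterations, which dominated model-load
    # time on first use of a checkpoint.
    hash_sha256 = hashlib.sha256()
    with open(filename, "rb") as f:
        for chunk in iter(lambda: f.read(blksize), b""):
            hash_sha256.update(chunk)
    return hash_sha256.hexdigest()
```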
gh_patches_debug_44267
rasdani/github-patches
git_diff
dask__dask-1246
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dask.array regression involving arithmetic with xarray objects This code worked with dask v0.7.6, but failed when I updated to v0.9.0: ``` In [1]: import numpy as np In [2]: import xarray as xr In [3]: import dask.array as da In [4]: x = xr.core.indexing.LazilyIndexedArray(np.zeros((3, 3))) In [5]: y = da.from_array(x, (2, 2)) In [6]: (y[:2, :2] * y[:2, :2]).compute() --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-6-82666a5a7d29> in <module>() ----> 1 (y[:2, :2] * y[:2, :2]).compute() /Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(self, **kwargs) 35 36 def compute(self, **kwargs): ---> 37 return compute(self, **kwargs)[0] 38 39 @classmethod /Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(*args, **kwargs) 108 for opt, val in groups.items()]) 109 keys = [var._keys() for var in variables] --> 110 results = get(dsk, keys, **kwargs) 111 112 results_iter = iter(results) /Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/threaded.py in get(dsk, result, cache, num_workers, **kwargs) 55 results = get_async(pool.apply_async, len(pool._pool), dsk, result, 56 cache=cache, queue=queue, get_id=_thread_get_id, ---> 57 **kwargs) 58 59 return results /Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py in get_async(apply_async, num_workers, dsk, result, cache, queue, get_id, raise_on_exception, rerun_exceptions_locally, callbacks, **kwargs) 486 _execute_task(task, data) # Re-execute locally 487 else: --> 488 raise(remote_exception(res, tb)) 489 state['cache'][key] = res 490 finish_task(dsk, key, state, results, keyorder.get) TypeError: unsupported operand type(s) for *: 'LazilyIndexedArray' and 'LazilyIndexedArray' Traceback --------- File "/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py", line 267, in execute_task result = _execute_task(task, data) File "/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py", line 249, in _execute_task return func(*args2) ``` LazilyIndexedArray is an object that satisfies some of the usual duck-array API (e.g. shape, dtype, `__getitem__`) and that can be coerced into a NumPy array: https://github.com/pydata/xarray/blob/v0.7.2/xarray/core/indexing.py#L272 I _think_ it should be valid into to `da.from_array` -- this certainly worked in old versions of dask. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dask/array/optimization.py` Content: ``` 1 from __future__ import absolute_import, division, print_function 2 3 from operator import getitem 4 5 import numpy as np 6 from toolz import valmap, partial 7 8 from .core import getarray 9 from ..core import flatten 10 from ..optimize import cull, fuse, dealias, inline_functions 11 from ..rewrite import RuleSet, RewriteRule 12 13 14 def optimize(dsk, keys, **kwargs): 15 """ Optimize dask for array computation 16 17 1. Cull tasks not necessary to evaluate keys 18 2. Remove full slicing, e.g. x[:] 19 3. 
Inline fast functions like getitem and np.transpose 20 """ 21 keys = list(flatten(keys)) 22 fast_functions = kwargs.get('fast_functions', 23 set([getarray, np.transpose])) 24 dsk2, dependencies = cull(dsk, keys) 25 dsk4, dependencies = fuse(dsk2, keys, dependencies) 26 dsk5 = optimize_slices(dsk4) 27 dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions, 28 dependencies=dependencies) 29 return dsk6 30 31 32 def optimize_slices(dsk): 33 """ Optimize slices 34 35 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11] 36 2. Remove full slices, like x[:] -> x 37 38 See also: 39 fuse_slice_dict 40 """ 41 dsk = dsk.copy() 42 for k, v in dsk.items(): 43 if type(v) is tuple: 44 if v[0] is getitem or v[0] is getarray: 45 try: 46 func, a, a_index = v 47 except ValueError: # has four elements, includes a lock 48 continue 49 while type(a) is tuple and (a[0] is getitem or a[0] is getarray): 50 try: 51 _, b, b_index = a 52 except ValueError: # has four elements, includes a lock 53 break 54 if (type(a_index) is tuple) != (type(b_index) is tuple): 55 break 56 if ((type(a_index) is tuple) and 57 (len(a_index) != len(b_index)) and 58 any(i is None for i in b_index + a_index)): 59 break 60 try: 61 c_index = fuse_slice(b_index, a_index) 62 except NotImplementedError: 63 break 64 (a, a_index) = (b, c_index) 65 if (type(a_index) is slice and 66 not a_index.start and 67 a_index.stop is None and 68 a_index.step is None): 69 dsk[k] = a 70 elif type(a_index) is tuple and all(type(s) is slice and 71 not s.start and 72 s.stop is None and 73 s.step is None 74 for s in a_index): 75 dsk[k] = a 76 else: 77 dsk[k] = (func, a, a_index) 78 return dsk 79 80 81 def normalize_slice(s): 82 """ Replace Nones in slices with integers 83 84 >>> normalize_slice(slice(None, None, None)) 85 slice(0, None, 1) 86 """ 87 start, stop, step = s.start, s.stop, s.step 88 if start is None: 89 start = 0 90 if step is None: 91 step = 1 92 if start < 0 or step < 0 or stop is not None and stop < 0: 93 raise NotImplementedError() 94 return slice(start, stop, step) 95 96 97 def fuse_slice(a, b): 98 """ Fuse stacked slices together 99 100 Fuse a pair of repeated slices into a single slice: 101 102 >>> fuse_slice(slice(1000, 2000), slice(10, 15)) 103 slice(1010, 1015, None) 104 105 This also works for tuples of slices 106 107 >>> fuse_slice((slice(100, 200), slice(100, 200, 10)), 108 ... 
(slice(10, 15), [5, 2])) 109 (slice(110, 115, None), [150, 120]) 110 111 And a variety of other interesting cases 112 113 >>> fuse_slice(slice(1000, 2000), 10) # integers 114 1010 115 116 >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2)) 117 slice(1050, 1100, 10) 118 119 >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists 120 [1005, 1010, 1015] 121 122 >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP 123 None 124 """ 125 # None only works if the second side is a full slice 126 if a is None and b == slice(None, None): 127 return None 128 129 # Replace None with 0 and one in start and step 130 if isinstance(a, slice): 131 a = normalize_slice(a) 132 if isinstance(b, slice): 133 b = normalize_slice(b) 134 135 if isinstance(a, slice) and isinstance(b, int): 136 if b < 0: 137 raise NotImplementedError() 138 return a.start + b*a.step 139 140 if isinstance(a, slice) and isinstance(b, slice): 141 start = a.start + a.step * b.start 142 if b.stop is not None: 143 stop = a.start + a.step * b.stop 144 else: 145 stop = None 146 if a.stop is not None: 147 if stop is not None: 148 stop = min(a.stop, stop) 149 else: 150 stop = a.stop 151 stop = stop 152 step = a.step * b.step 153 if step == 1: 154 step = None 155 return slice(start, stop, step) 156 157 if isinstance(b, list): 158 return [fuse_slice(a, bb) for bb in b] 159 if isinstance(a, list) and isinstance(b, (int, slice)): 160 return a[b] 161 162 if isinstance(a, tuple) and not isinstance(b, tuple): 163 b = (b,) 164 165 # If given two tuples walk through both, being mindful of uneven sizes 166 # and newaxes 167 if isinstance(a, tuple) and isinstance(b, tuple): 168 169 if (any(isinstance(item, list) for item in a) and 170 any(isinstance(item, list) for item in b)): 171 raise NotImplementedError("Can't handle multiple list indexing") 172 173 j = 0 174 result = list() 175 for i in range(len(a)): 176 # axis ceased to exist or we're out of b 177 if isinstance(a[i], int) or j == len(b): 178 result.append(a[i]) 179 continue 180 while b[j] is None: # insert any Nones on the rhs 181 result.append(None) 182 j += 1 183 result.append(fuse_slice(a[i], b[j])) # Common case 184 j += 1 185 while j < len(b): # anything leftover on the right? 186 result.append(b[j]) 187 j += 1 188 return tuple(result) 189 raise NotImplementedError() 190 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dask/array/optimization.py b/dask/array/optimization.py --- a/dask/array/optimization.py +++ b/dask/array/optimization.py @@ -3,12 +3,10 @@ from operator import getitem import numpy as np -from toolz import valmap, partial from .core import getarray from ..core import flatten -from ..optimize import cull, fuse, dealias, inline_functions -from ..rewrite import RuleSet, RewriteRule +from ..optimize import cull, fuse, inline_functions def optimize(dsk, keys, **kwargs): @@ -20,12 +18,12 @@ """ keys = list(flatten(keys)) fast_functions = kwargs.get('fast_functions', - set([getarray, np.transpose])) + set([getarray, np.transpose])) dsk2, dependencies = cull(dsk, keys) dsk4, dependencies = fuse(dsk2, keys, dependencies) dsk5 = optimize_slices(dsk4) dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions, - dependencies=dependencies) + dependencies=dependencies) return dsk6 @@ -38,34 +36,39 @@ See also: fuse_slice_dict """ + getters = (getarray, getitem) dsk = dsk.copy() for k, v in dsk.items(): if type(v) is tuple: - if v[0] is getitem or v[0] is getarray: + if v[0] in getters: try: func, a, a_index = v + use_getarray = func is getarray except ValueError: # has four elements, includes a lock continue - while type(a) is tuple and (a[0] is getitem or a[0] is getarray): + while type(a) is tuple and a[0] in getters: try: - _, b, b_index = a + f2, b, b_index = a + use_getarray |= f2 is getarray except ValueError: # has four elements, includes a lock break if (type(a_index) is tuple) != (type(b_index) is tuple): break if ((type(a_index) is tuple) and - (len(a_index) != len(b_index)) and - any(i is None for i in b_index + a_index)): + (len(a_index) != len(b_index)) and + any(i is None for i in b_index + a_index)): break try: c_index = fuse_slice(b_index, a_index) except NotImplementedError: break (a, a_index) = (b, c_index) - if (type(a_index) is slice and - not a_index.start and - a_index.stop is None and - a_index.step is None): + if use_getarray: + dsk[k] = (getarray, a, a_index) + elif (type(a_index) is slice and + not a_index.start and + a_index.stop is None and + a_index.step is None): dsk[k] = a elif type(a_index) is tuple and all(type(s) is slice and not s.start and @@ -74,7 +77,7 @@ for s in a_index): dsk[k] = a else: - dsk[k] = (func, a, a_index) + dsk[k] = (getitem, a, a_index) return dsk @@ -167,7 +170,7 @@ if isinstance(a, tuple) and isinstance(b, tuple): if (any(isinstance(item, list) for item in a) and - any(isinstance(item, list) for item in b)): + any(isinstance(item, list) for item in b)): raise NotImplementedError("Can't handle multiple list indexing") j = 0
{"golden_diff": "diff --git a/dask/array/optimization.py b/dask/array/optimization.py\n--- a/dask/array/optimization.py\n+++ b/dask/array/optimization.py\n@@ -3,12 +3,10 @@\n from operator import getitem\n \n import numpy as np\n-from toolz import valmap, partial\n \n from .core import getarray\n from ..core import flatten\n-from ..optimize import cull, fuse, dealias, inline_functions\n-from ..rewrite import RuleSet, RewriteRule\n+from ..optimize import cull, fuse, inline_functions\n \n \n def optimize(dsk, keys, **kwargs):\n@@ -20,12 +18,12 @@\n \"\"\"\n keys = list(flatten(keys))\n fast_functions = kwargs.get('fast_functions',\n- set([getarray, np.transpose]))\n+ set([getarray, np.transpose]))\n dsk2, dependencies = cull(dsk, keys)\n dsk4, dependencies = fuse(dsk2, keys, dependencies)\n dsk5 = optimize_slices(dsk4)\n dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions,\n- dependencies=dependencies)\n+ dependencies=dependencies)\n return dsk6\n \n \n@@ -38,34 +36,39 @@\n See also:\n fuse_slice_dict\n \"\"\"\n+ getters = (getarray, getitem)\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple:\n- if v[0] is getitem or v[0] is getarray:\n+ if v[0] in getters:\n try:\n func, a, a_index = v\n+ use_getarray = func is getarray\n except ValueError: # has four elements, includes a lock\n continue\n- while type(a) is tuple and (a[0] is getitem or a[0] is getarray):\n+ while type(a) is tuple and a[0] in getters:\n try:\n- _, b, b_index = a\n+ f2, b, b_index = a\n+ use_getarray |= f2 is getarray\n except ValueError: # has four elements, includes a lock\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if ((type(a_index) is tuple) and\n- (len(a_index) != len(b_index)) and\n- any(i is None for i in b_index + a_index)):\n+ (len(a_index) != len(b_index)) and\n+ any(i is None for i in b_index + a_index)):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n except NotImplementedError:\n break\n (a, a_index) = (b, c_index)\n- if (type(a_index) is slice and\n- not a_index.start and\n- a_index.stop is None and\n- a_index.step is None):\n+ if use_getarray:\n+ dsk[k] = (getarray, a, a_index)\n+ elif (type(a_index) is slice and\n+ not a_index.start and\n+ a_index.stop is None and\n+ a_index.step is None):\n dsk[k] = a\n elif type(a_index) is tuple and all(type(s) is slice and\n not s.start and\n@@ -74,7 +77,7 @@\n for s in a_index):\n dsk[k] = a\n else:\n- dsk[k] = (func, a, a_index)\n+ dsk[k] = (getitem, a, a_index)\n return dsk\n \n \n@@ -167,7 +170,7 @@\n if isinstance(a, tuple) and isinstance(b, tuple):\n \n if (any(isinstance(item, list) for item in a) and\n- any(isinstance(item, list) for item in b)):\n+ any(isinstance(item, list) for item in b)):\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n \n j = 0\n", "issue": "dask.array regression involving arithmetic with xarray objects\nThis code worked with dask v0.7.6, but failed when I updated to v0.9.0:\n\n```\nIn [1]: import numpy as np\n\nIn [2]: import xarray as xr\n\nIn [3]: import dask.array as da\n\nIn [4]: x = xr.core.indexing.LazilyIndexedArray(np.zeros((3, 3)))\n\nIn [5]: y = da.from_array(x, (2, 2))\n\nIn [6]: (y[:2, :2] * y[:2, :2]).compute()\n---------------------------------------------------------------------------\nTypeError Traceback (most recent call last)\n<ipython-input-6-82666a5a7d29> in <module>()\n----> 1 (y[:2, :2] * y[:2, :2]).compute()\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(self, **kwargs)\n 35\n 36 
def compute(self, **kwargs):\n---> 37 return compute(self, **kwargs)[0]\n 38\n 39 @classmethod\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/base.py in compute(*args, **kwargs)\n 108 for opt, val in groups.items()])\n 109 keys = [var._keys() for var in variables]\n--> 110 results = get(dsk, keys, **kwargs)\n 111\n 112 results_iter = iter(results)\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/threaded.py in get(dsk, result, cache, num_workers, **kwargs)\n 55 results = get_async(pool.apply_async, len(pool._pool), dsk, result,\n 56 cache=cache, queue=queue, get_id=_thread_get_id,\n---> 57 **kwargs)\n 58\n 59 return results\n\n/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py in get_async(apply_async, num_workers, dsk, result, cache, queue, get_id, raise_on_exception, rerun_exceptions_locally, callbacks, **kwargs)\n 486 _execute_task(task, data) # Re-execute locally\n 487 else:\n--> 488 raise(remote_exception(res, tb))\n 489 state['cache'][key] = res\n 490 finish_task(dsk, key, state, results, keyorder.get)\n\nTypeError: unsupported operand type(s) for *: 'LazilyIndexedArray' and 'LazilyIndexedArray'\n\nTraceback\n---------\n File \"/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py\", line 267, in execute_task\n result = _execute_task(task, data)\n File \"/Users/shoyer/conda/envs/xarray-dev/lib/python3.5/site-packages/dask/async.py\", line 249, in _execute_task\n return func(*args2)\n```\n\nLazilyIndexedArray is an object that satisfies some of the usual duck-array API (e.g. shape, dtype, `__getitem__`) and that can be coerced into a NumPy array:\nhttps://github.com/pydata/xarray/blob/v0.7.2/xarray/core/indexing.py#L272\n\nI _think_ it should be valid into to `da.from_array` -- this certainly worked in old versions of dask.\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom operator import getitem\n\nimport numpy as np\nfrom toolz import valmap, partial\n\nfrom .core import getarray\nfrom ..core import flatten\nfrom ..optimize import cull, fuse, dealias, inline_functions\nfrom ..rewrite import RuleSet, RewriteRule\n\n\ndef optimize(dsk, keys, **kwargs):\n \"\"\" Optimize dask for array computation\n\n 1. Cull tasks not necessary to evaluate keys\n 2. Remove full slicing, e.g. x[:]\n 3. Inline fast functions like getitem and np.transpose\n \"\"\"\n keys = list(flatten(keys))\n fast_functions = kwargs.get('fast_functions',\n set([getarray, np.transpose]))\n dsk2, dependencies = cull(dsk, keys)\n dsk4, dependencies = fuse(dsk2, keys, dependencies)\n dsk5 = optimize_slices(dsk4)\n dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions,\n dependencies=dependencies)\n return dsk6\n\n\ndef optimize_slices(dsk):\n \"\"\" Optimize slices\n\n 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]\n 2. 
Remove full slices, like x[:] -> x\n\n See also:\n fuse_slice_dict\n \"\"\"\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple:\n if v[0] is getitem or v[0] is getarray:\n try:\n func, a, a_index = v\n except ValueError: # has four elements, includes a lock\n continue\n while type(a) is tuple and (a[0] is getitem or a[0] is getarray):\n try:\n _, b, b_index = a\n except ValueError: # has four elements, includes a lock\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if ((type(a_index) is tuple) and\n (len(a_index) != len(b_index)) and\n any(i is None for i in b_index + a_index)):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n except NotImplementedError:\n break\n (a, a_index) = (b, c_index)\n if (type(a_index) is slice and\n not a_index.start and\n a_index.stop is None and\n a_index.step is None):\n dsk[k] = a\n elif type(a_index) is tuple and all(type(s) is slice and\n not s.start and\n s.stop is None and\n s.step is None\n for s in a_index):\n dsk[k] = a\n else:\n dsk[k] = (func, a, a_index)\n return dsk\n\n\ndef normalize_slice(s):\n \"\"\" Replace Nones in slices with integers\n\n >>> normalize_slice(slice(None, None, None))\n slice(0, None, 1)\n \"\"\"\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)\n\n\ndef fuse_slice(a, b):\n \"\"\" Fuse stacked slices together\n\n Fuse a pair of repeated slices into a single slice:\n\n >>> fuse_slice(slice(1000, 2000), slice(10, 15))\n slice(1010, 1015, None)\n\n This also works for tuples of slices\n\n >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),\n ... (slice(10, 15), [5, 2]))\n (slice(110, 115, None), [150, 120])\n\n And a variety of other interesting cases\n\n >>> fuse_slice(slice(1000, 2000), 10) # integers\n 1010\n\n >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))\n slice(1050, 1100, 10)\n\n >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists\n [1005, 1010, 1015]\n\n >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP\n None\n \"\"\"\n # None only works if the second side is a full slice\n if a is None and b == slice(None, None):\n return None\n\n # Replace None with 0 and one in start and step\n if isinstance(a, slice):\n a = normalize_slice(a)\n if isinstance(b, slice):\n b = normalize_slice(b)\n\n if isinstance(a, slice) and isinstance(b, int):\n if b < 0:\n raise NotImplementedError()\n return a.start + b*a.step\n\n if isinstance(a, slice) and isinstance(b, slice):\n start = a.start + a.step * b.start\n if b.stop is not None:\n stop = a.start + a.step * b.stop\n else:\n stop = None\n if a.stop is not None:\n if stop is not None:\n stop = min(a.stop, stop)\n else:\n stop = a.stop\n stop = stop\n step = a.step * b.step\n if step == 1:\n step = None\n return slice(start, stop, step)\n\n if isinstance(b, list):\n return [fuse_slice(a, bb) for bb in b]\n if isinstance(a, list) and isinstance(b, (int, slice)):\n return a[b]\n\n if isinstance(a, tuple) and not isinstance(b, tuple):\n b = (b,)\n\n # If given two tuples walk through both, being mindful of uneven sizes\n # and newaxes\n if isinstance(a, tuple) and isinstance(b, tuple):\n\n if (any(isinstance(item, list) for item in a) and\n any(isinstance(item, list) for item in b)):\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n\n j = 0\n result = list()\n for i in range(len(a)):\n # axis ceased to exist or we're 
out of b\n if isinstance(a[i], int) or j == len(b):\n result.append(a[i])\n continue\n while b[j] is None: # insert any Nones on the rhs\n result.append(None)\n j += 1\n result.append(fuse_slice(a[i], b[j])) # Common case\n j += 1\n while j < len(b): # anything leftover on the right?\n result.append(b[j])\n j += 1\n return tuple(result)\n raise NotImplementedError()\n", "path": "dask/array/optimization.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nfrom operator import getitem\n\nimport numpy as np\n\nfrom .core import getarray\nfrom ..core import flatten\nfrom ..optimize import cull, fuse, inline_functions\n\n\ndef optimize(dsk, keys, **kwargs):\n \"\"\" Optimize dask for array computation\n\n 1. Cull tasks not necessary to evaluate keys\n 2. Remove full slicing, e.g. x[:]\n 3. Inline fast functions like getitem and np.transpose\n \"\"\"\n keys = list(flatten(keys))\n fast_functions = kwargs.get('fast_functions',\n set([getarray, np.transpose]))\n dsk2, dependencies = cull(dsk, keys)\n dsk4, dependencies = fuse(dsk2, keys, dependencies)\n dsk5 = optimize_slices(dsk4)\n dsk6 = inline_functions(dsk5, keys, fast_functions=fast_functions,\n dependencies=dependencies)\n return dsk6\n\n\ndef optimize_slices(dsk):\n \"\"\" Optimize slices\n\n 1. Fuse repeated slices, like x[5:][2:6] -> x[7:11]\n 2. Remove full slices, like x[:] -> x\n\n See also:\n fuse_slice_dict\n \"\"\"\n getters = (getarray, getitem)\n dsk = dsk.copy()\n for k, v in dsk.items():\n if type(v) is tuple:\n if v[0] in getters:\n try:\n func, a, a_index = v\n use_getarray = func is getarray\n except ValueError: # has four elements, includes a lock\n continue\n while type(a) is tuple and a[0] in getters:\n try:\n f2, b, b_index = a\n use_getarray |= f2 is getarray\n except ValueError: # has four elements, includes a lock\n break\n if (type(a_index) is tuple) != (type(b_index) is tuple):\n break\n if ((type(a_index) is tuple) and\n (len(a_index) != len(b_index)) and\n any(i is None for i in b_index + a_index)):\n break\n try:\n c_index = fuse_slice(b_index, a_index)\n except NotImplementedError:\n break\n (a, a_index) = (b, c_index)\n if use_getarray:\n dsk[k] = (getarray, a, a_index)\n elif (type(a_index) is slice and\n not a_index.start and\n a_index.stop is None and\n a_index.step is None):\n dsk[k] = a\n elif type(a_index) is tuple and all(type(s) is slice and\n not s.start and\n s.stop is None and\n s.step is None\n for s in a_index):\n dsk[k] = a\n else:\n dsk[k] = (getitem, a, a_index)\n return dsk\n\n\ndef normalize_slice(s):\n \"\"\" Replace Nones in slices with integers\n\n >>> normalize_slice(slice(None, None, None))\n slice(0, None, 1)\n \"\"\"\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return slice(start, stop, step)\n\n\ndef fuse_slice(a, b):\n \"\"\" Fuse stacked slices together\n\n Fuse a pair of repeated slices into a single slice:\n\n >>> fuse_slice(slice(1000, 2000), slice(10, 15))\n slice(1010, 1015, None)\n\n This also works for tuples of slices\n\n >>> fuse_slice((slice(100, 200), slice(100, 200, 10)),\n ... 
(slice(10, 15), [5, 2]))\n (slice(110, 115, None), [150, 120])\n\n And a variety of other interesting cases\n\n >>> fuse_slice(slice(1000, 2000), 10) # integers\n 1010\n\n >>> fuse_slice(slice(1000, 2000, 5), slice(10, 20, 2))\n slice(1050, 1100, 10)\n\n >>> fuse_slice(slice(1000, 2000, 5), [1, 2, 3]) # lists\n [1005, 1010, 1015]\n\n >>> fuse_slice(None, slice(None, None)) # doctest: +SKIP\n None\n \"\"\"\n # None only works if the second side is a full slice\n if a is None and b == slice(None, None):\n return None\n\n # Replace None with 0 and one in start and step\n if isinstance(a, slice):\n a = normalize_slice(a)\n if isinstance(b, slice):\n b = normalize_slice(b)\n\n if isinstance(a, slice) and isinstance(b, int):\n if b < 0:\n raise NotImplementedError()\n return a.start + b*a.step\n\n if isinstance(a, slice) and isinstance(b, slice):\n start = a.start + a.step * b.start\n if b.stop is not None:\n stop = a.start + a.step * b.stop\n else:\n stop = None\n if a.stop is not None:\n if stop is not None:\n stop = min(a.stop, stop)\n else:\n stop = a.stop\n stop = stop\n step = a.step * b.step\n if step == 1:\n step = None\n return slice(start, stop, step)\n\n if isinstance(b, list):\n return [fuse_slice(a, bb) for bb in b]\n if isinstance(a, list) and isinstance(b, (int, slice)):\n return a[b]\n\n if isinstance(a, tuple) and not isinstance(b, tuple):\n b = (b,)\n\n # If given two tuples walk through both, being mindful of uneven sizes\n # and newaxes\n if isinstance(a, tuple) and isinstance(b, tuple):\n\n if (any(isinstance(item, list) for item in a) and\n any(isinstance(item, list) for item in b)):\n raise NotImplementedError(\"Can't handle multiple list indexing\")\n\n j = 0\n result = list()\n for i in range(len(a)):\n # axis ceased to exist or we're out of b\n if isinstance(a[i], int) or j == len(b):\n result.append(a[i])\n continue\n while b[j] is None: # insert any Nones on the rhs\n result.append(None)\n j += 1\n result.append(fuse_slice(a[i], b[j])) # Common case\n j += 1\n while j < len(b): # anything leftover on the right?\n result.append(b[j])\n j += 1\n return tuple(result)\n raise NotImplementedError()\n", "path": "dask/array/optimization.py"}]}
3,149
908
gh_patches_debug_605
rasdani/github-patches
git_diff
pex-tool__pex-1664
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.71 On the docket: + [x] Secure Pex against sha1 collision attacks. #1662 + [x] Problems building venvs from certain distributions. #1656 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.70" 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.70" +__version__ = "2.1.71"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.70\"\n+__version__ = \"2.1.71\"\n", "issue": "Release 2.1.71\nOn the docket:\r\n+ [x] Secure Pex against sha1 collision attacks. #1662 \r\n+ [x] Problems building venvs from certain distributions. #1656\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.70\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.71\"\n", "path": "pex/version.py"}]}
359
96
gh_patches_debug_881
rasdani/github-patches
git_diff
python__peps-3263
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Infra: Check Sphinx warnings on CI This is similar to what we have in the CPython repo, most recently: https://github.com/python/cpython/pull/106460, and will help us gradually remove Sphinx warnings, and avoid new ones being introduces. It checks three things: 1. If a file previously had no warnings (not listed in `.nitignore`), and new ones are introduced, it fails * -> To prevent regressions 2. If a file previously had warnings (it's lsited in `.nitignore`), but now has none, it fails and tells us to remove it from `.nitignore` * To help us incrementally improve over time 3. If a file previously had warnings (it's listed in `.nitignore`), and still has warnings, it doesn't fail, but it will annotate the PR to show the warning * To make them more visible, and give us the opportunity to fix them I've intentionally kept the code and layout as close as possible to the CPython version (see https://github.com/python/cpython/tree/main/Doc/tools) for easier future maintenance. <!-- readthedocs-preview pep-previews start --> ---- :books: Documentation preview :books:: https://pep-previews--3213.org.readthedocs.build/ <!-- readthedocs-preview pep-previews end --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conf.py` Content: ``` 1 # This file is placed in the public domain or under the 2 # CC0-1.0-Universal license, whichever is more permissive. 3 4 """Configuration for building PEPs using Sphinx.""" 5 6 from pathlib import Path 7 import sys 8 9 sys.path.append(str(Path(".").absolute())) 10 11 # -- Project information ----------------------------------------------------- 12 13 project = "PEPs" 14 master_doc = "contents" 15 16 # -- General configuration --------------------------------------------------- 17 18 # Add any Sphinx extension module names here, as strings. 19 extensions = [ 20 "pep_sphinx_extensions", 21 "sphinx.ext.intersphinx", 22 "sphinx.ext.githubpages", 23 ] 24 25 # The file extensions of source files. Sphinx uses these suffixes as sources. 26 source_suffix = { 27 ".rst": "pep", 28 ".txt": "pep", 29 } 30 31 # List of patterns (relative to source dir) to ignore when looking for source files. 
32 include_patterns = [ 33 # Required for Sphinx 34 "contents.rst", 35 # PEP files 36 "pep-????.rst", 37 "pep-????.txt", 38 # PEP ancillary files 39 "pep-????/*.rst", 40 # Documentation 41 "docs/*.rst", 42 ] 43 exclude_patterns = [ 44 # PEP Template 45 "pep-0012/pep-NNNN.rst", 46 ] 47 48 # Intersphinx configuration 49 intersphinx_mapping = { 50 'python': ('https://docs.python.org/3/', None), 51 'packaging': ('https://packaging.python.org/en/latest/', None), 52 'devguide': ('https://devguide.python.org/', None), 53 'py3.11': ('https://docs.python.org/3.11/', None), 54 'py3.12': ('https://docs.python.org/3.12/', None), 55 } 56 intersphinx_disabled_reftypes = [] 57 58 # -- Options for HTML output ------------------------------------------------- 59 60 # HTML output settings 61 html_math_renderer = "maths_to_html" # Maths rendering 62 63 # Theme settings 64 html_theme_path = ["pep_sphinx_extensions"] 65 html_theme = "pep_theme" # The actual theme directory (child of html_theme_path) 66 html_use_index = False # Disable index (we use PEP 0) 67 html_style = "" # must be defined here or in theme.conf, but is unused 68 html_permalinks = False # handled in the PEPContents transform 69 html_baseurl = "https://peps.python.org" # to create the CNAME file 70 gettext_auto_build = False # speed-ups 71 72 templates_path = ["pep_sphinx_extensions/pep_theme/templates"] # Theme template relative paths from `confdir` 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conf.py b/conf.py --- a/conf.py +++ b/conf.py @@ -45,6 +45,9 @@ "pep-0012/pep-NNNN.rst", ] +# Warn on missing references +nitpicky = True + # Intersphinx configuration intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None),
{"golden_diff": "diff --git a/conf.py b/conf.py\n--- a/conf.py\n+++ b/conf.py\n@@ -45,6 +45,9 @@\n \"pep-0012/pep-NNNN.rst\",\n ]\n \n+# Warn on missing references\n+nitpicky = True\n+\n # Intersphinx configuration\n intersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n", "issue": "Infra: Check Sphinx warnings on CI\nThis is similar to what we have in the CPython repo, most recently: https://github.com/python/cpython/pull/106460, and will help us gradually remove Sphinx warnings, and avoid new ones being introduces.\r\n\r\nIt checks three things:\r\n\r\n1. If a file previously had no warnings (not listed in `.nitignore`), and new ones are introduced, it fails\r\n * -> To prevent regressions\r\n\r\n2. If a file previously had warnings (it's lsited in `.nitignore`), but now has none, it fails and tells us to remove it from `.nitignore`\r\n * To help us incrementally improve over time\r\n\r\n3. If a file previously had warnings (it's listed in `.nitignore`), and still has warnings, it doesn't fail, but it will annotate the PR to show the warning\r\n * To make them more visible, and give us the opportunity to fix them\r\n\r\nI've intentionally kept the code and layout as close as possible to the CPython version (see https://github.com/python/cpython/tree/main/Doc/tools) for easier future maintenance.\r\n\r\n\r\n\r\n<!-- readthedocs-preview pep-previews start -->\r\n----\n:books: Documentation preview :books:: https://pep-previews--3213.org.readthedocs.build/\n\r\n<!-- readthedocs-preview pep-previews end -->\n", "before_files": [{"content": "# This file is placed in the public domain or under the\n# CC0-1.0-Universal license, whichever is more permissive.\n\n\"\"\"Configuration for building PEPs using Sphinx.\"\"\"\n\nfrom pathlib import Path\nimport sys\n\nsys.path.append(str(Path(\".\").absolute()))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"PEPs\"\nmaster_doc = \"contents\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings.\nextensions = [\n \"pep_sphinx_extensions\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.githubpages\",\n]\n\n# The file extensions of source files. 
Sphinx uses these suffixes as sources.\nsource_suffix = {\n \".rst\": \"pep\",\n \".txt\": \"pep\",\n}\n\n# List of patterns (relative to source dir) to ignore when looking for source files.\ninclude_patterns = [\n # Required for Sphinx\n \"contents.rst\",\n # PEP files\n \"pep-????.rst\",\n \"pep-????.txt\",\n # PEP ancillary files\n \"pep-????/*.rst\",\n # Documentation\n \"docs/*.rst\",\n]\nexclude_patterns = [\n # PEP Template\n \"pep-0012/pep-NNNN.rst\",\n]\n\n# Intersphinx configuration\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'packaging': ('https://packaging.python.org/en/latest/', None),\n 'devguide': ('https://devguide.python.org/', None),\n 'py3.11': ('https://docs.python.org/3.11/', None),\n 'py3.12': ('https://docs.python.org/3.12/', None),\n}\nintersphinx_disabled_reftypes = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# HTML output settings\nhtml_math_renderer = \"maths_to_html\" # Maths rendering\n\n# Theme settings\nhtml_theme_path = [\"pep_sphinx_extensions\"]\nhtml_theme = \"pep_theme\" # The actual theme directory (child of html_theme_path)\nhtml_use_index = False # Disable index (we use PEP 0)\nhtml_style = \"\" # must be defined here or in theme.conf, but is unused\nhtml_permalinks = False # handled in the PEPContents transform\nhtml_baseurl = \"https://peps.python.org\" # to create the CNAME file\ngettext_auto_build = False # speed-ups\n\ntemplates_path = [\"pep_sphinx_extensions/pep_theme/templates\"] # Theme template relative paths from `confdir`\n", "path": "conf.py"}], "after_files": [{"content": "# This file is placed in the public domain or under the\n# CC0-1.0-Universal license, whichever is more permissive.\n\n\"\"\"Configuration for building PEPs using Sphinx.\"\"\"\n\nfrom pathlib import Path\nimport sys\n\nsys.path.append(str(Path(\".\").absolute()))\n\n# -- Project information -----------------------------------------------------\n\nproject = \"PEPs\"\nmaster_doc = \"contents\"\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings.\nextensions = [\n \"pep_sphinx_extensions\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.githubpages\",\n]\n\n# The file extensions of source files. 
Sphinx uses these suffixes as sources.\nsource_suffix = {\n \".rst\": \"pep\",\n \".txt\": \"pep\",\n}\n\n# List of patterns (relative to source dir) to ignore when looking for source files.\ninclude_patterns = [\n # Required for Sphinx\n \"contents.rst\",\n # PEP files\n \"pep-????.rst\",\n \"pep-????.txt\",\n # PEP ancillary files\n \"pep-????/*.rst\",\n # Documentation\n \"docs/*.rst\",\n]\nexclude_patterns = [\n # PEP Template\n \"pep-0012/pep-NNNN.rst\",\n]\n\n# Warn on missing references\nnitpicky = True\n\n# Intersphinx configuration\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3/', None),\n 'packaging': ('https://packaging.python.org/en/latest/', None),\n 'devguide': ('https://devguide.python.org/', None),\n 'py3.11': ('https://docs.python.org/3.11/', None),\n 'py3.12': ('https://docs.python.org/3.12/', None),\n}\nintersphinx_disabled_reftypes = []\n\n# -- Options for HTML output -------------------------------------------------\n\n# HTML output settings\nhtml_math_renderer = \"maths_to_html\" # Maths rendering\n\n# Theme settings\nhtml_theme_path = [\"pep_sphinx_extensions\"]\nhtml_theme = \"pep_theme\" # The actual theme directory (child of html_theme_path)\nhtml_use_index = False # Disable index (we use PEP 0)\nhtml_style = \"\" # must be defined here or in theme.conf, but is unused\nhtml_permalinks = False # handled in the PEPContents transform\nhtml_baseurl = \"https://peps.python.org\" # to create the CNAME file\ngettext_auto_build = False # speed-ups\n\ntemplates_path = [\"pep_sphinx_extensions/pep_theme/templates\"] # Theme template relative paths from `confdir`\n", "path": "conf.py"}]}
1,252
92
gh_patches_debug_32502
rasdani/github-patches
git_diff
TileDB-Inc__TileDB-Py-263
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- multi_index not accepting tuples The [UDF apply docs](https://docs.tiledb.com/cloud/client-api/serverless-udfs#multi-index-usage) imply the tuples and slices are interchangeable. The standard API throws an exception on tuples (for both tiledb: and s3:) ``` >>> import tiledb, tiledb.cloud >>> A = tiledb.DenseArray("tiledb://TileDB-Inc/quickstart_dense", ctx=tiledb.cloud.Ctx()) >>> A.multi_index[[(1,2), 4], [slice(1,4)]]['a'] Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 121, in __getitem__ ranges = self.getitem_ranges(idx) File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 109, in getitem_ranges subranges = sel_to_subranges(sel) File "/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py", line 60, in sel_to_subranges raise TypeError("Unsupported selection ") TypeError: Unsupported selection ``` It would be great if multi_index and UDF apply accepted the same index types. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tiledb/multirange_indexing.py` Content: ``` 1 import tiledb 2 from tiledb import Array, ArraySchema 3 import os, numpy as np 4 import sys, weakref 5 6 try: 7 from tiledb.libtiledb import multi_index 8 except: 9 from tiledb.indexing import multi_index 10 11 def _index_as_tuple(idx): 12 """Forces scalar index objects to a tuple representation""" 13 if isinstance(idx, tuple): 14 return idx 15 return (idx,) 16 17 def mr_dense_result_shape(ranges, base_shape = None): 18 # assumptions: len(ranges) matches number of dims 19 if base_shape is not None: 20 assert len(ranges) == len(base_shape), "internal error: mismatched shapes" 21 22 new_shape = list() 23 for i,rr in enumerate(ranges): 24 if rr != (): 25 m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr)) 26 new_shape.append(np.sum(m)) 27 else: 28 if base_shape is None: 29 raise ValueError("Missing required base_shape for whole-dimension slices") 30 # empty range covers dimension 31 new_shape.append(base_shape[i]) 32 33 return tuple(new_shape) 34 35 def mr_dense_result_numel(ranges): 36 return np.prod(mr_dense_result_shape(ranges)) 37 38 def sel_to_subranges(dim_sel): 39 if isinstance(dim_sel, list): 40 dim_sel = tuple(dim_sel) 41 elif not isinstance(dim_sel, tuple): 42 dim_sel = (dim_sel,) 43 44 subranges = list() 45 for range in dim_sel: 46 if np.isscalar(range): 47 subranges.append( (range, range) ) 48 elif isinstance(range, slice): 49 if range.step is not None: 50 raise ValueError("Stepped slice ranges are not supported") 51 elif range.start is None and range.stop is None: 52 # ':' full slice 53 pass 54 else: 55 subranges.append( (range.start, range.stop) ) 56 elif isinstance(range, list): 57 for el in range: 58 subranges.append( (el, el) ) 59 else: 60 raise TypeError("Unsupported selection ") 61 62 return tuple(subranges) 63 64 65 class MultiRangeIndexer(object): 66 """ 67 Implements multi-range / outer / orthogonal indexing. 
68 69 """ 70 # for cython 71 # comment out for Python 2 :/ 72 #array: Array 73 #schema: ArraySchema 74 #def __init__(self, array: Array, query = None): 75 76 def __init__(self, array, query = None): 77 if not issubclass(type(array), tiledb.Array): 78 raise ValueError("Internal error: MultiRangeIndexer expected tiledb.Array") 79 self.array_ref = weakref.ref(array) 80 self.schema = array.schema 81 self.query = query 82 83 @property 84 def array(self): 85 assert self.array_ref() is not None, \ 86 "Internal error: invariant violation (indexing call w/ dead array_ref)" 87 return self.array_ref() 88 89 @classmethod 90 def __test_init__(cls, array): 91 """ 92 Internal helper method for testing getitem range calculation. 93 :param array: 94 :return: 95 """ 96 m = cls.__new__(cls) 97 m.array_ref = weakref.ref(array) 98 m.schema = array.schema 99 m.query = None 100 return m 101 102 def getitem_ranges(self, idx): 103 dom = self.schema.domain 104 ndim = dom.ndim 105 idx = _index_as_tuple(idx) 106 107 ranges = list() 108 for i,sel in enumerate(idx): 109 subranges = sel_to_subranges(sel) 110 ranges.append(subranges) 111 112 # extend the list to ndim 113 if len(ranges) < ndim: 114 ranges.extend([ tuple() for _ in range(ndim-len(ranges))]) 115 116 rval = tuple(ranges) 117 return rval 118 119 def __getitem__(self, idx): 120 # implements multi-range / outer / orthogonal indexing 121 ranges = self.getitem_ranges(idx) 122 123 dom = self.schema.domain 124 attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr)) 125 126 coords = None 127 if self.query is not None: 128 # if we are called via Query object, then we need to respect Query semantics 129 attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all 130 coords = self.query.coords 131 132 # TODO order 133 result_dict = multi_index( 134 self.array, 135 attr_names, 136 ranges, 137 coords=coords 138 ) 139 140 if self.schema.sparse: 141 return result_dict 142 else: 143 result_shape = mr_dense_result_shape(ranges, self.schema.shape) 144 for arr in result_dict.values(): 145 # TODO check/test layout 146 arr.shape = result_shape 147 return result_dict ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py --- a/tiledb/multirange_indexing.py +++ b/tiledb/multirange_indexing.py @@ -8,11 +8,6 @@ except: from tiledb.indexing import multi_index -def _index_as_tuple(idx): - """Forces scalar index objects to a tuple representation""" - if isinstance(idx, tuple): - return idx - return (idx,) def mr_dense_result_shape(ranges, base_shape = None): # assumptions: len(ranges) matches number of dims @@ -36,11 +31,6 @@ return np.prod(mr_dense_result_shape(ranges)) def sel_to_subranges(dim_sel): - if isinstance(dim_sel, list): - dim_sel = tuple(dim_sel) - elif not isinstance(dim_sel, tuple): - dim_sel = (dim_sel,) - subranges = list() for range in dim_sel: if np.isscalar(range): @@ -53,6 +43,8 @@ pass else: subranges.append( (range.start, range.stop) ) + elif isinstance(range, tuple): + subranges.extend((range,)) elif isinstance(range, list): for el in range: subranges.append( (el, el) ) @@ -102,10 +94,16 @@ def getitem_ranges(self, idx): dom = self.schema.domain ndim = dom.ndim - idx = _index_as_tuple(idx) + + if isinstance(idx, tuple): + idx = list(idx) + else: + idx = [idx] ranges = list() for i,sel in enumerate(idx): + if not isinstance(sel, list): + sel = [sel] subranges = sel_to_subranges(sel) ranges.append(subranges)
{"golden_diff": "diff --git a/tiledb/multirange_indexing.py b/tiledb/multirange_indexing.py\n--- a/tiledb/multirange_indexing.py\n+++ b/tiledb/multirange_indexing.py\n@@ -8,11 +8,6 @@\n except:\n from tiledb.indexing import multi_index\n \n-def _index_as_tuple(idx):\n- \"\"\"Forces scalar index objects to a tuple representation\"\"\"\n- if isinstance(idx, tuple):\n- return idx\n- return (idx,)\n \n def mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n@@ -36,11 +31,6 @@\n return np.prod(mr_dense_result_shape(ranges))\n \n def sel_to_subranges(dim_sel):\n- if isinstance(dim_sel, list):\n- dim_sel = tuple(dim_sel)\n- elif not isinstance(dim_sel, tuple):\n- dim_sel = (dim_sel,)\n-\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n@@ -53,6 +43,8 @@\n pass\n else:\n subranges.append( (range.start, range.stop) )\n+ elif isinstance(range, tuple):\n+ subranges.extend((range,))\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n@@ -102,10 +94,16 @@\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n- idx = _index_as_tuple(idx)\n+\n+ if isinstance(idx, tuple):\n+ idx = list(idx)\n+ else:\n+ idx = [idx]\n \n ranges = list()\n for i,sel in enumerate(idx):\n+ if not isinstance(sel, list):\n+ sel = [sel]\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n", "issue": "multi_index not accepting tuples\nThe [UDF apply docs](https://docs.tiledb.com/cloud/client-api/serverless-udfs#multi-index-usage) imply the tuples and slices are interchangeable. The standard API throws an exception on tuples (for both tiledb: and s3:)\r\n\r\n\r\n```\r\n>>> import tiledb, tiledb.cloud\r\n>>> A = tiledb.DenseArray(\"tiledb://TileDB-Inc/quickstart_dense\", ctx=tiledb.cloud.Ctx())\r\n>>> A.multi_index[[(1,2), 4], [slice(1,4)]]['a']\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 121, in __getitem__\r\n ranges = self.getitem_ranges(idx)\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 109, in getitem_ranges\r\n subranges = sel_to_subranges(sel)\r\n File \"/home/ec2-user/venv/lib64/python3.7/site-packages/tiledb/multirange_indexing.py\", line 60, in sel_to_subranges\r\n raise TypeError(\"Unsupported selection \")\r\nTypeError: Unsupported selection \r\n```\r\n\r\nIt would be great if multi_index and UDF apply accepted the same index types.\n", "before_files": [{"content": "import tiledb\nfrom tiledb import Array, ArraySchema\nimport os, numpy as np\nimport sys, weakref\n\ntry:\n from tiledb.libtiledb import multi_index\nexcept:\n from tiledb.indexing import multi_index\n\ndef _index_as_tuple(idx):\n \"\"\"Forces scalar index objects to a tuple representation\"\"\"\n if isinstance(idx, tuple):\n return idx\n return (idx,)\n\ndef mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n if base_shape is not None:\n assert len(ranges) == len(base_shape), \"internal error: mismatched shapes\"\n\n new_shape = list()\n for i,rr in enumerate(ranges):\n if rr != ():\n m = list(map(lambda y: abs(y[1] - y[0]) + 1, rr))\n new_shape.append(np.sum(m))\n else:\n if base_shape is None:\n raise ValueError(\"Missing required base_shape for whole-dimension slices\")\n # empty range covers dimension\n new_shape.append(base_shape[i])\n\n return tuple(new_shape)\n\ndef 
mr_dense_result_numel(ranges):\n return np.prod(mr_dense_result_shape(ranges))\n\ndef sel_to_subranges(dim_sel):\n if isinstance(dim_sel, list):\n dim_sel = tuple(dim_sel)\n elif not isinstance(dim_sel, tuple):\n dim_sel = (dim_sel,)\n\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n subranges.append( (range, range) )\n elif isinstance(range, slice):\n if range.step is not None:\n raise ValueError(\"Stepped slice ranges are not supported\")\n elif range.start is None and range.stop is None:\n # ':' full slice\n pass\n else:\n subranges.append( (range.start, range.stop) )\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n else:\n raise TypeError(\"Unsupported selection \")\n\n return tuple(subranges)\n\n\nclass MultiRangeIndexer(object):\n \"\"\"\n Implements multi-range / outer / orthogonal indexing.\n\n \"\"\"\n # for cython\n # comment out for Python 2 :/\n #array: Array\n #schema: ArraySchema\n #def __init__(self, array: Array, query = None):\n\n def __init__(self, array, query = None):\n if not issubclass(type(array), tiledb.Array):\n raise ValueError(\"Internal error: MultiRangeIndexer expected tiledb.Array\")\n self.array_ref = weakref.ref(array)\n self.schema = array.schema\n self.query = query\n\n @property\n def array(self):\n assert self.array_ref() is not None, \\\n \"Internal error: invariant violation (indexing call w/ dead array_ref)\"\n return self.array_ref()\n\n @classmethod\n def __test_init__(cls, array):\n \"\"\"\n Internal helper method for testing getitem range calculation.\n :param array:\n :return:\n \"\"\"\n m = cls.__new__(cls)\n m.array_ref = weakref.ref(array)\n m.schema = array.schema\n m.query = None\n return m\n\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n idx = _index_as_tuple(idx)\n\n ranges = list()\n for i,sel in enumerate(idx):\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n\n # extend the list to ndim\n if len(ranges) < ndim:\n ranges.extend([ tuple() for _ in range(ndim-len(ranges))])\n\n rval = tuple(ranges)\n return rval\n\n def __getitem__(self, idx):\n # implements multi-range / outer / orthogonal indexing\n ranges = self.getitem_ranges(idx)\n\n dom = self.schema.domain\n attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))\n\n coords = None\n if self.query is not None:\n # if we are called via Query object, then we need to respect Query semantics\n attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all\n coords = self.query.coords\n\n # TODO order\n result_dict = multi_index(\n self.array,\n attr_names,\n ranges,\n coords=coords\n )\n\n if self.schema.sparse:\n return result_dict\n else:\n result_shape = mr_dense_result_shape(ranges, self.schema.shape)\n for arr in result_dict.values():\n # TODO check/test layout\n arr.shape = result_shape\n return result_dict", "path": "tiledb/multirange_indexing.py"}], "after_files": [{"content": "import tiledb\nfrom tiledb import Array, ArraySchema\nimport os, numpy as np\nimport sys, weakref\n\ntry:\n from tiledb.libtiledb import multi_index\nexcept:\n from tiledb.indexing import multi_index\n\n\ndef mr_dense_result_shape(ranges, base_shape = None):\n # assumptions: len(ranges) matches number of dims\n if base_shape is not None:\n assert len(ranges) == len(base_shape), \"internal error: mismatched shapes\"\n\n new_shape = list()\n for i,rr in enumerate(ranges):\n if rr != ():\n m = list(map(lambda y: abs(y[1] - y[0]) + 1, 
rr))\n new_shape.append(np.sum(m))\n else:\n if base_shape is None:\n raise ValueError(\"Missing required base_shape for whole-dimension slices\")\n # empty range covers dimension\n new_shape.append(base_shape[i])\n\n return tuple(new_shape)\n\ndef mr_dense_result_numel(ranges):\n return np.prod(mr_dense_result_shape(ranges))\n\ndef sel_to_subranges(dim_sel):\n subranges = list()\n for range in dim_sel:\n if np.isscalar(range):\n subranges.append( (range, range) )\n elif isinstance(range, slice):\n if range.step is not None:\n raise ValueError(\"Stepped slice ranges are not supported\")\n elif range.start is None and range.stop is None:\n # ':' full slice\n pass\n else:\n subranges.append( (range.start, range.stop) )\n elif isinstance(range, tuple):\n subranges.extend((range,))\n elif isinstance(range, list):\n for el in range:\n subranges.append( (el, el) )\n else:\n raise TypeError(\"Unsupported selection \")\n\n return tuple(subranges)\n\n\nclass MultiRangeIndexer(object):\n \"\"\"\n Implements multi-range / outer / orthogonal indexing.\n\n \"\"\"\n # for cython\n # comment out for Python 2 :/\n #array: Array\n #schema: ArraySchema\n #def __init__(self, array: Array, query = None):\n\n def __init__(self, array, query = None):\n if not issubclass(type(array), tiledb.Array):\n raise ValueError(\"Internal error: MultiRangeIndexer expected tiledb.Array\")\n self.array_ref = weakref.ref(array)\n self.schema = array.schema\n self.query = query\n\n @property\n def array(self):\n assert self.array_ref() is not None, \\\n \"Internal error: invariant violation (indexing call w/ dead array_ref)\"\n return self.array_ref()\n\n @classmethod\n def __test_init__(cls, array):\n \"\"\"\n Internal helper method for testing getitem range calculation.\n :param array:\n :return:\n \"\"\"\n m = cls.__new__(cls)\n m.array_ref = weakref.ref(array)\n m.schema = array.schema\n m.query = None\n return m\n\n def getitem_ranges(self, idx):\n dom = self.schema.domain\n ndim = dom.ndim\n\n if isinstance(idx, tuple):\n idx = list(idx)\n else:\n idx = [idx]\n\n ranges = list()\n for i,sel in enumerate(idx):\n if not isinstance(sel, list):\n sel = [sel]\n subranges = sel_to_subranges(sel)\n ranges.append(subranges)\n\n # extend the list to ndim\n if len(ranges) < ndim:\n ranges.extend([ tuple() for _ in range(ndim-len(ranges))])\n\n rval = tuple(ranges)\n return rval\n\n def __getitem__(self, idx):\n # implements multi-range / outer / orthogonal indexing\n ranges = self.getitem_ranges(idx)\n\n dom = self.schema.domain\n attr_names = tuple(self.schema.attr(i).name for i in range(self.schema.nattr))\n\n coords = None\n if self.query is not None:\n # if we are called via Query object, then we need to respect Query semantics\n attr_names = tuple(self.query.attrs) if self.query.attrs else attr_names # query.attrs might be None -> all\n coords = self.query.coords\n\n # TODO order\n result_dict = multi_index(\n self.array,\n attr_names,\n ranges,\n coords=coords\n )\n\n if self.schema.sparse:\n return result_dict\n else:\n result_shape = mr_dense_result_shape(ranges, self.schema.shape)\n for arr in result_dict.values():\n # TODO check/test layout\n arr.shape = result_shape\n return result_dict", "path": "tiledb/multirange_indexing.py"}]}
1,933
427
gh_patches_debug_7533
rasdani/github-patches
git_diff
stephenmcd__mezzanine-780
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SuspiciousOperation in asset_proxy URLs Since switching our site from HTTP to HTTPS, I've been getting the following error on all of TinyMCE's dialog windows. ``` SuspiciousOperation: Attempted access to '/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm' denied. Stacktrace (most recent call last): File "django/core/handlers/base.py", line 115, in get_response response = callback(request, *callback_args, **callback_kwargs) File "django/contrib/admin/views/decorators.py", line 17, in _checklogin return view_func(request, *args, **kwargs) File "mezzanine/core/views.py", line 143, in static_proxy path = finders.find(url) File "django/contrib/staticfiles/finders.py", line 239, in find result = finder.find(path, all=all) File "django/contrib/staticfiles/finders.py", line 147, in find match = self.find_in_app(app, path) File "django/contrib/staticfiles/finders.py", line 166, in find_in_app if storage.exists(path): File "django/core/files/storage.py", line 243, in exists return os.path.exists(self.path(name)) File "django/core/files/storage.py", line 259, in path raise SuspiciousOperation("Attempted access to '%s' denied." % name) ``` The URLs that are causing them look like this: ``` https://www.example.com/asset_proxy/?u=https://www.example.com/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mezzanine/core/views.py` Content: ``` 1 import os 2 from urlparse import urljoin, urlparse 3 4 from django.contrib import admin 5 from django.contrib.admin.views.decorators import staff_member_required 6 from django.contrib.admin.options import ModelAdmin 7 from django.contrib.staticfiles import finders 8 from django.core.exceptions import PermissionDenied 9 from django.core.urlresolvers import reverse 10 from django.db.models import get_model 11 from django.http import (HttpResponse, HttpResponseServerError, 12 HttpResponseNotFound) 13 from django.shortcuts import redirect 14 from django.template import RequestContext 15 from django.template.loader import get_template 16 from django.utils.translation import ugettext_lazy as _ 17 from django.views.decorators.csrf import requires_csrf_token 18 19 from mezzanine.conf import settings 20 from mezzanine.core.forms import get_edit_form 21 from mezzanine.core.models import Displayable, SitePermission 22 from mezzanine.utils.cache import add_cache_bypass 23 from mezzanine.utils.views import is_editable, paginate, render, set_cookie 24 from mezzanine.utils.sites import has_site_permission 25 26 27 def set_device(request, device=""): 28 """ 29 Sets a device name in a cookie when a user explicitly wants to go 30 to the site for a particular device (eg mobile). 31 """ 32 response = redirect(add_cache_bypass(request.GET.get("next") or "/")) 33 set_cookie(response, "mezzanine-device", device, 60 * 60 * 24 * 365) 34 return response 35 36 37 @staff_member_required 38 def set_site(request): 39 """ 40 Put the selected site ID into the session - posted to from 41 the "Select site" drop-down in the header of the admin. The 42 site ID is then used in favour of the current request's 43 domain in ``mezzanine.core.managers.CurrentSiteManager``. 
44 """ 45 site_id = int(request.GET["site_id"]) 46 if not request.user.is_superuser: 47 try: 48 SitePermission.objects.get(user=request.user, sites=site_id) 49 except SitePermission.DoesNotExist: 50 raise PermissionDenied 51 request.session["site_id"] = site_id 52 admin_url = reverse("admin:index") 53 next = request.GET.get("next") or admin_url 54 # Don't redirect to a change view for an object that won't exist 55 # on the selected site - go to its list view instead. 56 if next.startswith(admin_url): 57 parts = next.split("/") 58 if len(parts) > 4 and parts[4].isdigit(): 59 next = "/".join(parts[:4]) 60 return redirect(next) 61 62 63 def direct_to_template(request, template, extra_context=None, **kwargs): 64 """ 65 Replacement for Django's ``direct_to_template`` that uses 66 ``TemplateResponse`` via ``mezzanine.utils.views.render``. 67 """ 68 context = extra_context or {} 69 context["params"] = kwargs 70 for (key, value) in context.items(): 71 if callable(value): 72 context[key] = value() 73 return render(request, template, context) 74 75 76 @staff_member_required 77 def edit(request): 78 """ 79 Process the inline editing form. 80 """ 81 model = get_model(request.POST["app"], request.POST["model"]) 82 obj = model.objects.get(id=request.POST["id"]) 83 form = get_edit_form(obj, request.POST["fields"], data=request.POST, 84 files=request.FILES) 85 if not (is_editable(obj, request) and has_site_permission(request.user)): 86 response = _("Permission denied") 87 elif form.is_valid(): 88 form.save() 89 model_admin = ModelAdmin(model, admin.site) 90 message = model_admin.construct_change_message(request, form, None) 91 model_admin.log_change(request, obj, message) 92 response = "" 93 else: 94 response = form.errors.values()[0][0] 95 return HttpResponse(unicode(response)) 96 97 98 def search(request, template="search_results.html"): 99 """ 100 Display search results. Takes an optional "contenttype" GET parameter 101 in the form "app-name.ModelName" to limit search results to a single model. 102 """ 103 settings.use_editable() 104 query = request.GET.get("q", "") 105 page = request.GET.get("page", 1) 106 per_page = settings.SEARCH_PER_PAGE 107 max_paging_links = settings.MAX_PAGING_LINKS 108 try: 109 search_model = get_model(*request.GET.get("type", "").split(".", 1)) 110 if not issubclass(search_model, Displayable): 111 raise TypeError 112 except TypeError: 113 search_model = Displayable 114 search_type = _("Everything") 115 else: 116 search_type = search_model._meta.verbose_name_plural.capitalize() 117 results = search_model.objects.search(query, for_user=request.user) 118 paginated = paginate(results, page, per_page, max_paging_links) 119 context = {"query": query, "results": paginated, 120 "search_type": search_type} 121 return render(request, template, context) 122 123 124 @staff_member_required 125 def static_proxy(request): 126 """ 127 Serves TinyMCE plugins inside the inline popups and the uploadify 128 SWF, as these are normally static files, and will break with 129 cross-domain JavaScript errors if ``STATIC_URL`` is an external 130 host. URL for the file is passed in via querystring in the inline 131 popup plugin template. 132 """ 133 # Get the relative URL after STATIC_URL. 
134 url = request.GET["u"] 135 protocol = "http" if not request.is_secure() else "https" 136 host = protocol + "://" + request.get_host() 137 generic_host = "//" + request.get_host() 138 for prefix in (host, generic_host, settings.STATIC_URL): 139 if url.startswith(prefix): 140 url = url.replace(prefix, "", 1) 141 response = "" 142 mimetype = "" 143 path = finders.find(url) 144 if path: 145 if isinstance(path, (list, tuple)): 146 path = path[0] 147 with open(path, "rb") as f: 148 response = f.read() 149 mimetype = "application/octet-stream" 150 if url.endswith(".htm"): 151 # Inject <base href="{{ STATIC_URL }}"> into TinyMCE 152 # plugins, since the path static files in these won't be 153 # on the same domain. 154 mimetype = "text/html" 155 static_url = settings.STATIC_URL + os.path.split(url)[0] + "/" 156 if not urlparse(static_url).scheme: 157 static_url = urljoin(host, static_url) 158 base_tag = "<base href='%s'>" % static_url 159 response = response.replace("<head>", "<head>" + base_tag) 160 return HttpResponse(response, mimetype=mimetype) 161 162 163 @requires_csrf_token 164 def page_not_found(request, template_name="errors/404.html"): 165 """ 166 Mimics Django's 404 handler but with a different template path. 167 """ 168 context = RequestContext(request, { 169 "STATIC_URL": settings.STATIC_URL, 170 "request_path": request.path, 171 }) 172 t = get_template(template_name) 173 return HttpResponseNotFound(t.render(context)) 174 175 176 @requires_csrf_token 177 def server_error(request, template_name="errors/500.html"): 178 """ 179 Mimics Django's error handler but adds ``STATIC_URL`` to the 180 context. 181 """ 182 context = RequestContext(request, {"STATIC_URL": settings.STATIC_URL}) 183 t = get_template(template_name) 184 return HttpResponseServerError(t.render(context)) 185 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mezzanine/core/views.py b/mezzanine/core/views.py --- a/mezzanine/core/views.py +++ b/mezzanine/core/views.py @@ -135,7 +135,7 @@ protocol = "http" if not request.is_secure() else "https" host = protocol + "://" + request.get_host() generic_host = "//" + request.get_host() - for prefix in (host, generic_host, settings.STATIC_URL): + for prefix in (settings.STATIC_URL, host, generic_host, '/'): if url.startswith(prefix): url = url.replace(prefix, "", 1) response = ""
{"golden_diff": "diff --git a/mezzanine/core/views.py b/mezzanine/core/views.py\n--- a/mezzanine/core/views.py\n+++ b/mezzanine/core/views.py\n@@ -135,7 +135,7 @@\n protocol = \"http\" if not request.is_secure() else \"https\"\n host = protocol + \"://\" + request.get_host()\n generic_host = \"//\" + request.get_host()\n- for prefix in (host, generic_host, settings.STATIC_URL):\n+ for prefix in (settings.STATIC_URL, host, generic_host, '/'):\n if url.startswith(prefix):\n url = url.replace(prefix, \"\", 1)\n response = \"\"\n", "issue": "SuspiciousOperation in asset_proxy URLs\nSince switching our site from HTTP to HTTPS, I've been getting the following error on all of TinyMCE's dialog windows.\n\n```\nSuspiciousOperation: Attempted access to '/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm' denied.\n\nStacktrace (most recent call last):\n\n File \"django/core/handlers/base.py\", line 115, in get_response\n response = callback(request, *callback_args, **callback_kwargs)\n File \"django/contrib/admin/views/decorators.py\", line 17, in _checklogin\n return view_func(request, *args, **kwargs)\n File \"mezzanine/core/views.py\", line 143, in static_proxy\n path = finders.find(url)\n File \"django/contrib/staticfiles/finders.py\", line 239, in find\n result = finder.find(path, all=all)\n File \"django/contrib/staticfiles/finders.py\", line 147, in find\n match = self.find_in_app(app, path)\n File \"django/contrib/staticfiles/finders.py\", line 166, in find_in_app\n if storage.exists(path):\n File \"django/core/files/storage.py\", line 243, in exists\n return os.path.exists(self.path(name))\n File \"django/core/files/storage.py\", line 259, in path\n raise SuspiciousOperation(\"Attempted access to '%s' denied.\" % name)\n```\n\nThe URLs that are causing them look like this:\n\n```\nhttps://www.example.com/asset_proxy/?u=https://www.example.com/static/grappelli/tinymce/jscripts/tiny_mce/themes/advanced/source_editor.htm\n```\n\n", "before_files": [{"content": "import os\nfrom urlparse import urljoin, urlparse\n\nfrom django.contrib import admin\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.admin.options import ModelAdmin\nfrom django.contrib.staticfiles import finders\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import get_model\nfrom django.http import (HttpResponse, HttpResponseServerError,\n HttpResponseNotFound)\nfrom django.shortcuts import redirect\nfrom django.template import RequestContext\nfrom django.template.loader import get_template\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.csrf import requires_csrf_token\n\nfrom mezzanine.conf import settings\nfrom mezzanine.core.forms import get_edit_form\nfrom mezzanine.core.models import Displayable, SitePermission\nfrom mezzanine.utils.cache import add_cache_bypass\nfrom mezzanine.utils.views import is_editable, paginate, render, set_cookie\nfrom mezzanine.utils.sites import has_site_permission\n\n\ndef set_device(request, device=\"\"):\n \"\"\"\n Sets a device name in a cookie when a user explicitly wants to go\n to the site for a particular device (eg mobile).\n \"\"\"\n response = redirect(add_cache_bypass(request.GET.get(\"next\") or \"/\"))\n set_cookie(response, \"mezzanine-device\", device, 60 * 60 * 24 * 365)\n return response\n\n\n@staff_member_required\ndef set_site(request):\n \"\"\"\n Put the selected site ID into the session - posted to 
from\n the \"Select site\" drop-down in the header of the admin. The\n site ID is then used in favour of the current request's\n domain in ``mezzanine.core.managers.CurrentSiteManager``.\n \"\"\"\n site_id = int(request.GET[\"site_id\"])\n if not request.user.is_superuser:\n try:\n SitePermission.objects.get(user=request.user, sites=site_id)\n except SitePermission.DoesNotExist:\n raise PermissionDenied\n request.session[\"site_id\"] = site_id\n admin_url = reverse(\"admin:index\")\n next = request.GET.get(\"next\") or admin_url\n # Don't redirect to a change view for an object that won't exist\n # on the selected site - go to its list view instead.\n if next.startswith(admin_url):\n parts = next.split(\"/\")\n if len(parts) > 4 and parts[4].isdigit():\n next = \"/\".join(parts[:4])\n return redirect(next)\n\n\ndef direct_to_template(request, template, extra_context=None, **kwargs):\n \"\"\"\n Replacement for Django's ``direct_to_template`` that uses\n ``TemplateResponse`` via ``mezzanine.utils.views.render``.\n \"\"\"\n context = extra_context or {}\n context[\"params\"] = kwargs\n for (key, value) in context.items():\n if callable(value):\n context[key] = value()\n return render(request, template, context)\n\n\n@staff_member_required\ndef edit(request):\n \"\"\"\n Process the inline editing form.\n \"\"\"\n model = get_model(request.POST[\"app\"], request.POST[\"model\"])\n obj = model.objects.get(id=request.POST[\"id\"])\n form = get_edit_form(obj, request.POST[\"fields\"], data=request.POST,\n files=request.FILES)\n if not (is_editable(obj, request) and has_site_permission(request.user)):\n response = _(\"Permission denied\")\n elif form.is_valid():\n form.save()\n model_admin = ModelAdmin(model, admin.site)\n message = model_admin.construct_change_message(request, form, None)\n model_admin.log_change(request, obj, message)\n response = \"\"\n else:\n response = form.errors.values()[0][0]\n return HttpResponse(unicode(response))\n\n\ndef search(request, template=\"search_results.html\"):\n \"\"\"\n Display search results. Takes an optional \"contenttype\" GET parameter\n in the form \"app-name.ModelName\" to limit search results to a single model.\n \"\"\"\n settings.use_editable()\n query = request.GET.get(\"q\", \"\")\n page = request.GET.get(\"page\", 1)\n per_page = settings.SEARCH_PER_PAGE\n max_paging_links = settings.MAX_PAGING_LINKS\n try:\n search_model = get_model(*request.GET.get(\"type\", \"\").split(\".\", 1))\n if not issubclass(search_model, Displayable):\n raise TypeError\n except TypeError:\n search_model = Displayable\n search_type = _(\"Everything\")\n else:\n search_type = search_model._meta.verbose_name_plural.capitalize()\n results = search_model.objects.search(query, for_user=request.user)\n paginated = paginate(results, page, per_page, max_paging_links)\n context = {\"query\": query, \"results\": paginated,\n \"search_type\": search_type}\n return render(request, template, context)\n\n\n@staff_member_required\ndef static_proxy(request):\n \"\"\"\n Serves TinyMCE plugins inside the inline popups and the uploadify\n SWF, as these are normally static files, and will break with\n cross-domain JavaScript errors if ``STATIC_URL`` is an external\n host. 
URL for the file is passed in via querystring in the inline\n popup plugin template.\n \"\"\"\n # Get the relative URL after STATIC_URL.\n url = request.GET[\"u\"]\n protocol = \"http\" if not request.is_secure() else \"https\"\n host = protocol + \"://\" + request.get_host()\n generic_host = \"//\" + request.get_host()\n for prefix in (host, generic_host, settings.STATIC_URL):\n if url.startswith(prefix):\n url = url.replace(prefix, \"\", 1)\n response = \"\"\n mimetype = \"\"\n path = finders.find(url)\n if path:\n if isinstance(path, (list, tuple)):\n path = path[0]\n with open(path, \"rb\") as f:\n response = f.read()\n mimetype = \"application/octet-stream\"\n if url.endswith(\".htm\"):\n # Inject <base href=\"{{ STATIC_URL }}\"> into TinyMCE\n # plugins, since the path static files in these won't be\n # on the same domain.\n mimetype = \"text/html\"\n static_url = settings.STATIC_URL + os.path.split(url)[0] + \"/\"\n if not urlparse(static_url).scheme:\n static_url = urljoin(host, static_url)\n base_tag = \"<base href='%s'>\" % static_url\n response = response.replace(\"<head>\", \"<head>\" + base_tag)\n return HttpResponse(response, mimetype=mimetype)\n\n\n@requires_csrf_token\ndef page_not_found(request, template_name=\"errors/404.html\"):\n \"\"\"\n Mimics Django's 404 handler but with a different template path.\n \"\"\"\n context = RequestContext(request, {\n \"STATIC_URL\": settings.STATIC_URL,\n \"request_path\": request.path,\n })\n t = get_template(template_name)\n return HttpResponseNotFound(t.render(context))\n\n\n@requires_csrf_token\ndef server_error(request, template_name=\"errors/500.html\"):\n \"\"\"\n Mimics Django's error handler but adds ``STATIC_URL`` to the\n context.\n \"\"\"\n context = RequestContext(request, {\"STATIC_URL\": settings.STATIC_URL})\n t = get_template(template_name)\n return HttpResponseServerError(t.render(context))\n", "path": "mezzanine/core/views.py"}], "after_files": [{"content": "import os\nfrom urlparse import urljoin, urlparse\n\nfrom django.contrib import admin\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom django.contrib.admin.options import ModelAdmin\nfrom django.contrib.staticfiles import finders\nfrom django.core.exceptions import PermissionDenied\nfrom django.core.urlresolvers import reverse\nfrom django.db.models import get_model\nfrom django.http import (HttpResponse, HttpResponseServerError,\n HttpResponseNotFound)\nfrom django.shortcuts import redirect\nfrom django.template import RequestContext\nfrom django.template.loader import get_template\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.csrf import requires_csrf_token\n\nfrom mezzanine.conf import settings\nfrom mezzanine.core.forms import get_edit_form\nfrom mezzanine.core.models import Displayable, SitePermission\nfrom mezzanine.utils.cache import add_cache_bypass\nfrom mezzanine.utils.views import is_editable, paginate, render, set_cookie\nfrom mezzanine.utils.sites import has_site_permission\n\n\ndef set_device(request, device=\"\"):\n \"\"\"\n Sets a device name in a cookie when a user explicitly wants to go\n to the site for a particular device (eg mobile).\n \"\"\"\n response = redirect(add_cache_bypass(request.GET.get(\"next\") or \"/\"))\n set_cookie(response, \"mezzanine-device\", device, 60 * 60 * 24 * 365)\n return response\n\n\n@staff_member_required\ndef set_site(request):\n \"\"\"\n Put the selected site ID into the session - posted to from\n the \"Select site\" drop-down in the header of 
the admin. The\n site ID is then used in favour of the current request's\n domain in ``mezzanine.core.managers.CurrentSiteManager``.\n \"\"\"\n site_id = int(request.GET[\"site_id\"])\n if not request.user.is_superuser:\n try:\n SitePermission.objects.get(user=request.user, sites=site_id)\n except SitePermission.DoesNotExist:\n raise PermissionDenied\n request.session[\"site_id\"] = site_id\n admin_url = reverse(\"admin:index\")\n next = request.GET.get(\"next\") or admin_url\n # Don't redirect to a change view for an object that won't exist\n # on the selected site - go to its list view instead.\n if next.startswith(admin_url):\n parts = next.split(\"/\")\n if len(parts) > 4 and parts[4].isdigit():\n next = \"/\".join(parts[:4])\n return redirect(next)\n\n\ndef direct_to_template(request, template, extra_context=None, **kwargs):\n \"\"\"\n Replacement for Django's ``direct_to_template`` that uses\n ``TemplateResponse`` via ``mezzanine.utils.views.render``.\n \"\"\"\n context = extra_context or {}\n context[\"params\"] = kwargs\n for (key, value) in context.items():\n if callable(value):\n context[key] = value()\n return render(request, template, context)\n\n\n@staff_member_required\ndef edit(request):\n \"\"\"\n Process the inline editing form.\n \"\"\"\n model = get_model(request.POST[\"app\"], request.POST[\"model\"])\n obj = model.objects.get(id=request.POST[\"id\"])\n form = get_edit_form(obj, request.POST[\"fields\"], data=request.POST,\n files=request.FILES)\n if not (is_editable(obj, request) and has_site_permission(request.user)):\n response = _(\"Permission denied\")\n elif form.is_valid():\n form.save()\n model_admin = ModelAdmin(model, admin.site)\n message = model_admin.construct_change_message(request, form, None)\n model_admin.log_change(request, obj, message)\n response = \"\"\n else:\n response = form.errors.values()[0][0]\n return HttpResponse(unicode(response))\n\n\ndef search(request, template=\"search_results.html\"):\n \"\"\"\n Display search results. Takes an optional \"contenttype\" GET parameter\n in the form \"app-name.ModelName\" to limit search results to a single model.\n \"\"\"\n settings.use_editable()\n query = request.GET.get(\"q\", \"\")\n page = request.GET.get(\"page\", 1)\n per_page = settings.SEARCH_PER_PAGE\n max_paging_links = settings.MAX_PAGING_LINKS\n try:\n search_model = get_model(*request.GET.get(\"type\", \"\").split(\".\", 1))\n if not issubclass(search_model, Displayable):\n raise TypeError\n except TypeError:\n search_model = Displayable\n search_type = _(\"Everything\")\n else:\n search_type = search_model._meta.verbose_name_plural.capitalize()\n results = search_model.objects.search(query, for_user=request.user)\n paginated = paginate(results, page, per_page, max_paging_links)\n context = {\"query\": query, \"results\": paginated,\n \"search_type\": search_type}\n return render(request, template, context)\n\n\n@staff_member_required\ndef static_proxy(request):\n \"\"\"\n Serves TinyMCE plugins inside the inline popups and the uploadify\n SWF, as these are normally static files, and will break with\n cross-domain JavaScript errors if ``STATIC_URL`` is an external\n host. 
URL for the file is passed in via querystring in the inline\n popup plugin template.\n \"\"\"\n # Get the relative URL after STATIC_URL.\n url = request.GET[\"u\"]\n protocol = \"http\" if not request.is_secure() else \"https\"\n host = protocol + \"://\" + request.get_host()\n generic_host = \"//\" + request.get_host()\n for prefix in (settings.STATIC_URL, host, generic_host, '/'):\n if url.startswith(prefix):\n url = url.replace(prefix, \"\", 1)\n response = \"\"\n mimetype = \"\"\n path = finders.find(url)\n if path:\n if isinstance(path, (list, tuple)):\n path = path[0]\n with open(path, \"rb\") as f:\n response = f.read()\n mimetype = \"application/octet-stream\"\n if url.endswith(\".htm\"):\n # Inject <base href=\"{{ STATIC_URL }}\"> into TinyMCE\n # plugins, since the path static files in these won't be\n # on the same domain.\n mimetype = \"text/html\"\n static_url = settings.STATIC_URL + os.path.split(url)[0] + \"/\"\n if not urlparse(static_url).scheme:\n static_url = urljoin(host, static_url)\n base_tag = \"<base href='%s'>\" % static_url\n response = response.replace(\"<head>\", \"<head>\" + base_tag)\n return HttpResponse(response, mimetype=mimetype)\n\n\n@requires_csrf_token\ndef page_not_found(request, template_name=\"errors/404.html\"):\n \"\"\"\n Mimics Django's 404 handler but with a different template path.\n \"\"\"\n context = RequestContext(request, {\n \"STATIC_URL\": settings.STATIC_URL,\n \"request_path\": request.path,\n })\n t = get_template(template_name)\n return HttpResponseNotFound(t.render(context))\n\n\n@requires_csrf_token\ndef server_error(request, template_name=\"errors/500.html\"):\n \"\"\"\n Mimics Django's error handler but adds ``STATIC_URL`` to the\n context.\n \"\"\"\n context = RequestContext(request, {\"STATIC_URL\": settings.STATIC_URL})\n t = get_template(template_name)\n return HttpResponseServerError(t.render(context))\n", "path": "mezzanine/core/views.py"}]}
2,634
144
gh_patches_debug_13647
rasdani/github-patches
git_diff
elastic__apm-agent-python-1161
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Starlette/FastAPI python agent setup examples result in TypeError When following the [example](https://www.elastic.co/guide/en/apm/agent/python/current/starlette-support.html#starlette-fastapi) of setting up the Elastic APM agent with python + FastAPI, we find that we can't use `make_apm_client` as described as it has a required positional argument, `config`. The documentation however says that we should be able to call `make_apm_client` with no arguments and in that case it should look for environment variables and/or use defaults. **To Reproduce** Follow the example exactly: ```python from fastapi import FastAPI from elasticapm.contrib.starlette import make_apm_client, ElasticAPM apm = make_apm_client() # Client fails to start due to TypeError: TypeError: make_apm_client() missing 1 required positional argument: 'config' app = FastAPI() app.add_middleware(ElasticAPM, client=apm) ``` **Environment (please complete the following information)** - OS: Mac, Linux, Ubuntu - Python version: 3.8.8 - Framework and version [e.g. Django 2.1]: fastapi==0.65.1 - APM Server version: latest - Agent version: 6.2.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/contrib/starlette/__init__.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details 4 # Copyright (c) 2019, Elasticsearch BV 5 # All rights reserved. 6 # 7 # Redistribution and use in source and binary forms, with or without 8 # modification, are permitted provided that the following conditions are met: 9 # 10 # * Redistributions of source code must retain the above copyright notice, this 11 # list of conditions and the following disclaimer. 12 # 13 # * Redistributions in binary form must reproduce the above copyright notice, 14 # this list of conditions and the following disclaimer in the documentation 15 # and/or other materials provided with the distribution. 16 # 17 # * Neither the name of the copyright holder nor the names of its 18 # contributors may be used to endorse or promote products derived from 19 # this software without specific prior written permission. 20 # 21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 31 32 from __future__ import absolute_import 33 34 import starlette 35 from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint 36 from starlette.requests import Request 37 from starlette.responses import Response 38 from starlette.routing import Match, Mount 39 from starlette.types import ASGIApp 40 41 import elasticapm 42 import elasticapm.instrumentation.control 43 from elasticapm.base import Client 44 from elasticapm.conf import constants 45 from elasticapm.contrib.asyncio.traces import set_context 46 from elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response 47 from elasticapm.utils.disttracing import TraceParent 48 from elasticapm.utils.logging import get_logger 49 50 logger = get_logger("elasticapm.errors.client") 51 52 53 def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client: 54 """Builds ElasticAPM client. 55 56 Args: 57 config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`. 58 client_cls (Client): Must be Client or its child. 59 **defaults: Additional parameters for Client. See `elasticapm.base.Client` 60 61 Returns: 62 Client 63 """ 64 if "framework_name" not in defaults: 65 defaults["framework_name"] = "starlette" 66 defaults["framework_version"] = starlette.__version__ 67 68 return client_cls(config, **defaults) 69 70 71 class ElasticAPM(BaseHTTPMiddleware): 72 """ 73 Starlette / FastAPI middleware for Elastic APM capturing. 74 75 >>> elasticapm = make_apm_client({ 76 >>> 'SERVICE_NAME': 'myapp', 77 >>> 'DEBUG': True, 78 >>> 'SERVER_URL': 'http://localhost:8200', 79 >>> 'CAPTURE_HEADERS': True, 80 >>> 'CAPTURE_BODY': 'all' 81 >>> }) 82 83 >>> app.add_middleware(ElasticAPM, client=elasticapm) 84 85 Pass an arbitrary APP_NAME and SECRET_TOKEN:: 86 87 >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd') 88 89 Pass an explicit client:: 90 91 >>> elasticapm = ElasticAPM(app, client=client) 92 93 Automatically configure logging:: 94 95 >>> elasticapm = ElasticAPM(app, logging=True) 96 97 Capture an exception:: 98 99 >>> try: 100 >>> 1 / 0 101 >>> except ZeroDivisionError: 102 >>> elasticapm.capture_exception() 103 104 Capture a message:: 105 106 >>> elasticapm.capture_message('hello, world!') 107 """ 108 109 def __init__(self, app: ASGIApp, client: Client): 110 """ 111 112 Args: 113 app (ASGIApp): Starlette app 114 client (Client): ElasticAPM Client 115 """ 116 self.client = client 117 118 if self.client.config.instrument and self.client.config.enabled: 119 elasticapm.instrumentation.control.instrument() 120 121 super().__init__(app) 122 123 async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response: 124 """Processes the whole request APM capturing. 125 126 Args: 127 request (Request) 128 call_next (RequestResponseEndpoint): Next request process in Starlette. 
129 130 Returns: 131 Response 132 """ 133 await self._request_started(request) 134 135 try: 136 response = await call_next(request) 137 elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False) 138 except Exception: 139 await self.capture_exception( 140 context={"request": await get_data_from_request(request, self.client.config, constants.ERROR)} 141 ) 142 elasticapm.set_transaction_result("HTTP 5xx", override=False) 143 elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False) 144 elasticapm.set_context({"status_code": 500}, "response") 145 146 raise 147 else: 148 await self._request_finished(response) 149 finally: 150 self.client.end_transaction() 151 152 return response 153 154 async def capture_exception(self, *args, **kwargs): 155 """Captures your exception. 156 157 Args: 158 *args: 159 **kwargs: 160 """ 161 self.client.capture_exception(*args, **kwargs) 162 163 async def capture_message(self, *args, **kwargs): 164 """Captures your message. 165 166 Args: 167 *args: Whatever 168 **kwargs: Whatever 169 """ 170 self.client.capture_message(*args, **kwargs) 171 172 async def _request_started(self, request: Request): 173 """Captures the begin of the request processing to APM. 174 175 Args: 176 request (Request) 177 """ 178 # When we consume the body, we replace the streaming mechanism with 179 # a mocked version -- this workaround came from 180 # https://github.com/encode/starlette/issues/495#issuecomment-513138055 181 # and we call the workaround here to make sure that regardless of 182 # `capture_body` settings, we will have access to the body if we need it. 183 if self.client.config.capture_body != "off": 184 await get_body(request) 185 186 if not self.client.should_ignore_url(request.url.path): 187 trace_parent = TraceParent.from_headers(dict(request.headers)) 188 self.client.begin_transaction("request", trace_parent=trace_parent) 189 190 await set_context( 191 lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), "request" 192 ) 193 transaction_name = self.get_route_name(request) or request.url.path 194 elasticapm.set_transaction_name("{} {}".format(request.method, transaction_name), override=False) 195 196 async def _request_finished(self, response: Response): 197 """Captures the end of the request processing to APM. 198 199 Args: 200 response (Response) 201 """ 202 await set_context( 203 lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), "response" 204 ) 205 206 result = "HTTP {}xx".format(response.status_code // 100) 207 elasticapm.set_transaction_result(result, override=False) 208 209 def get_route_name(self, request: Request) -> str: 210 app = request.app 211 scope = request.scope 212 routes = app.routes 213 route_name = self._get_route_name(scope, routes) 214 215 # Starlette magically redirects requests if the path matches a route name with a trailing slash 216 # appended or removed. 
To not spam the transaction names list, we do the same here and put these 217 # redirects all in the same "redirect trailing slashes" transaction name 218 if not route_name and app.router.redirect_slashes and scope["path"] != "/": 219 redirect_scope = dict(scope) 220 if scope["path"].endswith("/"): 221 redirect_scope["path"] = scope["path"][:-1] 222 trim = True 223 else: 224 redirect_scope["path"] = scope["path"] + "/" 225 trim = False 226 227 route_name = self._get_route_name(redirect_scope, routes) 228 if route_name is not None: 229 route_name = route_name + "/" if trim else route_name[:-1] 230 return route_name 231 232 def _get_route_name(self, scope, routes, route_name=None): 233 for route in routes: 234 match, child_scope = route.matches(scope) 235 if match == Match.FULL: 236 route_name = route.path 237 child_scope = {**scope, **child_scope} 238 if isinstance(route, Mount): 239 child_route_name = self._get_route_name(child_scope, route.routes, route_name) 240 if child_route_name is None: 241 route_name = None 242 else: 243 route_name += child_route_name 244 return route_name 245 elif match == Match.PARTIAL and route_name is None: 246 route_name = route.path 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py --- a/elasticapm/contrib/starlette/__init__.py +++ b/elasticapm/contrib/starlette/__init__.py @@ -31,6 +31,8 @@ from __future__ import absolute_import +from typing import Dict, Optional + import starlette from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint from starlette.requests import Request @@ -50,7 +52,7 @@ logger = get_logger("elasticapm.errors.client") -def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client: +def make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client: """Builds ElasticAPM client. Args:
{"golden_diff": "diff --git a/elasticapm/contrib/starlette/__init__.py b/elasticapm/contrib/starlette/__init__.py\n--- a/elasticapm/contrib/starlette/__init__.py\n+++ b/elasticapm/contrib/starlette/__init__.py\n@@ -31,6 +31,8 @@\n \n from __future__ import absolute_import\n \n+from typing import Dict, Optional\n+\n import starlette\n from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\n from starlette.requests import Request\n@@ -50,7 +52,7 @@\n logger = get_logger(\"elasticapm.errors.client\")\n \n \n-def make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:\n+def make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n \n Args:\n", "issue": "Starlette/FastAPI python agent setup examples result in TypeError\nWhen following the [example](https://www.elastic.co/guide/en/apm/agent/python/current/starlette-support.html#starlette-fastapi) of setting up the Elastic APM agent with python + FastAPI, we find that we can't use `make_apm_client` as described as it has a required positional argument, `config`. The documentation however says that we should be able to call `make_apm_client` with no arguments and in that case it should look for environment variables and/or use defaults. \r\n\r\n**To Reproduce**\r\nFollow the example exactly: \r\n```python\r\nfrom fastapi import FastAPI\r\nfrom elasticapm.contrib.starlette import make_apm_client, ElasticAPM\r\n\r\napm = make_apm_client()\r\n# Client fails to start due to TypeError: TypeError: make_apm_client() missing 1 required positional argument: 'config'\r\n\r\napp = FastAPI()\r\napp.add_middleware(ElasticAPM, client=apm)\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Mac, Linux, Ubuntu\r\n- Python version: 3.8.8\r\n- Framework and version [e.g. Django 2.1]: fastapi==0.65.1\r\n- APM Server version: latest\r\n- Agent version: 6.2.2\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nimport starlette\nfrom starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Match, Mount\nfrom starlette.types import ASGIApp\n\nimport elasticapm\nimport elasticapm.instrumentation.control\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.asyncio.traces import set_context\nfrom elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.errors.client\")\n\n\ndef make_apm_client(config: dict, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n\n Args:\n config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.\n client_cls (Client): Must be Client or its child.\n **defaults: Additional parameters for Client. See `elasticapm.base.Client`\n\n Returns:\n Client\n \"\"\"\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"starlette\"\n defaults[\"framework_version\"] = starlette.__version__\n\n return client_cls(config, **defaults)\n\n\nclass ElasticAPM(BaseHTTPMiddleware):\n \"\"\"\n Starlette / FastAPI middleware for Elastic APM capturing.\n\n >>> elasticapm = make_apm_client({\n >>> 'SERVICE_NAME': 'myapp',\n >>> 'DEBUG': True,\n >>> 'SERVER_URL': 'http://localhost:8200',\n >>> 'CAPTURE_HEADERS': True,\n >>> 'CAPTURE_BODY': 'all'\n >>> })\n\n >>> app.add_middleware(ElasticAPM, client=elasticapm)\n\n Pass an arbitrary APP_NAME and SECRET_TOKEN::\n\n >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')\n\n Pass an explicit client::\n\n >>> elasticapm = ElasticAPM(app, client=client)\n\n Automatically configure logging::\n\n >>> elasticapm = ElasticAPM(app, logging=True)\n\n Capture an exception::\n\n >>> try:\n >>> 1 / 0\n >>> except ZeroDivisionError:\n >>> elasticapm.capture_exception()\n\n Capture a message::\n\n >>> elasticapm.capture_message('hello, world!')\n \"\"\"\n\n def __init__(self, app: ASGIApp, client: Client):\n \"\"\"\n\n Args:\n app (ASGIApp): Starlette app\n client (Client): ElasticAPM Client\n \"\"\"\n self.client = client\n\n if self.client.config.instrument and self.client.config.enabled:\n elasticapm.instrumentation.control.instrument()\n\n super().__init__(app)\n\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:\n \"\"\"Processes the whole request APM capturing.\n\n Args:\n request (Request)\n call_next (RequestResponseEndpoint): Next request process in Starlette.\n\n Returns:\n Response\n \"\"\"\n await self._request_started(request)\n\n try:\n response = await call_next(request)\n elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)\n except Exception:\n await self.capture_exception(\n context={\"request\": await 
get_data_from_request(request, self.client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n raise\n else:\n await self._request_finished(response)\n finally:\n self.client.end_transaction()\n\n return response\n\n async def capture_exception(self, *args, **kwargs):\n \"\"\"Captures your exception.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n self.client.capture_exception(*args, **kwargs)\n\n async def capture_message(self, *args, **kwargs):\n \"\"\"Captures your message.\n\n Args:\n *args: Whatever\n **kwargs: Whatever\n \"\"\"\n self.client.capture_message(*args, **kwargs)\n\n async def _request_started(self, request: Request):\n \"\"\"Captures the begin of the request processing to APM.\n\n Args:\n request (Request)\n \"\"\"\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n if not self.client.should_ignore_url(request.url.path):\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(\n lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\"\n )\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)\n\n async def _request_finished(self, response: Response):\n \"\"\"Captures the end of the request processing to APM.\n\n Args:\n response (Response)\n \"\"\"\n await set_context(\n lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), \"response\"\n )\n\n result = \"HTTP {}xx\".format(response.status_code // 100)\n elasticapm.set_transaction_result(result, override=False)\n\n def get_route_name(self, request: Request) -> str:\n app = request.app\n scope = request.scope\n routes = app.routes\n route_name = self._get_route_name(scope, routes)\n\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. 
To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n if not route_name and app.router.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = scope[\"path\"][:-1]\n trim = True\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n\n route_name = self._get_route_name(redirect_scope, routes)\n if route_name is not None:\n route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n\n def _get_route_name(self, scope, routes, route_name=None):\n for route in routes:\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n if isinstance(route, Mount):\n child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n else:\n route_name += child_route_name\n return route_name\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path\n", "path": "elasticapm/contrib/starlette/__init__.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nfrom __future__ import absolute_import\n\nfrom typing import Dict, Optional\n\nimport starlette\nfrom starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint\nfrom starlette.requests import Request\nfrom starlette.responses import Response\nfrom starlette.routing import Match, Mount\nfrom starlette.types import ASGIApp\n\nimport elasticapm\nimport elasticapm.instrumentation.control\nfrom elasticapm.base import Client\nfrom elasticapm.conf import constants\nfrom elasticapm.contrib.asyncio.traces import set_context\nfrom elasticapm.contrib.starlette.utils import get_body, get_data_from_request, get_data_from_response\nfrom elasticapm.utils.disttracing import TraceParent\nfrom elasticapm.utils.logging import get_logger\n\nlogger = get_logger(\"elasticapm.errors.client\")\n\n\ndef make_apm_client(config: Optional[Dict] = None, client_cls=Client, **defaults) -> Client:\n \"\"\"Builds ElasticAPM client.\n\n Args:\n config (dict): Dictionary of Client configuration. All keys must be uppercase. See `elasticapm.conf.Config`.\n client_cls (Client): Must be Client or its child.\n **defaults: Additional parameters for Client. See `elasticapm.base.Client`\n\n Returns:\n Client\n \"\"\"\n if \"framework_name\" not in defaults:\n defaults[\"framework_name\"] = \"starlette\"\n defaults[\"framework_version\"] = starlette.__version__\n\n return client_cls(config, **defaults)\n\n\nclass ElasticAPM(BaseHTTPMiddleware):\n \"\"\"\n Starlette / FastAPI middleware for Elastic APM capturing.\n\n >>> elasticapm = make_apm_client({\n >>> 'SERVICE_NAME': 'myapp',\n >>> 'DEBUG': True,\n >>> 'SERVER_URL': 'http://localhost:8200',\n >>> 'CAPTURE_HEADERS': True,\n >>> 'CAPTURE_BODY': 'all'\n >>> })\n\n >>> app.add_middleware(ElasticAPM, client=elasticapm)\n\n Pass an arbitrary APP_NAME and SECRET_TOKEN::\n\n >>> elasticapm = ElasticAPM(app, service_name='myapp', secret_token='asdasdasd')\n\n Pass an explicit client::\n\n >>> elasticapm = ElasticAPM(app, client=client)\n\n Automatically configure logging::\n\n >>> elasticapm = ElasticAPM(app, logging=True)\n\n Capture an exception::\n\n >>> try:\n >>> 1 / 0\n >>> except ZeroDivisionError:\n >>> elasticapm.capture_exception()\n\n Capture a message::\n\n >>> elasticapm.capture_message('hello, world!')\n \"\"\"\n\n def __init__(self, app: ASGIApp, client: Client):\n \"\"\"\n\n Args:\n app (ASGIApp): Starlette app\n client (Client): ElasticAPM Client\n \"\"\"\n self.client = client\n\n if self.client.config.instrument and self.client.config.enabled:\n elasticapm.instrumentation.control.instrument()\n\n super().__init__(app)\n\n async def dispatch(self, request: Request, call_next: RequestResponseEndpoint) -> Response:\n \"\"\"Processes the whole request APM capturing.\n\n Args:\n request (Request)\n call_next (RequestResponseEndpoint): Next request process in Starlette.\n\n Returns:\n Response\n \"\"\"\n await self._request_started(request)\n\n try:\n response = await call_next(request)\n elasticapm.set_transaction_outcome(constants.OUTCOME.SUCCESS, override=False)\n except Exception:\n await 
self.capture_exception(\n context={\"request\": await get_data_from_request(request, self.client.config, constants.ERROR)}\n )\n elasticapm.set_transaction_result(\"HTTP 5xx\", override=False)\n elasticapm.set_transaction_outcome(constants.OUTCOME.FAILURE, override=False)\n elasticapm.set_context({\"status_code\": 500}, \"response\")\n\n raise\n else:\n await self._request_finished(response)\n finally:\n self.client.end_transaction()\n\n return response\n\n async def capture_exception(self, *args, **kwargs):\n \"\"\"Captures your exception.\n\n Args:\n *args:\n **kwargs:\n \"\"\"\n self.client.capture_exception(*args, **kwargs)\n\n async def capture_message(self, *args, **kwargs):\n \"\"\"Captures your message.\n\n Args:\n *args: Whatever\n **kwargs: Whatever\n \"\"\"\n self.client.capture_message(*args, **kwargs)\n\n async def _request_started(self, request: Request):\n \"\"\"Captures the begin of the request processing to APM.\n\n Args:\n request (Request)\n \"\"\"\n # When we consume the body, we replace the streaming mechanism with\n # a mocked version -- this workaround came from\n # https://github.com/encode/starlette/issues/495#issuecomment-513138055\n # and we call the workaround here to make sure that regardless of\n # `capture_body` settings, we will have access to the body if we need it.\n if self.client.config.capture_body != \"off\":\n await get_body(request)\n\n if not self.client.should_ignore_url(request.url.path):\n trace_parent = TraceParent.from_headers(dict(request.headers))\n self.client.begin_transaction(\"request\", trace_parent=trace_parent)\n\n await set_context(\n lambda: get_data_from_request(request, self.client.config, constants.TRANSACTION), \"request\"\n )\n transaction_name = self.get_route_name(request) or request.url.path\n elasticapm.set_transaction_name(\"{} {}\".format(request.method, transaction_name), override=False)\n\n async def _request_finished(self, response: Response):\n \"\"\"Captures the end of the request processing to APM.\n\n Args:\n response (Response)\n \"\"\"\n await set_context(\n lambda: get_data_from_response(response, self.client.config, constants.TRANSACTION), \"response\"\n )\n\n result = \"HTTP {}xx\".format(response.status_code // 100)\n elasticapm.set_transaction_result(result, override=False)\n\n def get_route_name(self, request: Request) -> str:\n app = request.app\n scope = request.scope\n routes = app.routes\n route_name = self._get_route_name(scope, routes)\n\n # Starlette magically redirects requests if the path matches a route name with a trailing slash\n # appended or removed. 
To not spam the transaction names list, we do the same here and put these\n # redirects all in the same \"redirect trailing slashes\" transaction name\n if not route_name and app.router.redirect_slashes and scope[\"path\"] != \"/\":\n redirect_scope = dict(scope)\n if scope[\"path\"].endswith(\"/\"):\n redirect_scope[\"path\"] = scope[\"path\"][:-1]\n trim = True\n else:\n redirect_scope[\"path\"] = scope[\"path\"] + \"/\"\n trim = False\n\n route_name = self._get_route_name(redirect_scope, routes)\n if route_name is not None:\n route_name = route_name + \"/\" if trim else route_name[:-1]\n return route_name\n\n def _get_route_name(self, scope, routes, route_name=None):\n for route in routes:\n match, child_scope = route.matches(scope)\n if match == Match.FULL:\n route_name = route.path\n child_scope = {**scope, **child_scope}\n if isinstance(route, Mount):\n child_route_name = self._get_route_name(child_scope, route.routes, route_name)\n if child_route_name is None:\n route_name = None\n else:\n route_name += child_route_name\n return route_name\n elif match == Match.PARTIAL and route_name is None:\n route_name = route.path\n", "path": "elasticapm/contrib/starlette/__init__.py"}]}
3,172
198
gh_patches_debug_3260
rasdani/github-patches
git_diff
getredash__redash-5623
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Loading schema for Sqlite DB with "Order" column name fails ### Issue Summary I added a Sqlite Database which has an column with the name `Order`. When I try to create a query, the error `Schema refresh failed.` comes up. ### Steps to Reproduce 1. Add an Sqlite Database which has a column with the name `Order` 2. Try to create a query 3. Get the error `Schema refresh failed.` ### Technical details: * Redash Version: cloned from master * Browser/OS: Brave Browser & Ubuntu 18.1 * How did you install Redash: built from source --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `redash/query_runner/sqlite.py` Content: ``` 1 import logging 2 import sqlite3 3 4 from redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException 5 from redash.utils import json_dumps, json_loads 6 7 logger = logging.getLogger(__name__) 8 9 10 class Sqlite(BaseSQLQueryRunner): 11 noop_query = "pragma quick_check" 12 13 @classmethod 14 def configuration_schema(cls): 15 return { 16 "type": "object", 17 "properties": {"dbpath": {"type": "string", "title": "Database Path"}}, 18 "required": ["dbpath"], 19 } 20 21 @classmethod 22 def type(cls): 23 return "sqlite" 24 25 def __init__(self, configuration): 26 super(Sqlite, self).__init__(configuration) 27 28 self._dbpath = self.configuration["dbpath"] 29 30 def _get_tables(self, schema): 31 query_table = "select tbl_name from sqlite_master where type='table'" 32 query_columns = "PRAGMA table_info(%s)" 33 34 results, error = self.run_query(query_table, None) 35 36 if error is not None: 37 raise Exception("Failed getting schema.") 38 39 results = json_loads(results) 40 41 for row in results["rows"]: 42 table_name = row["tbl_name"] 43 schema[table_name] = {"name": table_name, "columns": []} 44 results_table, error = self.run_query(query_columns % (table_name,), None) 45 if error is not None: 46 raise Exception("Failed getting schema.") 47 48 results_table = json_loads(results_table) 49 for row_column in results_table["rows"]: 50 schema[table_name]["columns"].append(row_column["name"]) 51 52 return list(schema.values()) 53 54 def run_query(self, query, user): 55 connection = sqlite3.connect(self._dbpath) 56 57 cursor = connection.cursor() 58 59 try: 60 cursor.execute(query) 61 62 if cursor.description is not None: 63 columns = self.fetch_columns([(i[0], None) for i in cursor.description]) 64 rows = [ 65 dict(zip((column["name"] for column in columns), row)) 66 for row in cursor 67 ] 68 69 data = {"columns": columns, "rows": rows} 70 error = None 71 json_data = json_dumps(data) 72 else: 73 error = "Query completed but it returned no data." 74 json_data = None 75 except (KeyboardInterrupt, JobTimeoutException): 76 connection.cancel() 77 raise 78 finally: 79 connection.close() 80 return json_data, error 81 82 83 register(Sqlite) 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py --- a/redash/query_runner/sqlite.py +++ b/redash/query_runner/sqlite.py @@ -29,7 +29,7 @@ def _get_tables(self, schema): query_table = "select tbl_name from sqlite_master where type='table'" - query_columns = "PRAGMA table_info(%s)" + query_columns = "PRAGMA table_info(\"%s\")" results, error = self.run_query(query_table, None)
{"golden_diff": "diff --git a/redash/query_runner/sqlite.py b/redash/query_runner/sqlite.py\n--- a/redash/query_runner/sqlite.py\n+++ b/redash/query_runner/sqlite.py\n@@ -29,7 +29,7 @@\n \n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n- query_columns = \"PRAGMA table_info(%s)\"\n+ query_columns = \"PRAGMA table_info(\\\"%s\\\")\"\n \n results, error = self.run_query(query_table, None)\n", "issue": "Loading schema for Sqlite DB with \"Order\" column name fails\n### Issue Summary\r\n\r\nI added a Sqlite Database which has an column with the name `Order`.\r\nWhen I try to create a query, the error `Schema refresh failed.` comes up.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Add an Sqlite Database which has a column with the name `Order`\r\n2. Try to create a query\r\n3. Get the error `Schema refresh failed.`\r\n\r\n\r\n### Technical details:\r\n\r\n* Redash Version: cloned from master\r\n* Browser/OS: Brave Browser & Ubuntu 18.1\r\n* How did you install Redash: built from source\r\n\n", "before_files": [{"content": "import logging\nimport sqlite3\n\nfrom redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass Sqlite(BaseSQLQueryRunner):\n noop_query = \"pragma quick_check\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\"dbpath\": {\"type\": \"string\", \"title\": \"Database Path\"}},\n \"required\": [\"dbpath\"],\n }\n\n @classmethod\n def type(cls):\n return \"sqlite\"\n\n def __init__(self, configuration):\n super(Sqlite, self).__init__(configuration)\n\n self._dbpath = self.configuration[\"dbpath\"]\n\n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n query_columns = \"PRAGMA table_info(%s)\"\n\n results, error = self.run_query(query_table, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n table_name = row[\"tbl_name\"]\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n results_table, error = self.run_query(query_columns % (table_name,), None)\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results_table = json_loads(results_table)\n for row_column in results_table[\"rows\"]:\n schema[table_name][\"columns\"].append(row_column[\"name\"])\n\n return list(schema.values())\n\n def run_query(self, query, user):\n connection = sqlite3.connect(self._dbpath)\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], None) for i in cursor.description])\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n error = \"Query completed but it returned no data.\"\n json_data = None\n except (KeyboardInterrupt, JobTimeoutException):\n connection.cancel()\n raise\n finally:\n connection.close()\n return json_data, error\n\n\nregister(Sqlite)\n", "path": "redash/query_runner/sqlite.py"}], "after_files": [{"content": "import logging\nimport sqlite3\n\nfrom redash.query_runner import BaseSQLQueryRunner, register, JobTimeoutException\nfrom redash.utils import json_dumps, json_loads\n\nlogger = logging.getLogger(__name__)\n\n\nclass 
Sqlite(BaseSQLQueryRunner):\n noop_query = \"pragma quick_check\"\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\"dbpath\": {\"type\": \"string\", \"title\": \"Database Path\"}},\n \"required\": [\"dbpath\"],\n }\n\n @classmethod\n def type(cls):\n return \"sqlite\"\n\n def __init__(self, configuration):\n super(Sqlite, self).__init__(configuration)\n\n self._dbpath = self.configuration[\"dbpath\"]\n\n def _get_tables(self, schema):\n query_table = \"select tbl_name from sqlite_master where type='table'\"\n query_columns = \"PRAGMA table_info(\\\"%s\\\")\"\n\n results, error = self.run_query(query_table, None)\n\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results = json_loads(results)\n\n for row in results[\"rows\"]:\n table_name = row[\"tbl_name\"]\n schema[table_name] = {\"name\": table_name, \"columns\": []}\n results_table, error = self.run_query(query_columns % (table_name,), None)\n if error is not None:\n raise Exception(\"Failed getting schema.\")\n\n results_table = json_loads(results_table)\n for row_column in results_table[\"rows\"]:\n schema[table_name][\"columns\"].append(row_column[\"name\"])\n\n return list(schema.values())\n\n def run_query(self, query, user):\n connection = sqlite3.connect(self._dbpath)\n\n cursor = connection.cursor()\n\n try:\n cursor.execute(query)\n\n if cursor.description is not None:\n columns = self.fetch_columns([(i[0], None) for i in cursor.description])\n rows = [\n dict(zip((column[\"name\"] for column in columns), row))\n for row in cursor\n ]\n\n data = {\"columns\": columns, \"rows\": rows}\n error = None\n json_data = json_dumps(data)\n else:\n error = \"Query completed but it returned no data.\"\n json_data = None\n except (KeyboardInterrupt, JobTimeoutException):\n connection.cancel()\n raise\n finally:\n connection.close()\n return json_data, error\n\n\nregister(Sqlite)\n", "path": "redash/query_runner/sqlite.py"}]}
1,087
120
gh_patches_debug_35067
rasdani/github-patches
git_diff
fedora-infra__bodhi-3276
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RSS feed gives no information about packages build Since commit 60dc56c that RSS feed doesn't give information about the packages affected in a build. Example from [Fedora30 Stable - RSS](https://bodhi.fedoraproject.org/rss/updates/?releases=F30&status=stable) : ```xml <item> <title>FEDORA-2019-59d394e0fd</title> <link>https://bodhi.fedoraproject.org/updates/createrepo_c-0.14.1-1.fc30</link> <description>- Update to 0.14.1 - Add --pkgorigins mode for Koji - Correct pkg count in headers if there were invalid pkgs (RhBug:1596211) - Prevent exiting with 0 if errors occur while finalizing repodata. </description> <pubDate>Fri, 24 May 2019 12:20:49 +0000</pubDate> </item> ``` Also the link is invalid as in #3248 Looking to the [Fedora30 Stable - Web UI](https://bodhi.fedoraproject.org/updates/?releases=F30&status=stable) it seems that it should be: ``` <title>createrepo_c-0.14.1-1.fc30</title> <link>https://bodhi.fedoraproject.org/updates/FEDORA-2019-59d394e0fd</link> ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bodhi/server/renderers.py` Content: ``` 1 # Copyright © 2014-2019 Red Hat, Inc. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 """Define special view renderers, such as RSS.""" 19 import logging 20 import operator 21 22 from pytz import utc 23 from feedgen.feed import FeedGenerator 24 from pyramid.exceptions import HTTPBadRequest 25 26 27 log = logging.getLogger(__name__) 28 29 30 def rss(info): 31 """ 32 Return a RSS renderer. 33 34 Args: 35 info (pyramid.renderers.RendererHelper): Unused. 36 Returns: 37 function: A function that can be used to render a RSS view. 38 """ 39 def render(data, system): 40 """ 41 Render the given data as an RSS view. 42 43 If the request's content type is set to the default, this function will change it to 44 application/rss+xml. 45 46 Args: 47 data (dict): A dictionary describing the information to be rendered. The information can 48 be different types of objects, such as updates, users, comments, or overrides. 49 system (pyramid.events.BeforeRender): Used to get the current request. 50 Returns: 51 str: An RSS document representing the given data. 
52 """ 53 request = system.get('request') 54 if request is not None: 55 response = request.response 56 ct = response.content_type 57 if ct == response.default_content_type: 58 response.content_type = 'application/rss+xml' 59 60 if 'updates' in data: 61 key = 'updates' 62 feed_title = 'Released updates' 63 elif 'users' in data: 64 key = 'users' 65 feed_title = 'Bodhi users' 66 elif 'comments' in data: 67 key = 'comments' 68 feed_title = 'User comments' 69 elif 'overrides' in data: 70 key = 'overrides' 71 feed_title = 'Update overrides' 72 else: 73 # This is a request we don't know how to render. Let's return BadRequest and log. 74 log.debug('Unable to render RSS feed for data: %s', data) 75 # See if we have a request so we can set a code without raising an Exception 76 if request is not None: 77 response.status = HTTPBadRequest.code 78 return 'Invalid RSS feed request' 79 else: 80 raise HTTPBadRequest('Invalid RSS feed request') 81 82 feed_description_list = [] 83 for k in request.GET.keys(): 84 feed_description_list.append('%s(%s)' % (k, request.GET[k])) 85 if feed_description_list: 86 feed_description = 'Filtered on: ' + ', '.join(feed_description_list) 87 else: 88 feed_description = "All %s" % (key) 89 90 feed = FeedGenerator() 91 feed.title(feed_title) 92 feed.link(href=request.url, rel='self') 93 feed.description(feed_description) 94 feed.language('en') 95 96 def linker(route, param, key): 97 def link_dict(obj): 98 return dict(href=request.route_url(route, **{param: obj[key]})) 99 return link_dict 100 101 getters = { 102 'updates': { 103 'title': operator.itemgetter('alias'), 104 'link': linker('update', 'id', 'alias'), 105 'description': operator.itemgetter('notes'), 106 'pubDate': lambda obj: utc.localize(obj['date_submitted']), 107 }, 108 'users': { 109 'title': operator.itemgetter('name'), 110 'link': linker('user', 'name', 'name'), 111 'description': operator.itemgetter('name'), 112 }, 113 'comments': { 114 'title': operator.itemgetter('rss_title'), 115 'link': linker('comment', 'id', 'id'), 116 'description': operator.itemgetter('text'), 117 'pubDate': lambda obj: utc.localize(obj['timestamp']), 118 }, 119 'overrides': { 120 'title': operator.itemgetter('nvr'), 121 'link': linker('override', 'nvr', 'nvr'), 122 'description': operator.itemgetter('notes'), 123 'pubDate': lambda obj: utc.localize(obj['submission_date']), 124 }, 125 } 126 127 for value in data[key]: 128 feed_item = feed.add_item() 129 for name, getter in getters[key].items(): 130 # Because we have to use methods to fill feed entry attributes, 131 # it's done by getting methods by name and calling them 132 # on the same line. 133 getattr(feed_item, name)(getter(value)) 134 135 return feed.rss_str() 136 137 return render 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bodhi/server/renderers.py b/bodhi/server/renderers.py --- a/bodhi/server/renderers.py +++ b/bodhi/server/renderers.py @@ -23,6 +23,8 @@ from feedgen.feed import FeedGenerator from pyramid.exceptions import HTTPBadRequest +from bodhi.server.util import markup + log = logging.getLogger(__name__) @@ -98,11 +100,44 @@ return dict(href=request.route_url(route, **{param: obj[key]})) return link_dict + def describe_update(alias, notes, builds): + """ + Wrap calls to operator.itemgetter to retrieve notes and builds list. + + Methods are used to fill feed entry values, so we must use a wrapper + to get an HTML formatted description from the `notes` and the `builds` + properties of the update. + + For example: + getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds')) + description_value = getter(update_data) + + Args: + alias (operator.itemgetter): A callable object which returns update alias + as string. + notes (operator.itemgetter): A callable object which returns update notes + as string. + builds (operator.itemgetter): A callable object which returns a list of builds + associated to the update. + Returns: + function: A function which accepts a dict representing an update as parameter. + """ + def describe(*args, **kwargs): + text = f'# {alias(*args, **kwargs)}\n' + text += f'## Packages in this update:\n' + for p in builds(*args, **kwargs): + text += f'* {p.nvr}\n' + text += f'## Update description:\n{notes(*args, **kwargs)}' + return markup(None, text) + return describe + getters = { 'updates': { - 'title': operator.itemgetter('alias'), + 'title': operator.itemgetter('title'), 'link': linker('update', 'id', 'alias'), - 'description': operator.itemgetter('notes'), + 'description': describe_update(operator.itemgetter('alias'), + operator.itemgetter('notes'), + operator.itemgetter('builds')), 'pubDate': lambda obj: utc.localize(obj['date_submitted']), }, 'users': {
{"golden_diff": "diff --git a/bodhi/server/renderers.py b/bodhi/server/renderers.py\n--- a/bodhi/server/renderers.py\n+++ b/bodhi/server/renderers.py\n@@ -23,6 +23,8 @@\n from feedgen.feed import FeedGenerator\n from pyramid.exceptions import HTTPBadRequest\n \n+from bodhi.server.util import markup\n+\n \n log = logging.getLogger(__name__)\n \n@@ -98,11 +100,44 @@\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n \n+ def describe_update(alias, notes, builds):\n+ \"\"\"\n+ Wrap calls to operator.itemgetter to retrieve notes and builds list.\n+\n+ Methods are used to fill feed entry values, so we must use a wrapper\n+ to get an HTML formatted description from the `notes` and the `builds`\n+ properties of the update.\n+\n+ For example:\n+ getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))\n+ description_value = getter(update_data)\n+\n+ Args:\n+ alias (operator.itemgetter): A callable object which returns update alias\n+ as string.\n+ notes (operator.itemgetter): A callable object which returns update notes\n+ as string.\n+ builds (operator.itemgetter): A callable object which returns a list of builds\n+ associated to the update.\n+ Returns:\n+ function: A function which accepts a dict representing an update as parameter.\n+ \"\"\"\n+ def describe(*args, **kwargs):\n+ text = f'# {alias(*args, **kwargs)}\\n'\n+ text += f'## Packages in this update:\\n'\n+ for p in builds(*args, **kwargs):\n+ text += f'* {p.nvr}\\n'\n+ text += f'## Update description:\\n{notes(*args, **kwargs)}'\n+ return markup(None, text)\n+ return describe\n+\n getters = {\n 'updates': {\n- 'title': operator.itemgetter('alias'),\n+ 'title': operator.itemgetter('title'),\n 'link': linker('update', 'id', 'alias'),\n- 'description': operator.itemgetter('notes'),\n+ 'description': describe_update(operator.itemgetter('alias'),\n+ operator.itemgetter('notes'),\n+ operator.itemgetter('builds')),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n", "issue": "RSS feed gives no information about packages build\nSince commit 60dc56c that RSS feed doesn't give information about the packages affected in a build.\r\nExample from [Fedora30 Stable - RSS](https://bodhi.fedoraproject.org/rss/updates/?releases=F30&status=stable) :\r\n```xml\r\n<item>\r\n <title>FEDORA-2019-59d394e0fd</title>\r\n <link>https://bodhi.fedoraproject.org/updates/createrepo_c-0.14.1-1.fc30</link>\r\n <description>- Update to 0.14.1\r\n- Add --pkgorigins mode for Koji\r\n- Correct pkg count in headers if there were invalid pkgs (RhBug:1596211)\r\n- Prevent exiting with 0 if errors occur while finalizing repodata.\r\n </description>\r\n <pubDate>Fri, 24 May 2019 12:20:49 +0000</pubDate>\r\n</item>\r\n```\r\n\r\nAlso the link is invalid as in #3248\r\n\r\nLooking to the [Fedora30 Stable - Web UI](https://bodhi.fedoraproject.org/updates/?releases=F30&status=stable) it seems that it should be:\r\n```\r\n <title>createrepo_c-0.14.1-1.fc30</title>\r\n <link>https://bodhi.fedoraproject.org/updates/FEDORA-2019-59d394e0fd</link>\r\n```\n", "before_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but 
WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Define special view renderers, such as RSS.\"\"\"\nimport logging\nimport operator\n\nfrom pytz import utc\nfrom feedgen.feed import FeedGenerator\nfrom pyramid.exceptions import HTTPBadRequest\n\n\nlog = logging.getLogger(__name__)\n\n\ndef rss(info):\n \"\"\"\n Return a RSS renderer.\n\n Args:\n info (pyramid.renderers.RendererHelper): Unused.\n Returns:\n function: A function that can be used to render a RSS view.\n \"\"\"\n def render(data, system):\n \"\"\"\n Render the given data as an RSS view.\n\n If the request's content type is set to the default, this function will change it to\n application/rss+xml.\n\n Args:\n data (dict): A dictionary describing the information to be rendered. The information can\n be different types of objects, such as updates, users, comments, or overrides.\n system (pyramid.events.BeforeRender): Used to get the current request.\n Returns:\n str: An RSS document representing the given data.\n \"\"\"\n request = system.get('request')\n if request is not None:\n response = request.response\n ct = response.content_type\n if ct == response.default_content_type:\n response.content_type = 'application/rss+xml'\n\n if 'updates' in data:\n key = 'updates'\n feed_title = 'Released updates'\n elif 'users' in data:\n key = 'users'\n feed_title = 'Bodhi users'\n elif 'comments' in data:\n key = 'comments'\n feed_title = 'User comments'\n elif 'overrides' in data:\n key = 'overrides'\n feed_title = 'Update overrides'\n else:\n # This is a request we don't know how to render. 
Let's return BadRequest and log.\n log.debug('Unable to render RSS feed for data: %s', data)\n # See if we have a request so we can set a code without raising an Exception\n if request is not None:\n response.status = HTTPBadRequest.code\n return 'Invalid RSS feed request'\n else:\n raise HTTPBadRequest('Invalid RSS feed request')\n\n feed_description_list = []\n for k in request.GET.keys():\n feed_description_list.append('%s(%s)' % (k, request.GET[k]))\n if feed_description_list:\n feed_description = 'Filtered on: ' + ', '.join(feed_description_list)\n else:\n feed_description = \"All %s\" % (key)\n\n feed = FeedGenerator()\n feed.title(feed_title)\n feed.link(href=request.url, rel='self')\n feed.description(feed_description)\n feed.language('en')\n\n def linker(route, param, key):\n def link_dict(obj):\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n\n getters = {\n 'updates': {\n 'title': operator.itemgetter('alias'),\n 'link': linker('update', 'id', 'alias'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n 'title': operator.itemgetter('name'),\n 'link': linker('user', 'name', 'name'),\n 'description': operator.itemgetter('name'),\n },\n 'comments': {\n 'title': operator.itemgetter('rss_title'),\n 'link': linker('comment', 'id', 'id'),\n 'description': operator.itemgetter('text'),\n 'pubDate': lambda obj: utc.localize(obj['timestamp']),\n },\n 'overrides': {\n 'title': operator.itemgetter('nvr'),\n 'link': linker('override', 'nvr', 'nvr'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['submission_date']),\n },\n }\n\n for value in data[key]:\n feed_item = feed.add_item()\n for name, getter in getters[key].items():\n # Because we have to use methods to fill feed entry attributes,\n # it's done by getting methods by name and calling them\n # on the same line.\n getattr(feed_item, name)(getter(value))\n\n return feed.rss_str()\n\n return render\n", "path": "bodhi/server/renderers.py"}], "after_files": [{"content": "# Copyright \u00a9 2014-2019 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"Define special view renderers, such as RSS.\"\"\"\nimport logging\nimport operator\n\nfrom pytz import utc\nfrom feedgen.feed import FeedGenerator\nfrom pyramid.exceptions import HTTPBadRequest\n\nfrom bodhi.server.util import markup\n\n\nlog = logging.getLogger(__name__)\n\n\ndef rss(info):\n \"\"\"\n Return a RSS renderer.\n\n Args:\n info (pyramid.renderers.RendererHelper): Unused.\n Returns:\n function: A function that can be used to render a RSS view.\n \"\"\"\n def render(data, system):\n \"\"\"\n Render the given data as an RSS view.\n\n If the request's content type is set to the default, this function will change it to\n application/rss+xml.\n\n Args:\n data (dict): A dictionary describing the information to be rendered. The information can\n be different types of objects, such as updates, users, comments, or overrides.\n system (pyramid.events.BeforeRender): Used to get the current request.\n Returns:\n str: An RSS document representing the given data.\n \"\"\"\n request = system.get('request')\n if request is not None:\n response = request.response\n ct = response.content_type\n if ct == response.default_content_type:\n response.content_type = 'application/rss+xml'\n\n if 'updates' in data:\n key = 'updates'\n feed_title = 'Released updates'\n elif 'users' in data:\n key = 'users'\n feed_title = 'Bodhi users'\n elif 'comments' in data:\n key = 'comments'\n feed_title = 'User comments'\n elif 'overrides' in data:\n key = 'overrides'\n feed_title = 'Update overrides'\n else:\n # This is a request we don't know how to render. 
Let's return BadRequest and log.\n log.debug('Unable to render RSS feed for data: %s', data)\n # See if we have a request so we can set a code without raising an Exception\n if request is not None:\n response.status = HTTPBadRequest.code\n return 'Invalid RSS feed request'\n else:\n raise HTTPBadRequest('Invalid RSS feed request')\n\n feed_description_list = []\n for k in request.GET.keys():\n feed_description_list.append('%s(%s)' % (k, request.GET[k]))\n if feed_description_list:\n feed_description = 'Filtered on: ' + ', '.join(feed_description_list)\n else:\n feed_description = \"All %s\" % (key)\n\n feed = FeedGenerator()\n feed.title(feed_title)\n feed.link(href=request.url, rel='self')\n feed.description(feed_description)\n feed.language('en')\n\n def linker(route, param, key):\n def link_dict(obj):\n return dict(href=request.route_url(route, **{param: obj[key]}))\n return link_dict\n\n def describe_update(alias, notes, builds):\n \"\"\"\n Wrap calls to operator.itemgetter to retrieve notes and builds list.\n\n Methods are used to fill feed entry values, so we must use a wrapper\n to get an HTML formatted description from the `notes` and the `builds`\n properties of the update.\n\n For example:\n getter = describe_update(operator.itemgetter('notes'),operator.itemgetter('builds'))\n description_value = getter(update_data)\n\n Args:\n alias (operator.itemgetter): A callable object which returns update alias\n as string.\n notes (operator.itemgetter): A callable object which returns update notes\n as string.\n builds (operator.itemgetter): A callable object which returns a list of builds\n associated to the update.\n Returns:\n function: A function which accepts a dict representing an update as parameter.\n \"\"\"\n def describe(*args, **kwargs):\n text = f'# {alias(*args, **kwargs)}\\n'\n text += f'## Packages in this update:\\n'\n for p in builds(*args, **kwargs):\n text += f'* {p.nvr}\\n'\n text += f'## Update description:\\n{notes(*args, **kwargs)}'\n return markup(None, text)\n return describe\n\n getters = {\n 'updates': {\n 'title': operator.itemgetter('title'),\n 'link': linker('update', 'id', 'alias'),\n 'description': describe_update(operator.itemgetter('alias'),\n operator.itemgetter('notes'),\n operator.itemgetter('builds')),\n 'pubDate': lambda obj: utc.localize(obj['date_submitted']),\n },\n 'users': {\n 'title': operator.itemgetter('name'),\n 'link': linker('user', 'name', 'name'),\n 'description': operator.itemgetter('name'),\n },\n 'comments': {\n 'title': operator.itemgetter('rss_title'),\n 'link': linker('comment', 'id', 'id'),\n 'description': operator.itemgetter('text'),\n 'pubDate': lambda obj: utc.localize(obj['timestamp']),\n },\n 'overrides': {\n 'title': operator.itemgetter('nvr'),\n 'link': linker('override', 'nvr', 'nvr'),\n 'description': operator.itemgetter('notes'),\n 'pubDate': lambda obj: utc.localize(obj['submission_date']),\n },\n }\n\n for value in data[key]:\n feed_item = feed.add_item()\n for name, getter in getters[key].items():\n # Because we have to use methods to fill feed entry attributes,\n # it's done by getting methods by name and calling them\n # on the same line.\n getattr(feed_item, name)(getter(value))\n\n return feed.rss_str()\n\n return render\n", "path": "bodhi/server/renderers.py"}]}
2,030
536
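For illustration, a minimal standalone sketch of the `describe_update` wrapper introduced by the patch in the record above. It drops the bodhi-specific `markup()` call and uses plain strings where the real code reads `build.nvr`, so the sample data and names below are placeholders rather than the service's actual objects.

```python
import operator

def describe_update(alias, notes, builds):
    # Same shape as the patched helper: compose itemgetters into one callable
    # that builds the RSS description from alias, builds and notes.
    def describe(obj):
        text = f"# {alias(obj)}\n"
        text += "## Packages in this update:\n"
        for build in builds(obj):
            text += f"* {build}\n"
        text += f"## Update description:\n{notes(obj)}"
        return text  # the real patch passes this through bodhi.server.util.markup()
    return describe

getter = describe_update(
    operator.itemgetter("alias"),
    operator.itemgetter("notes"),
    operator.itemgetter("builds"),
)
update = {
    "alias": "FEDORA-2019-59d394e0fd",
    "notes": "- Update to 0.14.1",
    "builds": ["createrepo_c-0.14.1-1.fc30"],
}
print(getter(update))
```

With this wrapper the feed item description lists the builds in the update, which is exactly the information the issue reports as missing from the RSS output.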
gh_patches_debug_36153
rasdani/github-patches
git_diff
scikit-hep__pyhf-1556
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove Python 2 syntax from events examples in comments # Description In `pyhf.events` there are a two examples of Python 2 syntax being used for https://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L46-L53 and https://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L69-L87 These examples should be updated to use Python 3 syntax. Also the examples are wrong themselves. For example, the first example should be ```python >>> import pyhf >>> @pyhf.events.subscribe('myevent') ... def test(a,b): ... print(a+b) ... >>> pyhf.events.trigger("myevent")(1,2) 3 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pyhf/events.py` Content: ``` 1 import weakref 2 from functools import wraps 3 4 __events = {} 5 __disabled_events = set() 6 7 __all__ = [ 8 "Callables", 9 "disable", 10 "enable", 11 "noop", 12 "register", 13 "subscribe", 14 "trigger", 15 ] 16 17 18 def __dir__(): 19 return __all__ 20 21 22 def noop(*args, **kwargs): 23 pass 24 25 26 class Callables: 27 def __init__(self): 28 self._callbacks = [] 29 30 @property 31 def callbacks(self): 32 """ 33 Get the current list of living callbacks. 34 """ 35 self._flush() 36 return self._callbacks 37 38 def append(self, callback): 39 """ 40 Append a new bound method as a callback to the list of callables. 41 """ 42 try: 43 # methods 44 callback_ref = weakref.ref(callback.__func__), weakref.ref( 45 callback.__self__ 46 ) 47 except AttributeError: 48 callback_ref = weakref.ref(callback), None 49 self._callbacks.append(callback_ref) 50 51 def _flush(self): 52 """ 53 Flush the list of callbacks with those who are weakly-referencing deleted objects. 54 55 Note: must interact with the self._callbacks directly, and not 56 self.callbacks, to avoid infinite recursion. 57 """ 58 _callbacks = [] 59 for func, arg in self._callbacks: 60 if arg is not None: 61 arg_ref = arg() 62 if arg_ref is None: 63 continue 64 _callbacks.append((func, arg)) 65 self._callbacks = _callbacks 66 67 def __call__(self, *args, **kwargs): 68 for func, arg in self.callbacks: 69 # weakref: needs to be de-ref'd first before calling 70 if arg is not None: 71 func()(arg(), *args, **kwargs) 72 else: 73 func()(*args, **kwargs) 74 75 def __iter__(self): 76 return iter(self.callbacks) 77 78 def __getitem__(self, index): 79 return self.callbacks[index] 80 81 def __len__(self): 82 return len(self.callbacks) 83 84 def __repr__(self): 85 return f"Callables({self.callbacks})" 86 87 88 def subscribe(event): 89 """ 90 Subscribe a function or object method as a callback to an event. 91 92 Note: this is meant to be used as a decorator. 93 94 Args: 95 event (:obj:`str`): The name of the event to subscribe to. 96 97 Returns: 98 :obj:`function`: Decorated function. 99 """ 100 # Example: 101 # 102 # >>> @pyhf.events.subscribe('myevent') 103 # ... def test(a,b): 104 # ... print a+b 105 # ... 106 # >>> pyhf.events.trigger_myevent(1,2) 107 # 3 108 global __events 109 110 def __decorator(func): 111 __events.setdefault(event, Callables()).append(func) 112 return func 113 114 return __decorator 115 116 117 def register(event): 118 """ 119 Register a function or object method to trigger an event. This creates two 120 events: ``{event_name}::before`` and ``{event_name}::after``. 
121 122 Note: this is meant to be used as a decorator. 123 124 Args: 125 event (:obj:`str`): The name of the event to subscribe to. 126 127 Returns: 128 :obj:`function`: Decorated function. 129 130 """ 131 # Examples: 132 # 133 # >>> @pyhf.events.register('test_func') 134 # ... def test(a,b): 135 # ... print a+b 136 # ... 137 # >>> @pyhf.events.subscribe('test_func::before') 138 # ... def precall(): 139 # ... print 'before call' 140 # ... 141 # >>> @pyhf.events.subscribe('test_func::after') 142 # ... def postcall(): 143 # ... print 'after call' 144 # ... 145 # >>> test(1,2) 146 # "before call" 147 # 3 148 # "after call" 149 # >>> 150 151 def _register(func): 152 @wraps(func) 153 def register_wrapper(*args, **kwargs): 154 trigger(f"{event:s}::before")() 155 result = func(*args, **kwargs) 156 trigger(f"{event:s}::after")() 157 return result 158 159 return register_wrapper 160 161 return _register 162 163 164 def trigger(event): 165 """ 166 Trigger an event if not disabled. 167 """ 168 global __events, __disabled_events, noop 169 is_noop = bool(event in __disabled_events or event not in __events) 170 return noop if is_noop else __events.get(event) 171 172 173 def disable(event): 174 """ 175 Disable an event from firing. 176 """ 177 global __disabled_events 178 __disabled_events.add(event) 179 180 181 def enable(event): 182 """ 183 Enable an event to be fired if disabled. 184 """ 185 global __disabled_events 186 __disabled_events.remove(event) 187 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pyhf/events.py b/src/pyhf/events.py --- a/src/pyhf/events.py +++ b/src/pyhf/events.py @@ -89,22 +89,27 @@ """ Subscribe a function or object method as a callback to an event. - Note: this is meant to be used as a decorator. + .. note:: + + This is meant to be used as a decorator. Args: event (:obj:`str`): The name of the event to subscribe to. Returns: :obj:`function`: Decorated function. + + Example: + >>> import pyhf + >>> @pyhf.events.subscribe("myevent") + ... def test(a, b): + ... print(a + b) + ... + >>> pyhf.events.trigger("myevent")(1, 2) + 3 + """ - # Example: - # - # >>> @pyhf.events.subscribe('myevent') - # ... def test(a,b): - # ... print a+b - # ... - # >>> pyhf.events.trigger_myevent(1,2) - # 3 + global __events def __decorator(func): @@ -119,7 +124,9 @@ Register a function or object method to trigger an event. This creates two events: ``{event_name}::before`` and ``{event_name}::after``. - Note: this is meant to be used as a decorator. + .. note:: + + This is meant to be used as a decorator. Args: event (:obj:`str`): The name of the event to subscribe to. @@ -127,26 +134,26 @@ Returns: :obj:`function`: Decorated function. + Example: + >>> import pyhf + >>> @pyhf.events.register("test_func") + ... def test(a, b): + ... print(a + b) + ... + >>> @pyhf.events.subscribe("test_func::before") + ... def precall(): + ... print("before call") + ... + >>> @pyhf.events.subscribe("test_func::after") + ... def postcall(): + ... print("after call") + ... + >>> test(1, 2) + before call + 3 + after call + """ - # Examples: - # - # >>> @pyhf.events.register('test_func') - # ... def test(a,b): - # ... print a+b - # ... - # >>> @pyhf.events.subscribe('test_func::before') - # ... def precall(): - # ... print 'before call' - # ... - # >>> @pyhf.events.subscribe('test_func::after') - # ... def postcall(): - # ... print 'after call' - # ... - # >>> test(1,2) - # "before call" - # 3 - # "after call" - # >>> def _register(func): @wraps(func)
{"golden_diff": "diff --git a/src/pyhf/events.py b/src/pyhf/events.py\n--- a/src/pyhf/events.py\n+++ b/src/pyhf/events.py\n@@ -89,22 +89,27 @@\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n \n- Note: this is meant to be used as a decorator.\n+ .. note::\n+\n+ This is meant to be used as a decorator.\n \n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n \n Returns:\n :obj:`function`: Decorated function.\n+\n+ Example:\n+ >>> import pyhf\n+ >>> @pyhf.events.subscribe(\"myevent\")\n+ ... def test(a, b):\n+ ... print(a + b)\n+ ...\n+ >>> pyhf.events.trigger(\"myevent\")(1, 2)\n+ 3\n+\n \"\"\"\n- # Example:\n- #\n- # >>> @pyhf.events.subscribe('myevent')\n- # ... def test(a,b):\n- # ... print a+b\n- # ...\n- # >>> pyhf.events.trigger_myevent(1,2)\n- # 3\n+\n global __events\n \n def __decorator(func):\n@@ -119,7 +124,9 @@\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n \n- Note: this is meant to be used as a decorator.\n+ .. note::\n+\n+ This is meant to be used as a decorator.\n \n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n@@ -127,26 +134,26 @@\n Returns:\n :obj:`function`: Decorated function.\n \n+ Example:\n+ >>> import pyhf\n+ >>> @pyhf.events.register(\"test_func\")\n+ ... def test(a, b):\n+ ... print(a + b)\n+ ...\n+ >>> @pyhf.events.subscribe(\"test_func::before\")\n+ ... def precall():\n+ ... print(\"before call\")\n+ ...\n+ >>> @pyhf.events.subscribe(\"test_func::after\")\n+ ... def postcall():\n+ ... print(\"after call\")\n+ ...\n+ >>> test(1, 2)\n+ before call\n+ 3\n+ after call\n+\n \"\"\"\n- # Examples:\n- #\n- # >>> @pyhf.events.register('test_func')\n- # ... def test(a,b):\n- # ... print a+b\n- # ...\n- # >>> @pyhf.events.subscribe('test_func::before')\n- # ... def precall():\n- # ... print 'before call'\n- # ...\n- # >>> @pyhf.events.subscribe('test_func::after')\n- # ... def postcall():\n- # ... print 'after call'\n- # ...\n- # >>> test(1,2)\n- # \"before call\"\n- # 3\n- # \"after call\"\n- # >>>\n \n def _register(func):\n @wraps(func)\n", "issue": "Remove Python 2 syntax from events examples in comments\n# Description\r\n\r\nIn `pyhf.events` there are a two examples of Python 2 syntax being used for \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L46-L53\r\n\r\nand \r\n\r\nhttps://github.com/scikit-hep/pyhf/blob/29c3df0e23a428004a065aed61cefb6a526a7332/src/pyhf/events.py#L69-L87\r\n\r\nThese examples should be updated to use Python 3 syntax.\r\n\r\nAlso the examples are wrong themselves. For example, the first example should be\r\n\r\n```python\r\n>>> import pyhf\r\n>>> @pyhf.events.subscribe('myevent')\r\n... def test(a,b):\r\n... print(a+b)\r\n... 
\r\n>>> pyhf.events.trigger(\"myevent\")(1,2)\r\n3\r\n```\n", "before_files": [{"content": "import weakref\nfrom functools import wraps\n\n__events = {}\n__disabled_events = set()\n\n__all__ = [\n \"Callables\",\n \"disable\",\n \"enable\",\n \"noop\",\n \"register\",\n \"subscribe\",\n \"trigger\",\n]\n\n\ndef __dir__():\n return __all__\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass Callables:\n def __init__(self):\n self._callbacks = []\n\n @property\n def callbacks(self):\n \"\"\"\n Get the current list of living callbacks.\n \"\"\"\n self._flush()\n return self._callbacks\n\n def append(self, callback):\n \"\"\"\n Append a new bound method as a callback to the list of callables.\n \"\"\"\n try:\n # methods\n callback_ref = weakref.ref(callback.__func__), weakref.ref(\n callback.__self__\n )\n except AttributeError:\n callback_ref = weakref.ref(callback), None\n self._callbacks.append(callback_ref)\n\n def _flush(self):\n \"\"\"\n Flush the list of callbacks with those who are weakly-referencing deleted objects.\n\n Note: must interact with the self._callbacks directly, and not\n self.callbacks, to avoid infinite recursion.\n \"\"\"\n _callbacks = []\n for func, arg in self._callbacks:\n if arg is not None:\n arg_ref = arg()\n if arg_ref is None:\n continue\n _callbacks.append((func, arg))\n self._callbacks = _callbacks\n\n def __call__(self, *args, **kwargs):\n for func, arg in self.callbacks:\n # weakref: needs to be de-ref'd first before calling\n if arg is not None:\n func()(arg(), *args, **kwargs)\n else:\n func()(*args, **kwargs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\n def __getitem__(self, index):\n return self.callbacks[index]\n\n def __len__(self):\n return len(self.callbacks)\n\n def __repr__(self):\n return f\"Callables({self.callbacks})\"\n\n\ndef subscribe(event):\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n\n Note: this is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n \"\"\"\n # Example:\n #\n # >>> @pyhf.events.subscribe('myevent')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> pyhf.events.trigger_myevent(1,2)\n # 3\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n\n Note: this is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n\n \"\"\"\n # Examples:\n #\n # >>> @pyhf.events.register('test_func')\n # ... def test(a,b):\n # ... print a+b\n # ...\n # >>> @pyhf.events.subscribe('test_func::before')\n # ... def precall():\n # ... print 'before call'\n # ...\n # >>> @pyhf.events.subscribe('test_func::after')\n # ... def postcall():\n # ... 
print 'after call'\n # ...\n # >>> test(1,2)\n # \"before call\"\n # 3\n # \"after call\"\n # >>>\n\n def _register(func):\n @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(f\"{event:s}::before\")()\n result = func(*args, **kwargs)\n trigger(f\"{event:s}::after\")()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}], "after_files": [{"content": "import weakref\nfrom functools import wraps\n\n__events = {}\n__disabled_events = set()\n\n__all__ = [\n \"Callables\",\n \"disable\",\n \"enable\",\n \"noop\",\n \"register\",\n \"subscribe\",\n \"trigger\",\n]\n\n\ndef __dir__():\n return __all__\n\n\ndef noop(*args, **kwargs):\n pass\n\n\nclass Callables:\n def __init__(self):\n self._callbacks = []\n\n @property\n def callbacks(self):\n \"\"\"\n Get the current list of living callbacks.\n \"\"\"\n self._flush()\n return self._callbacks\n\n def append(self, callback):\n \"\"\"\n Append a new bound method as a callback to the list of callables.\n \"\"\"\n try:\n # methods\n callback_ref = weakref.ref(callback.__func__), weakref.ref(\n callback.__self__\n )\n except AttributeError:\n callback_ref = weakref.ref(callback), None\n self._callbacks.append(callback_ref)\n\n def _flush(self):\n \"\"\"\n Flush the list of callbacks with those who are weakly-referencing deleted objects.\n\n Note: must interact with the self._callbacks directly, and not\n self.callbacks, to avoid infinite recursion.\n \"\"\"\n _callbacks = []\n for func, arg in self._callbacks:\n if arg is not None:\n arg_ref = arg()\n if arg_ref is None:\n continue\n _callbacks.append((func, arg))\n self._callbacks = _callbacks\n\n def __call__(self, *args, **kwargs):\n for func, arg in self.callbacks:\n # weakref: needs to be de-ref'd first before calling\n if arg is not None:\n func()(arg(), *args, **kwargs)\n else:\n func()(*args, **kwargs)\n\n def __iter__(self):\n return iter(self.callbacks)\n\n def __getitem__(self, index):\n return self.callbacks[index]\n\n def __len__(self):\n return len(self.callbacks)\n\n def __repr__(self):\n return f\"Callables({self.callbacks})\"\n\n\ndef subscribe(event):\n \"\"\"\n Subscribe a function or object method as a callback to an event.\n\n .. note::\n\n This is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n\n Example:\n >>> import pyhf\n >>> @pyhf.events.subscribe(\"myevent\")\n ... def test(a, b):\n ... print(a + b)\n ...\n >>> pyhf.events.trigger(\"myevent\")(1, 2)\n 3\n\n \"\"\"\n\n global __events\n\n def __decorator(func):\n __events.setdefault(event, Callables()).append(func)\n return func\n\n return __decorator\n\n\ndef register(event):\n \"\"\"\n Register a function or object method to trigger an event. This creates two\n events: ``{event_name}::before`` and ``{event_name}::after``.\n\n .. 
note::\n\n This is meant to be used as a decorator.\n\n Args:\n event (:obj:`str`): The name of the event to subscribe to.\n\n Returns:\n :obj:`function`: Decorated function.\n\n Example:\n >>> import pyhf\n >>> @pyhf.events.register(\"test_func\")\n ... def test(a, b):\n ... print(a + b)\n ...\n >>> @pyhf.events.subscribe(\"test_func::before\")\n ... def precall():\n ... print(\"before call\")\n ...\n >>> @pyhf.events.subscribe(\"test_func::after\")\n ... def postcall():\n ... print(\"after call\")\n ...\n >>> test(1, 2)\n before call\n 3\n after call\n\n \"\"\"\n\n def _register(func):\n @wraps(func)\n def register_wrapper(*args, **kwargs):\n trigger(f\"{event:s}::before\")()\n result = func(*args, **kwargs)\n trigger(f\"{event:s}::after\")()\n return result\n\n return register_wrapper\n\n return _register\n\n\ndef trigger(event):\n \"\"\"\n Trigger an event if not disabled.\n \"\"\"\n global __events, __disabled_events, noop\n is_noop = bool(event in __disabled_events or event not in __events)\n return noop if is_noop else __events.get(event)\n\n\ndef disable(event):\n \"\"\"\n Disable an event from firing.\n \"\"\"\n global __disabled_events\n __disabled_events.add(event)\n\n\ndef enable(event):\n \"\"\"\n Enable an event to be fired if disabled.\n \"\"\"\n global __disabled_events\n __disabled_events.remove(event)\n", "path": "src/pyhf/events.py"}]}
1,988
715
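As a quick check of the corrected docstring examples in the patch above, the same calls written as a plain script (assuming pyhf is installed; the behaviour follows the events module shown in the record):

```python
import pyhf

@pyhf.events.subscribe("myevent")
def handler(a, b):
    print(a + b)

# trigger() returns the Callables registered for the event
pyhf.events.trigger("myevent")(1, 2)  # prints 3

@pyhf.events.register("test_func")
def test(a, b):
    print(a + b)

@pyhf.events.subscribe("test_func::before")
def precall():
    print("before call")

@pyhf.events.subscribe("test_func::after")
def postcall():
    print("after call")

test(1, 2)  # prints "before call", then 3, then "after call"
```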
gh_patches_debug_36875
rasdani/github-patches
git_diff
huggingface__dataset-viewer-2144
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Give insights to the user when we raise TooBigRows in /rows, /search, or /filter https://huggingface.co/datasets/danielz01/xView2/discussions/2#655dc3ccde919d41622b51bd Currently, we only show ``` Error code: UnexpectedApiError ``` We should say that the rows contents are too heavy and that we don't show them for that reason (> 400MB in that case) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `services/rows/src/rows/routes/rows.py` Content: ``` 1 # SPDX-License-Identifier: Apache-2.0 2 # Copyright 2022 The HuggingFace Authors. 3 4 import logging 5 from typing import Literal, Optional, Union 6 7 from fsspec.implementations.http import HTTPFileSystem 8 from libapi.authentication import auth_check 9 from libapi.exceptions import ApiError, UnexpectedApiError 10 from libapi.request import ( 11 get_request_parameter, 12 get_request_parameter_length, 13 get_request_parameter_offset, 14 ) 15 from libapi.response import create_response 16 from libapi.utils import ( 17 Endpoint, 18 get_json_api_error_response, 19 get_json_error_response, 20 get_json_ok_response, 21 try_backfill_dataset_then_raise, 22 ) 23 from libcommon.parquet_utils import Indexer 24 from libcommon.processing_graph import ProcessingGraph 25 from libcommon.prometheus import StepProfiler 26 from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError 27 from libcommon.storage import StrPath 28 from libcommon.storage_client import StorageClient 29 from libcommon.viewer_utils.features import UNSUPPORTED_FEATURES 30 from starlette.requests import Request 31 from starlette.responses import Response 32 33 logger = logging.getLogger(__name__) 34 35 36 ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST: Union[Literal["all"], list[str]] = ["arabic_speech_corpus"] # for testing 37 38 39 def create_rows_endpoint( 40 processing_graph: ProcessingGraph, 41 cached_assets_base_url: str, 42 storage_client: StorageClient, 43 parquet_metadata_directory: StrPath, 44 cache_max_days: int, 45 max_arrow_data_in_memory: int, 46 hf_endpoint: str, 47 blocked_datasets: list[str], 48 hf_token: Optional[str] = None, 49 hf_jwt_public_keys: Optional[list[str]] = None, 50 hf_jwt_algorithm: Optional[str] = None, 51 external_auth_url: Optional[str] = None, 52 hf_timeout_seconds: Optional[float] = None, 53 max_age_long: int = 0, 54 max_age_short: int = 0, 55 ) -> Endpoint: 56 indexer = Indexer( 57 processing_graph=processing_graph, 58 hf_token=hf_token, 59 parquet_metadata_directory=parquet_metadata_directory, 60 httpfs=HTTPFileSystem(headers={"authorization": f"Bearer {hf_token}"}), 61 max_arrow_data_in_memory=max_arrow_data_in_memory, 62 unsupported_features=UNSUPPORTED_FEATURES, 63 all_columns_supported_datasets_allow_list=ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST, 64 ) 65 66 async def rows_endpoint(request: Request) -> Response: 67 await indexer.httpfs.set_session() 68 revision: Optional[str] = None 69 with StepProfiler(method="rows_endpoint", step="all"): 70 try: 71 with StepProfiler(method="rows_endpoint", step="validate parameters"): 72 dataset = get_request_parameter(request, "dataset", required=True) 73 config = get_request_parameter(request, "config", required=True) 74 split = get_request_parameter(request, "split", required=True) 75 offset = get_request_parameter_offset(request) 76 length = get_request_parameter_length(request) 77 
logging.info( 78 f"/rows, dataset={dataset}, config={config}, split={split}, offset={offset}, length={length}" 79 ) 80 with StepProfiler(method="rows_endpoint", step="check authentication"): 81 # if auth_check fails, it will raise an exception that will be caught below 82 await auth_check( 83 dataset=dataset, 84 external_auth_url=external_auth_url, 85 request=request, 86 hf_jwt_public_keys=hf_jwt_public_keys, 87 hf_jwt_algorithm=hf_jwt_algorithm, 88 hf_timeout_seconds=hf_timeout_seconds, 89 ) 90 try: 91 with StepProfiler(method="rows_endpoint", step="get row groups index"): 92 rows_index = indexer.get_rows_index( 93 dataset=dataset, 94 config=config, 95 split=split, 96 ) 97 revision = rows_index.revision 98 with StepProfiler(method="rows_endpoint", step="query the rows"): 99 pa_table = rows_index.query(offset=offset, length=length) 100 with StepProfiler(method="rows_endpoint", step="transform to a list"): 101 response = await create_response( 102 dataset=dataset, 103 revision=revision, 104 config=config, 105 split=split, 106 cached_assets_base_url=cached_assets_base_url, 107 storage_client=storage_client, 108 pa_table=pa_table, 109 offset=offset, 110 features=rows_index.parquet_index.features, 111 unsupported_columns=rows_index.parquet_index.unsupported_columns, 112 partial=rows_index.parquet_index.partial, 113 num_rows_total=rows_index.parquet_index.num_rows_total, 114 ) 115 except CachedArtifactNotFoundError: 116 config_parquet_processing_steps = processing_graph.get_config_parquet_processing_steps() 117 config_parquet_metadata_processing_steps = ( 118 processing_graph.get_config_parquet_metadata_processing_steps() 119 ) 120 with StepProfiler(method="rows_endpoint", step="try backfill dataset"): 121 try_backfill_dataset_then_raise( 122 processing_steps=config_parquet_metadata_processing_steps 123 + config_parquet_processing_steps, 124 processing_graph=processing_graph, 125 dataset=dataset, 126 hf_endpoint=hf_endpoint, 127 hf_timeout_seconds=hf_timeout_seconds, 128 hf_token=hf_token, 129 cache_max_days=cache_max_days, 130 blocked_datasets=blocked_datasets, 131 ) 132 with StepProfiler(method="rows_endpoint", step="generate the OK response"): 133 return get_json_ok_response(content=response, max_age=max_age_long, revision=revision) 134 except CachedArtifactError as e: 135 content = e.cache_entry_with_details["content"] 136 http_status = e.cache_entry_with_details["http_status"] 137 error_code = e.cache_entry_with_details["error_code"] 138 return get_json_error_response( 139 content=content, 140 status_code=http_status, 141 max_age=max_age_short, 142 error_code=error_code, 143 revision=revision, 144 ) 145 except Exception as e: 146 error = e if isinstance(e, ApiError) else UnexpectedApiError("Unexpected error.", e) 147 with StepProfiler(method="rows_endpoint", step="generate API error response"): 148 return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision) 149 150 return rows_endpoint 151 ``` Path: `libs/libapi/src/libapi/exceptions.py` Content: ``` 1 # SPDX-License-Identifier: Apache-2.0 2 # Copyright 2022 The HuggingFace Authors. 
3 4 import logging 5 from http import HTTPStatus 6 from typing import Literal, Optional 7 8 from libcommon.exceptions import CustomError 9 10 ApiErrorCode = Literal[ 11 "AuthCheckHubRequestError", 12 "ExternalAuthenticatedError", 13 "ExternalUnauthenticatedError", 14 "InvalidParameter", 15 "JWTExpiredSignature", 16 "JWTInvalidClaimRead", 17 "JWTInvalidClaimSub", 18 "JWTInvalidKeyOrAlgorithm", 19 "JWTInvalidSignature", 20 "JWTKeysError", 21 "JWTMissingRequiredClaim", 22 "MissingProcessingStepsError", 23 "MissingRequiredParameter", 24 "ResponseNotFound", 25 "ResponseNotReady", 26 "SearchFeatureNotAvailableError", 27 "TransformRowsProcessingError", 28 "UnexpectedApiError", 29 ] 30 31 32 class ApiError(CustomError): 33 """Base class for exceptions raised by an API service.""" 34 35 def __init__( 36 self, 37 message: str, 38 status_code: HTTPStatus, 39 code: ApiErrorCode, 40 cause: Optional[BaseException] = None, 41 disclose_cause: bool = False, 42 ): 43 super().__init__( 44 message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause 45 ) 46 47 48 class AuthCheckHubRequestError(ApiError): 49 """The external authentication check failed or timed out.""" 50 51 def __init__(self, message: str, cause: Optional[BaseException] = None): 52 super().__init__( 53 message, HTTPStatus.INTERNAL_SERVER_ERROR, "AuthCheckHubRequestError", cause=cause, disclose_cause=False 54 ) 55 56 57 class ExternalAuthenticatedError(ApiError): 58 """The external authentication check failed while the user was authenticated. 59 60 Even if the external authentication server returns 403 in that case, we return 404 because 61 we don't know if the dataset exist or not. It's also coherent with how the Hugging Face Hub works. 62 63 TODO: should we return DatasetNotFoundError instead? maybe the error code is leaking existence of private datasets. 
64 """ 65 66 def __init__(self, message: str): 67 super().__init__(message, HTTPStatus.NOT_FOUND, "ExternalAuthenticatedError") 68 69 70 class ExternalUnauthenticatedError(ApiError): 71 """The external authentication check failed while the user was unauthenticated.""" 72 73 def __init__(self, message: str): 74 super().__init__(message, HTTPStatus.UNAUTHORIZED, "ExternalUnauthenticatedError") 75 76 77 class InvalidParameterError(ApiError): 78 """A parameter has an invalid value.""" 79 80 def __init__(self, message: str): 81 super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, "InvalidParameter") 82 83 84 class JWTKeysError(ApiError): 85 """The public keys for decoding JWT could not be created.""" 86 87 def __init__(self, message: str, cause: Optional[BaseException] = None): 88 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "JWTKeysError", cause=cause, disclose_cause=False) 89 90 91 class MissingRequiredParameterError(ApiError): 92 """A required parameter is missing.""" 93 94 def __init__(self, message: str): 95 super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, "MissingRequiredParameter") 96 97 98 class ResponseNotFoundError(ApiError): 99 """The response has not been found.""" 100 101 def __init__(self, message: str): 102 super().__init__(message, HTTPStatus.NOT_FOUND, "ResponseNotFound") 103 104 105 class ResponseNotReadyError(ApiError): 106 """The response has not been processed yet.""" 107 108 def __init__(self, message: str): 109 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "ResponseNotReady") 110 111 112 class SearchFeatureNotAvailableError(ApiError): 113 """The split does not have search feature enabled.""" 114 115 def __init__(self, message: str, cause: Optional[BaseException] = None): 116 super().__init__(message, HTTPStatus.BAD_REQUEST, "SearchFeatureNotAvailableError", cause, True) 117 118 119 class TransformRowsProcessingError(ApiError): 120 """There was an error when transforming rows to list.""" 121 122 def __init__(self, message: str, cause: Optional[BaseException] = None): 123 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "TransformRowsProcessingError", cause, True) 124 125 126 class JWTExpiredSignature(ApiError): 127 """The JWT signature has expired.""" 128 129 def __init__(self, message: str, cause: Optional[BaseException] = None): 130 super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTExpiredSignature", cause, True) 131 132 133 class JWTInvalidClaimRead(ApiError): 134 """The 'read' claim in the JWT payload is invalid.""" 135 136 def __init__(self, message: str, cause: Optional[BaseException] = None): 137 super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTInvalidClaimRead", cause, True) 138 139 140 class JWTInvalidClaimSub(ApiError): 141 """The 'sub' claim in the JWT payload is invalid.""" 142 143 def __init__(self, message: str, cause: Optional[BaseException] = None): 144 super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTInvalidClaimSub", cause, True) 145 146 147 class JWTInvalidKeyOrAlgorithm(ApiError): 148 """The key and the algorithm used to verify the JWT signature are not compatible.""" 149 150 def __init__(self, message: str, cause: Optional[BaseException] = None): 151 super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTInvalidKeyOrAlgorithm", cause, True) 152 153 154 class JWTInvalidSignature(ApiError): 155 """The JWT signature verification failed.""" 156 157 def __init__(self, message: str, cause: Optional[BaseException] = None): 158 super().__init__(message, HTTPStatus.UNAUTHORIZED, 
"JWTInvalidSignature", cause, True) 159 160 161 class JWTMissingRequiredClaim(ApiError): 162 """A claim is missing in the JWT payload.""" 163 164 def __init__(self, message: str, cause: Optional[BaseException] = None): 165 super().__init__(message, HTTPStatus.UNAUTHORIZED, "JWTMissingRequiredClaim", cause, True) 166 167 168 class UnexpectedApiError(ApiError): 169 """The server raised an unexpected error.""" 170 171 def __init__(self, message: str, cause: Optional[BaseException] = None): 172 logging.error(message, exc_info=cause) 173 super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, "UnexpectedApiError", cause) 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libs/libapi/src/libapi/exceptions.py b/libs/libapi/src/libapi/exceptions.py --- a/libs/libapi/src/libapi/exceptions.py +++ b/libs/libapi/src/libapi/exceptions.py @@ -116,6 +116,15 @@ super().__init__(message, HTTPStatus.BAD_REQUEST, "SearchFeatureNotAvailableError", cause, True) +class TooBigContentError(ApiError): + """The content size in bytes is bigger than the supported value.""" + + def __init__(self, message: str, cause: Optional[BaseException] = None): + super().__init__( + message, HTTPStatus.INTERNAL_SERVER_ERROR, "TransformRowsProcessingError", cause=cause, disclose_cause=True + ) + + class TransformRowsProcessingError(ApiError): """There was an error when transforming rows to list.""" diff --git a/services/rows/src/rows/routes/rows.py b/services/rows/src/rows/routes/rows.py --- a/services/rows/src/rows/routes/rows.py +++ b/services/rows/src/rows/routes/rows.py @@ -6,7 +6,7 @@ from fsspec.implementations.http import HTTPFileSystem from libapi.authentication import auth_check -from libapi.exceptions import ApiError, UnexpectedApiError +from libapi.exceptions import ApiError, TooBigContentError, UnexpectedApiError from libapi.request import ( get_request_parameter, get_request_parameter_length, @@ -20,7 +20,7 @@ get_json_ok_response, try_backfill_dataset_then_raise, ) -from libcommon.parquet_utils import Indexer +from libcommon.parquet_utils import Indexer, TooBigRows from libcommon.processing_graph import ProcessingGraph from libcommon.prometheus import StepProfiler from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError @@ -96,7 +96,10 @@ ) revision = rows_index.revision with StepProfiler(method="rows_endpoint", step="query the rows"): - pa_table = rows_index.query(offset=offset, length=length) + try: + pa_table = rows_index.query(offset=offset, length=length) + except TooBigRows as err: + raise TooBigContentError(str(err)) from None with StepProfiler(method="rows_endpoint", step="transform to a list"): response = await create_response( dataset=dataset,
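The gist of this patch, reduced to a standalone sketch: catch the internal `TooBigRows` error from the parquet index and re-raise it as an API error whose message reaches the user, instead of letting it surface as a generic `UnexpectedApiError`. Class names mirror the real ones, but the size limit, check and messages below are placeholders; the actual logic lives in `libcommon.parquet_utils` and the `/rows` endpoint.

```python
from http import HTTPStatus

class TooBigRows(Exception):
    """Stand-in for libcommon.parquet_utils.TooBigRows."""

class TooBigContentError(Exception):
    """Stand-in for the new libapi error; carries a user-facing message."""
    status_code = HTTPStatus.INTERNAL_SERVER_ERROR

def query_rows(num_bytes: int, max_bytes: int = 400_000_000) -> None:
    # placeholder for rows_index.query(); the real limit is max_arrow_data_in_memory
    if num_bytes > max_bytes:
        raise TooBigRows(f"Rows are too big to be read: {num_bytes} bytes (max {max_bytes})")

def rows_endpoint(num_bytes: int) -> None:
    try:
        query_rows(num_bytes)
    except TooBigRows as err:
        # surface the size details instead of a generic UnexpectedApiError
        raise TooBigContentError(str(err)) from None

try:
    rows_endpoint(500_000_000)
except TooBigContentError as err:
    print(err)  # the explanatory message now reaches the API response
```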
{"golden_diff": "diff --git a/libs/libapi/src/libapi/exceptions.py b/libs/libapi/src/libapi/exceptions.py\n--- a/libs/libapi/src/libapi/exceptions.py\n+++ b/libs/libapi/src/libapi/exceptions.py\n@@ -116,6 +116,15 @@\n super().__init__(message, HTTPStatus.BAD_REQUEST, \"SearchFeatureNotAvailableError\", cause, True)\n \n \n+class TooBigContentError(ApiError):\n+ \"\"\"The content size in bytes is bigger than the supported value.\"\"\"\n+\n+ def __init__(self, message: str, cause: Optional[BaseException] = None):\n+ super().__init__(\n+ message, HTTPStatus.INTERNAL_SERVER_ERROR, \"TransformRowsProcessingError\", cause=cause, disclose_cause=True\n+ )\n+\n+\n class TransformRowsProcessingError(ApiError):\n \"\"\"There was an error when transforming rows to list.\"\"\"\n \ndiff --git a/services/rows/src/rows/routes/rows.py b/services/rows/src/rows/routes/rows.py\n--- a/services/rows/src/rows/routes/rows.py\n+++ b/services/rows/src/rows/routes/rows.py\n@@ -6,7 +6,7 @@\n \n from fsspec.implementations.http import HTTPFileSystem\n from libapi.authentication import auth_check\n-from libapi.exceptions import ApiError, UnexpectedApiError\n+from libapi.exceptions import ApiError, TooBigContentError, UnexpectedApiError\n from libapi.request import (\n get_request_parameter,\n get_request_parameter_length,\n@@ -20,7 +20,7 @@\n get_json_ok_response,\n try_backfill_dataset_then_raise,\n )\n-from libcommon.parquet_utils import Indexer\n+from libcommon.parquet_utils import Indexer, TooBigRows\n from libcommon.processing_graph import ProcessingGraph\n from libcommon.prometheus import StepProfiler\n from libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError\n@@ -96,7 +96,10 @@\n )\n revision = rows_index.revision\n with StepProfiler(method=\"rows_endpoint\", step=\"query the rows\"):\n- pa_table = rows_index.query(offset=offset, length=length)\n+ try:\n+ pa_table = rows_index.query(offset=offset, length=length)\n+ except TooBigRows as err:\n+ raise TooBigContentError(str(err)) from None\n with StepProfiler(method=\"rows_endpoint\", step=\"transform to a list\"):\n response = await create_response(\n dataset=dataset,\n", "issue": "Give insights to the user when we raise TooBigRows in /rows, /search, or /filter\nhttps://huggingface.co/datasets/danielz01/xView2/discussions/2#655dc3ccde919d41622b51bd\r\n\r\nCurrently, we only show\r\n\r\n```\r\nError code: UnexpectedApiError\r\n```\r\n\r\nWe should say that the rows contents are too heavy and that we don't show them for that reason (> 400MB in that case)\n", "before_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom typing import Literal, Optional, Union\n\nfrom fsspec.implementations.http import HTTPFileSystem\nfrom libapi.authentication import auth_check\nfrom libapi.exceptions import ApiError, UnexpectedApiError\nfrom libapi.request import (\n get_request_parameter,\n get_request_parameter_length,\n get_request_parameter_offset,\n)\nfrom libapi.response import create_response\nfrom libapi.utils import (\n Endpoint,\n get_json_api_error_response,\n get_json_error_response,\n get_json_ok_response,\n try_backfill_dataset_then_raise,\n)\nfrom libcommon.parquet_utils import Indexer\nfrom libcommon.processing_graph import ProcessingGraph\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError\nfrom libcommon.storage import StrPath\nfrom libcommon.storage_client import StorageClient\nfrom 
libcommon.viewer_utils.features import UNSUPPORTED_FEATURES\nfrom starlette.requests import Request\nfrom starlette.responses import Response\n\nlogger = logging.getLogger(__name__)\n\n\nALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST: Union[Literal[\"all\"], list[str]] = [\"arabic_speech_corpus\"] # for testing\n\n\ndef create_rows_endpoint(\n processing_graph: ProcessingGraph,\n cached_assets_base_url: str,\n storage_client: StorageClient,\n parquet_metadata_directory: StrPath,\n cache_max_days: int,\n max_arrow_data_in_memory: int,\n hf_endpoint: str,\n blocked_datasets: list[str],\n hf_token: Optional[str] = None,\n hf_jwt_public_keys: Optional[list[str]] = None,\n hf_jwt_algorithm: Optional[str] = None,\n external_auth_url: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n max_age_long: int = 0,\n max_age_short: int = 0,\n) -> Endpoint:\n indexer = Indexer(\n processing_graph=processing_graph,\n hf_token=hf_token,\n parquet_metadata_directory=parquet_metadata_directory,\n httpfs=HTTPFileSystem(headers={\"authorization\": f\"Bearer {hf_token}\"}),\n max_arrow_data_in_memory=max_arrow_data_in_memory,\n unsupported_features=UNSUPPORTED_FEATURES,\n all_columns_supported_datasets_allow_list=ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST,\n )\n\n async def rows_endpoint(request: Request) -> Response:\n await indexer.httpfs.set_session()\n revision: Optional[str] = None\n with StepProfiler(method=\"rows_endpoint\", step=\"all\"):\n try:\n with StepProfiler(method=\"rows_endpoint\", step=\"validate parameters\"):\n dataset = get_request_parameter(request, \"dataset\", required=True)\n config = get_request_parameter(request, \"config\", required=True)\n split = get_request_parameter(request, \"split\", required=True)\n offset = get_request_parameter_offset(request)\n length = get_request_parameter_length(request)\n logging.info(\n f\"/rows, dataset={dataset}, config={config}, split={split}, offset={offset}, length={length}\"\n )\n with StepProfiler(method=\"rows_endpoint\", step=\"check authentication\"):\n # if auth_check fails, it will raise an exception that will be caught below\n await auth_check(\n dataset=dataset,\n external_auth_url=external_auth_url,\n request=request,\n hf_jwt_public_keys=hf_jwt_public_keys,\n hf_jwt_algorithm=hf_jwt_algorithm,\n hf_timeout_seconds=hf_timeout_seconds,\n )\n try:\n with StepProfiler(method=\"rows_endpoint\", step=\"get row groups index\"):\n rows_index = indexer.get_rows_index(\n dataset=dataset,\n config=config,\n split=split,\n )\n revision = rows_index.revision\n with StepProfiler(method=\"rows_endpoint\", step=\"query the rows\"):\n pa_table = rows_index.query(offset=offset, length=length)\n with StepProfiler(method=\"rows_endpoint\", step=\"transform to a list\"):\n response = await create_response(\n dataset=dataset,\n revision=revision,\n config=config,\n split=split,\n cached_assets_base_url=cached_assets_base_url,\n storage_client=storage_client,\n pa_table=pa_table,\n offset=offset,\n features=rows_index.parquet_index.features,\n unsupported_columns=rows_index.parquet_index.unsupported_columns,\n partial=rows_index.parquet_index.partial,\n num_rows_total=rows_index.parquet_index.num_rows_total,\n )\n except CachedArtifactNotFoundError:\n config_parquet_processing_steps = processing_graph.get_config_parquet_processing_steps()\n config_parquet_metadata_processing_steps = (\n processing_graph.get_config_parquet_metadata_processing_steps()\n )\n with StepProfiler(method=\"rows_endpoint\", step=\"try backfill dataset\"):\n 
try_backfill_dataset_then_raise(\n processing_steps=config_parquet_metadata_processing_steps\n + config_parquet_processing_steps,\n processing_graph=processing_graph,\n dataset=dataset,\n hf_endpoint=hf_endpoint,\n hf_timeout_seconds=hf_timeout_seconds,\n hf_token=hf_token,\n cache_max_days=cache_max_days,\n blocked_datasets=blocked_datasets,\n )\n with StepProfiler(method=\"rows_endpoint\", step=\"generate the OK response\"):\n return get_json_ok_response(content=response, max_age=max_age_long, revision=revision)\n except CachedArtifactError as e:\n content = e.cache_entry_with_details[\"content\"]\n http_status = e.cache_entry_with_details[\"http_status\"]\n error_code = e.cache_entry_with_details[\"error_code\"]\n return get_json_error_response(\n content=content,\n status_code=http_status,\n max_age=max_age_short,\n error_code=error_code,\n revision=revision,\n )\n except Exception as e:\n error = e if isinstance(e, ApiError) else UnexpectedApiError(\"Unexpected error.\", e)\n with StepProfiler(method=\"rows_endpoint\", step=\"generate API error response\"):\n return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)\n\n return rows_endpoint\n", "path": "services/rows/src/rows/routes/rows.py"}, {"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom http import HTTPStatus\nfrom typing import Literal, Optional\n\nfrom libcommon.exceptions import CustomError\n\nApiErrorCode = Literal[\n \"AuthCheckHubRequestError\",\n \"ExternalAuthenticatedError\",\n \"ExternalUnauthenticatedError\",\n \"InvalidParameter\",\n \"JWTExpiredSignature\",\n \"JWTInvalidClaimRead\",\n \"JWTInvalidClaimSub\",\n \"JWTInvalidKeyOrAlgorithm\",\n \"JWTInvalidSignature\",\n \"JWTKeysError\",\n \"JWTMissingRequiredClaim\",\n \"MissingProcessingStepsError\",\n \"MissingRequiredParameter\",\n \"ResponseNotFound\",\n \"ResponseNotReady\",\n \"SearchFeatureNotAvailableError\",\n \"TransformRowsProcessingError\",\n \"UnexpectedApiError\",\n]\n\n\nclass ApiError(CustomError):\n \"\"\"Base class for exceptions raised by an API service.\"\"\"\n\n def __init__(\n self,\n message: str,\n status_code: HTTPStatus,\n code: ApiErrorCode,\n cause: Optional[BaseException] = None,\n disclose_cause: bool = False,\n ):\n super().__init__(\n message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause\n )\n\n\nclass AuthCheckHubRequestError(ApiError):\n \"\"\"The external authentication check failed or timed out.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(\n message, HTTPStatus.INTERNAL_SERVER_ERROR, \"AuthCheckHubRequestError\", cause=cause, disclose_cause=False\n )\n\n\nclass ExternalAuthenticatedError(ApiError):\n \"\"\"The external authentication check failed while the user was authenticated.\n\n Even if the external authentication server returns 403 in that case, we return 404 because\n we don't know if the dataset exist or not. It's also coherent with how the Hugging Face Hub works.\n\n TODO: should we return DatasetNotFoundError instead? 
maybe the error code is leaking existence of private datasets.\n \"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.NOT_FOUND, \"ExternalAuthenticatedError\")\n\n\nclass ExternalUnauthenticatedError(ApiError):\n \"\"\"The external authentication check failed while the user was unauthenticated.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"ExternalUnauthenticatedError\")\n\n\nclass InvalidParameterError(ApiError):\n \"\"\"A parameter has an invalid value.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, \"InvalidParameter\")\n\n\nclass JWTKeysError(ApiError):\n \"\"\"The public keys for decoding JWT could not be created.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"JWTKeysError\", cause=cause, disclose_cause=False)\n\n\nclass MissingRequiredParameterError(ApiError):\n \"\"\"A required parameter is missing.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, \"MissingRequiredParameter\")\n\n\nclass ResponseNotFoundError(ApiError):\n \"\"\"The response has not been found.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.NOT_FOUND, \"ResponseNotFound\")\n\n\nclass ResponseNotReadyError(ApiError):\n \"\"\"The response has not been processed yet.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"ResponseNotReady\")\n\n\nclass SearchFeatureNotAvailableError(ApiError):\n \"\"\"The split does not have search feature enabled.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.BAD_REQUEST, \"SearchFeatureNotAvailableError\", cause, True)\n\n\nclass TransformRowsProcessingError(ApiError):\n \"\"\"There was an error when transforming rows to list.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"TransformRowsProcessingError\", cause, True)\n\n\nclass JWTExpiredSignature(ApiError):\n \"\"\"The JWT signature has expired.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTExpiredSignature\", cause, True)\n\n\nclass JWTInvalidClaimRead(ApiError):\n \"\"\"The 'read' claim in the JWT payload is invalid.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTInvalidClaimRead\", cause, True)\n\n\nclass JWTInvalidClaimSub(ApiError):\n \"\"\"The 'sub' claim in the JWT payload is invalid.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTInvalidClaimSub\", cause, True)\n\n\nclass JWTInvalidKeyOrAlgorithm(ApiError):\n \"\"\"The key and the algorithm used to verify the JWT signature are not compatible.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTInvalidKeyOrAlgorithm\", cause, True)\n\n\nclass JWTInvalidSignature(ApiError):\n \"\"\"The JWT signature verification failed.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, 
\"JWTInvalidSignature\", cause, True)\n\n\nclass JWTMissingRequiredClaim(ApiError):\n \"\"\"A claim is missing in the JWT payload.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTMissingRequiredClaim\", cause, True)\n\n\nclass UnexpectedApiError(ApiError):\n \"\"\"The server raised an unexpected error.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n logging.error(message, exc_info=cause)\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"UnexpectedApiError\", cause)\n", "path": "libs/libapi/src/libapi/exceptions.py"}], "after_files": [{"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom typing import Literal, Optional, Union\n\nfrom fsspec.implementations.http import HTTPFileSystem\nfrom libapi.authentication import auth_check\nfrom libapi.exceptions import ApiError, TooBigContentError, UnexpectedApiError\nfrom libapi.request import (\n get_request_parameter,\n get_request_parameter_length,\n get_request_parameter_offset,\n)\nfrom libapi.response import create_response\nfrom libapi.utils import (\n Endpoint,\n get_json_api_error_response,\n get_json_error_response,\n get_json_ok_response,\n try_backfill_dataset_then_raise,\n)\nfrom libcommon.parquet_utils import Indexer, TooBigRows\nfrom libcommon.processing_graph import ProcessingGraph\nfrom libcommon.prometheus import StepProfiler\nfrom libcommon.simple_cache import CachedArtifactError, CachedArtifactNotFoundError\nfrom libcommon.storage import StrPath\nfrom libcommon.storage_client import StorageClient\nfrom libcommon.viewer_utils.features import UNSUPPORTED_FEATURES\nfrom starlette.requests import Request\nfrom starlette.responses import Response\n\nlogger = logging.getLogger(__name__)\n\n\nALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST: Union[Literal[\"all\"], list[str]] = [\"arabic_speech_corpus\"] # for testing\n\n\ndef create_rows_endpoint(\n processing_graph: ProcessingGraph,\n cached_assets_base_url: str,\n storage_client: StorageClient,\n parquet_metadata_directory: StrPath,\n cache_max_days: int,\n max_arrow_data_in_memory: int,\n hf_endpoint: str,\n blocked_datasets: list[str],\n hf_token: Optional[str] = None,\n hf_jwt_public_keys: Optional[list[str]] = None,\n hf_jwt_algorithm: Optional[str] = None,\n external_auth_url: Optional[str] = None,\n hf_timeout_seconds: Optional[float] = None,\n max_age_long: int = 0,\n max_age_short: int = 0,\n) -> Endpoint:\n indexer = Indexer(\n processing_graph=processing_graph,\n hf_token=hf_token,\n parquet_metadata_directory=parquet_metadata_directory,\n httpfs=HTTPFileSystem(headers={\"authorization\": f\"Bearer {hf_token}\"}),\n max_arrow_data_in_memory=max_arrow_data_in_memory,\n unsupported_features=UNSUPPORTED_FEATURES,\n all_columns_supported_datasets_allow_list=ALL_COLUMNS_SUPPORTED_DATASETS_ALLOW_LIST,\n )\n\n async def rows_endpoint(request: Request) -> Response:\n await indexer.httpfs.set_session()\n revision: Optional[str] = None\n with StepProfiler(method=\"rows_endpoint\", step=\"all\"):\n try:\n with StepProfiler(method=\"rows_endpoint\", step=\"validate parameters\"):\n dataset = get_request_parameter(request, \"dataset\", required=True)\n config = get_request_parameter(request, \"config\", required=True)\n split = get_request_parameter(request, \"split\", required=True)\n offset = get_request_parameter_offset(request)\n length = get_request_parameter_length(request)\n 
logging.info(\n f\"/rows, dataset={dataset}, config={config}, split={split}, offset={offset}, length={length}\"\n )\n with StepProfiler(method=\"rows_endpoint\", step=\"check authentication\"):\n # if auth_check fails, it will raise an exception that will be caught below\n await auth_check(\n dataset=dataset,\n external_auth_url=external_auth_url,\n request=request,\n hf_jwt_public_keys=hf_jwt_public_keys,\n hf_jwt_algorithm=hf_jwt_algorithm,\n hf_timeout_seconds=hf_timeout_seconds,\n )\n try:\n with StepProfiler(method=\"rows_endpoint\", step=\"get row groups index\"):\n rows_index = indexer.get_rows_index(\n dataset=dataset,\n config=config,\n split=split,\n )\n revision = rows_index.revision\n with StepProfiler(method=\"rows_endpoint\", step=\"query the rows\"):\n try:\n pa_table = rows_index.query(offset=offset, length=length)\n except TooBigRows as err:\n raise TooBigContentError(str(err)) from None\n with StepProfiler(method=\"rows_endpoint\", step=\"transform to a list\"):\n response = await create_response(\n dataset=dataset,\n revision=revision,\n config=config,\n split=split,\n cached_assets_base_url=cached_assets_base_url,\n storage_client=storage_client,\n pa_table=pa_table,\n offset=offset,\n features=rows_index.parquet_index.features,\n unsupported_columns=rows_index.parquet_index.unsupported_columns,\n partial=rows_index.parquet_index.partial,\n num_rows_total=rows_index.parquet_index.num_rows_total,\n )\n except CachedArtifactNotFoundError:\n config_parquet_processing_steps = processing_graph.get_config_parquet_processing_steps()\n config_parquet_metadata_processing_steps = (\n processing_graph.get_config_parquet_metadata_processing_steps()\n )\n with StepProfiler(method=\"rows_endpoint\", step=\"try backfill dataset\"):\n try_backfill_dataset_then_raise(\n processing_steps=config_parquet_metadata_processing_steps\n + config_parquet_processing_steps,\n processing_graph=processing_graph,\n dataset=dataset,\n hf_endpoint=hf_endpoint,\n hf_timeout_seconds=hf_timeout_seconds,\n hf_token=hf_token,\n cache_max_days=cache_max_days,\n blocked_datasets=blocked_datasets,\n )\n with StepProfiler(method=\"rows_endpoint\", step=\"generate the OK response\"):\n return get_json_ok_response(content=response, max_age=max_age_long, revision=revision)\n except CachedArtifactError as e:\n content = e.cache_entry_with_details[\"content\"]\n http_status = e.cache_entry_with_details[\"http_status\"]\n error_code = e.cache_entry_with_details[\"error_code\"]\n return get_json_error_response(\n content=content,\n status_code=http_status,\n max_age=max_age_short,\n error_code=error_code,\n revision=revision,\n )\n except Exception as e:\n error = e if isinstance(e, ApiError) else UnexpectedApiError(\"Unexpected error.\", e)\n with StepProfiler(method=\"rows_endpoint\", step=\"generate API error response\"):\n return get_json_api_error_response(error=error, max_age=max_age_short, revision=revision)\n\n return rows_endpoint\n", "path": "services/rows/src/rows/routes/rows.py"}, {"content": "# SPDX-License-Identifier: Apache-2.0\n# Copyright 2022 The HuggingFace Authors.\n\nimport logging\nfrom http import HTTPStatus\nfrom typing import Literal, Optional\n\nfrom libcommon.exceptions import CustomError\n\nApiErrorCode = Literal[\n \"AuthCheckHubRequestError\",\n \"ExternalAuthenticatedError\",\n \"ExternalUnauthenticatedError\",\n \"InvalidParameter\",\n \"JWTExpiredSignature\",\n \"JWTInvalidClaimRead\",\n \"JWTInvalidClaimSub\",\n \"JWTInvalidKeyOrAlgorithm\",\n \"JWTInvalidSignature\",\n 
\"JWTKeysError\",\n \"JWTMissingRequiredClaim\",\n \"MissingProcessingStepsError\",\n \"MissingRequiredParameter\",\n \"ResponseNotFound\",\n \"ResponseNotReady\",\n \"SearchFeatureNotAvailableError\",\n \"TransformRowsProcessingError\",\n \"UnexpectedApiError\",\n]\n\n\nclass ApiError(CustomError):\n \"\"\"Base class for exceptions raised by an API service.\"\"\"\n\n def __init__(\n self,\n message: str,\n status_code: HTTPStatus,\n code: ApiErrorCode,\n cause: Optional[BaseException] = None,\n disclose_cause: bool = False,\n ):\n super().__init__(\n message=message, status_code=status_code, code=code, cause=cause, disclose_cause=disclose_cause\n )\n\n\nclass AuthCheckHubRequestError(ApiError):\n \"\"\"The external authentication check failed or timed out.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(\n message, HTTPStatus.INTERNAL_SERVER_ERROR, \"AuthCheckHubRequestError\", cause=cause, disclose_cause=False\n )\n\n\nclass ExternalAuthenticatedError(ApiError):\n \"\"\"The external authentication check failed while the user was authenticated.\n\n Even if the external authentication server returns 403 in that case, we return 404 because\n we don't know if the dataset exist or not. It's also coherent with how the Hugging Face Hub works.\n\n TODO: should we return DatasetNotFoundError instead? maybe the error code is leaking existence of private datasets.\n \"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.NOT_FOUND, \"ExternalAuthenticatedError\")\n\n\nclass ExternalUnauthenticatedError(ApiError):\n \"\"\"The external authentication check failed while the user was unauthenticated.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"ExternalUnauthenticatedError\")\n\n\nclass InvalidParameterError(ApiError):\n \"\"\"A parameter has an invalid value.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, \"InvalidParameter\")\n\n\nclass JWTKeysError(ApiError):\n \"\"\"The public keys for decoding JWT could not be created.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"JWTKeysError\", cause=cause, disclose_cause=False)\n\n\nclass MissingRequiredParameterError(ApiError):\n \"\"\"A required parameter is missing.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.UNPROCESSABLE_ENTITY, \"MissingRequiredParameter\")\n\n\nclass ResponseNotFoundError(ApiError):\n \"\"\"The response has not been found.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.NOT_FOUND, \"ResponseNotFound\")\n\n\nclass ResponseNotReadyError(ApiError):\n \"\"\"The response has not been processed yet.\"\"\"\n\n def __init__(self, message: str):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"ResponseNotReady\")\n\n\nclass SearchFeatureNotAvailableError(ApiError):\n \"\"\"The split does not have search feature enabled.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.BAD_REQUEST, \"SearchFeatureNotAvailableError\", cause, True)\n\n\nclass TooBigContentError(ApiError):\n \"\"\"The content size in bytes is bigger than the supported value.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(\n message, HTTPStatus.INTERNAL_SERVER_ERROR, 
\"TransformRowsProcessingError\", cause=cause, disclose_cause=True\n )\n\n\nclass TransformRowsProcessingError(ApiError):\n \"\"\"There was an error when transforming rows to list.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"TransformRowsProcessingError\", cause, True)\n\n\nclass JWTExpiredSignature(ApiError):\n \"\"\"The JWT signature has expired.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTExpiredSignature\", cause, True)\n\n\nclass JWTInvalidClaimRead(ApiError):\n \"\"\"The 'read' claim in the JWT payload is invalid.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTInvalidClaimRead\", cause, True)\n\n\nclass JWTInvalidClaimSub(ApiError):\n \"\"\"The 'sub' claim in the JWT payload is invalid.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTInvalidClaimSub\", cause, True)\n\n\nclass JWTInvalidKeyOrAlgorithm(ApiError):\n \"\"\"The key and the algorithm used to verify the JWT signature are not compatible.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTInvalidKeyOrAlgorithm\", cause, True)\n\n\nclass JWTInvalidSignature(ApiError):\n \"\"\"The JWT signature verification failed.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTInvalidSignature\", cause, True)\n\n\nclass JWTMissingRequiredClaim(ApiError):\n \"\"\"A claim is missing in the JWT payload.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n super().__init__(message, HTTPStatus.UNAUTHORIZED, \"JWTMissingRequiredClaim\", cause, True)\n\n\nclass UnexpectedApiError(ApiError):\n \"\"\"The server raised an unexpected error.\"\"\"\n\n def __init__(self, message: str, cause: Optional[BaseException] = None):\n logging.error(message, exc_info=cause)\n super().__init__(message, HTTPStatus.INTERNAL_SERVER_ERROR, \"UnexpectedApiError\", cause)\n", "path": "libs/libapi/src/libapi/exceptions.py"}]}
3818
528
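The record above patches the `/rows` endpoint so that the storage-level `TooBigRows` exception raised by `rows_index.query()` is translated into an API-level `TooBigContentError` and surfaced as a JSON error response. The sketch below shows that error-translation pattern in isolation; the class bodies and the `query_rows` helper are simplified stand-ins, not the project's real implementation.

```python
from http import HTTPStatus


class TooBigRows(Exception):
    """Stand-in for the parquet-index error raised when a slice exceeds the memory budget."""


class TooBigContentError(Exception):
    """Stand-in for the API-level error carrying an HTTP status for the response layer."""

    def __init__(self, message: str):
        super().__init__(message)
        self.status_code = HTTPStatus.INTERNAL_SERVER_ERROR


def query_rows(rows_index, offset: int, length: int):
    # Translate the storage-layer error into the API-layer one and drop the
    # original traceback with "from None", as the patch above does.
    try:
        return rows_index.query(offset=offset, length=length)
    except TooBigRows as err:
        raise TooBigContentError(str(err)) from None
```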
gh_patches_debug_11482
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-228
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Capture details of Celery Chains and Chords Celery has some more advanced features to join multiple jobs into one. The agent needs testing and investigation into how they can be best instrumented. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/celery.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import datetime as dt 5 6 from celery.signals import before_task_publish, task_postrun, task_prerun 7 8 import scout_apm.core 9 from scout_apm.compat import datetime_to_timestamp 10 from scout_apm.core.tracked_request import TrackedRequest 11 12 13 def before_publish_callback(headers=None, properties=None, **kwargs): 14 if "scout_task_start" not in headers: 15 headers["scout_task_start"] = datetime_to_timestamp(dt.datetime.utcnow()) 16 17 18 def prerun_callback(task=None, **kwargs): 19 tracked_request = TrackedRequest.instance() 20 tracked_request.mark_real_request() 21 22 start = getattr(task.request, "scout_task_start", None) 23 if start is not None: 24 now = datetime_to_timestamp(dt.datetime.utcnow()) 25 try: 26 queue_time = now - start 27 except TypeError: 28 pass 29 else: 30 tracked_request.tag("queue_time", queue_time) 31 32 delivery_info = task.request.delivery_info 33 tracked_request.tag("is_eager", delivery_info.get("is_eager", False)) 34 tracked_request.tag("exchange", delivery_info.get("exchange", "unknown")) 35 tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown")) 36 tracked_request.tag("queue", delivery_info.get("queue", "unknown")) 37 38 tracked_request.start_span(operation=("Job/" + task.name)) 39 40 41 def postrun_callback(task=None, **kwargs): 42 tracked_request = TrackedRequest.instance() 43 tracked_request.stop_span() 44 45 46 def install(): 47 installed = scout_apm.core.install() 48 if not installed: 49 return 50 51 before_task_publish.connect(before_publish_callback) 52 task_prerun.connect(prerun_callback) 53 task_postrun.connect(postrun_callback) 54 55 56 def uninstall(): 57 before_task_publish.disconnect(before_publish_callback) 58 task_prerun.disconnect(prerun_callback) 59 task_postrun.disconnect(postrun_callback) 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py
--- a/src/scout_apm/celery.py
+++ b/src/scout_apm/celery.py
@@ -29,6 +29,13 @@
         else:
             tracked_request.tag("queue_time", queue_time)
 
+    task_id = getattr(task.request, "id", None)
+    if task_id:
+        tracked_request.tag("task_id", task_id)
+    parent_task_id = getattr(task.request, "parent_id", None)
+    if parent_task_id:
+        tracked_request.tag("parent_task_id", parent_task_id)
+
     delivery_info = task.request.delivery_info
     tracked_request.tag("is_eager", delivery_info.get("is_eager", False))
     tracked_request.tag("exchange", delivery_info.get("exchange", "unknown"))
{"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -29,6 +29,13 @@\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n \n+ task_id = getattr(task.request, \"id\", None)\n+ if task_id:\n+ tracked_request.tag(\"task_id\", task_id)\n+ parent_task_id = getattr(task.request, \"parent_id\", None)\n+ if parent_task_id:\n+ tracked_request.tag(\"parent_task_id\", parent_task_id)\n+\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n", "issue": "Capture details of Celery Chains and Chords\nCelery has some more advanced features to join multiple jobs into one. The agent needs testing and investigation into how they can be best instrumented.\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\n\nfrom celery.signals import before_task_publish, task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\ndef before_publish_callback(headers=None, properties=None, **kwargs):\n if \"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n start = getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_publish_callback)\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_publish_callback)\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport datetime as dt\n\nfrom celery.signals import before_task_publish, task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.compat import datetime_to_timestamp\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\ndef before_publish_callback(headers=None, properties=None, **kwargs):\n if \"scout_task_start\" not in headers:\n headers[\"scout_task_start\"] = datetime_to_timestamp(dt.datetime.utcnow())\n\n\ndef prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n start = 
getattr(task.request, \"scout_task_start\", None)\n if start is not None:\n now = datetime_to_timestamp(dt.datetime.utcnow())\n try:\n queue_time = now - start\n except TypeError:\n pass\n else:\n tracked_request.tag(\"queue_time\", queue_time)\n\n task_id = getattr(task.request, \"id\", None)\n if task_id:\n tracked_request.tag(\"task_id\", task_id)\n parent_task_id = getattr(task.request, \"parent_id\", None)\n if parent_task_id:\n tracked_request.tag(\"parent_task_id\", parent_task_id)\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n before_task_publish.connect(before_publish_callback)\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n before_task_publish.disconnect(before_publish_callback)\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}]}
837
190
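As a rough illustration of what the patch above records, the snippet below shows how a Celery `task_prerun` handler can read the current task's id and its parent's id from `task.request`; `collected_tags` is a hypothetical stand-in for the agent's `TrackedRequest` tagging, not scout_apm's real API.

```python
from celery.signals import task_prerun

collected_tags = {}  # hypothetical stand-in for the agent's request tags


@task_prerun.connect
def tag_task_relationships(task=None, **kwargs):
    # `id` identifies this task run; `parent_id` is set for members of
    # chains and chords, which is what lets related jobs be correlated.
    task_id = getattr(task.request, "id", None)
    if task_id:
        collected_tags["task_id"] = task_id
    parent_task_id = getattr(task.request, "parent_id", None)
    if parent_task_id:
        collected_tags["parent_task_id"] = parent_task_id
```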
gh_patches_debug_20353
rasdani/github-patches
git_diff
WeblateOrg__weblate-10604
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Some languages don't have all strings available for translation ### Describe the issue My project is here: https://hosted.weblate.org/projects/feeder/android-strings A few languages Polish, French and Chinese (Simplified), are missing a dozen strings. One example is the string `other_minutes` which is not available for translation in these languages. I have tried re-scanning strings and similar with no change. ### I already tried - [X] I've read and searched [the documentation](https://docs.weblate.org/). - [X] I've searched for similar issues in this repository. ### Steps to reproduce the behavior Not sure how to reproduce it but it is happening here :https://hosted.weblate.org/projects/feeder/android-strings look at string `other_minutes`, it is missing from Polish, French, and Chinese (Simplified) ### Expected behavior All strings should be available for translation in all languages. ### Screenshots _No response_ ### Exception traceback _No response_ ### How do you run Weblate? weblate.org service ### Weblate versions _No response_ ### Weblate deploy checks _No response_ ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `weblate/addons/cleanup.py` Content: ``` 1 # Copyright © Michal Čihař <[email protected]> 2 # 3 # SPDX-License-Identifier: GPL-3.0-or-later 4 5 from django.utils.translation import gettext_lazy 6 7 from weblate.addons.base import UpdateBaseAddon 8 from weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT 9 from weblate.trans.exceptions import FileParseError 10 11 12 class BaseCleanupAddon(UpdateBaseAddon): 13 @classmethod 14 def can_install(cls, component, user): 15 if not component.has_template(): 16 return False 17 return super().can_install(component, user) 18 19 20 class CleanupAddon(BaseCleanupAddon): 21 name = "weblate.cleanup.generic" 22 verbose = gettext_lazy("Cleanup translation files") 23 description = gettext_lazy( 24 "Update all translation files to match the monolingual base file. " 25 "For most file formats, this means removing stale translation keys " 26 "no longer present in the base file." 27 ) 28 icon = "eraser.svg" 29 events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE) 30 31 def update_translations(self, component, previous_head): 32 for translation in self.iterate_translations(component): 33 filenames = translation.store.cleanup_unused() 34 if filenames is None: 35 continue 36 self.extra_files.extend(filenames) 37 translation.store_hash() 38 39 def pre_commit(self, translation, author): 40 if translation.is_source and not translation.component.intermediate: 41 return 42 try: 43 filenames = translation.store.cleanup_unused() 44 except FileParseError: 45 return 46 if filenames is not None: 47 self.extra_files.extend(filenames) 48 translation.store_hash() 49 50 51 class RemoveBlankAddon(BaseCleanupAddon): 52 name = "weblate.cleanup.blank" 53 verbose = gettext_lazy("Remove blank strings") 54 description = gettext_lazy( 55 "Removes strings without a translation from translation files." 
56 ) 57 events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE) 58 icon = "eraser.svg" 59 60 def update_translations(self, component, previous_head): 61 for translation in self.iterate_translations(component): 62 filenames = translation.store.cleanup_blank() 63 if filenames is None: 64 continue 65 self.extra_files.extend(filenames) 66 translation.store_hash() 67 68 def post_commit(self, component): 69 self.post_update(component, None, skip_push=True) 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/weblate/addons/cleanup.py b/weblate/addons/cleanup.py
--- a/weblate/addons/cleanup.py
+++ b/weblate/addons/cleanup.py
@@ -34,7 +34,7 @@
             if filenames is None:
                 continue
             self.extra_files.extend(filenames)
-            translation.store_hash()
+            # Do not update hash here as this is just before parsing updated files
 
     def pre_commit(self, translation, author):
         if translation.is_source and not translation.component.intermediate:
@@ -63,7 +63,9 @@
             if filenames is None:
                 continue
             self.extra_files.extend(filenames)
-            translation.store_hash()
+            # Do not update hash in post_update, only in post_commit
+            if previous_head == "weblate:post-commit":
+                translation.store_hash()
 
     def post_commit(self, component):
-        self.post_update(component, None, skip_push=True)
+        self.post_update(component, "weblate:post-commit", skip_push=True)
{"golden_diff": "diff --git a/weblate/addons/cleanup.py b/weblate/addons/cleanup.py\n--- a/weblate/addons/cleanup.py\n+++ b/weblate/addons/cleanup.py\n@@ -34,7 +34,7 @@\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n- translation.store_hash()\n+ # Do not update hash here as this is just before parsing updated files\n \n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n@@ -63,7 +63,9 @@\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n- translation.store_hash()\n+ # Do not update hash in post_update, only in post_commit\n+ if previous_head == \"weblate:post-commit\":\n+ translation.store_hash()\n \n def post_commit(self, component):\n- self.post_update(component, None, skip_push=True)\n+ self.post_update(component, \"weblate:post-commit\", skip_push=True)\n", "issue": "Some languages don't have all strings available for translation\n### Describe the issue\n\nMy project is here: https://hosted.weblate.org/projects/feeder/android-strings\r\n\r\nA few languages Polish, French and Chinese (Simplified), are missing a dozen strings.\r\n\r\nOne example is the string `other_minutes` which is not available for translation in these languages.\r\n\r\nI have tried re-scanning strings and similar with no change.\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\nNot sure how to reproduce it but it is happening here :https://hosted.weblate.org/projects/feeder/android-strings\r\n\r\nlook at string `other_minutes`, it is missing from Polish, French, and Chinese (Simplified)\n\n### Expected behavior\n\nAll strings should be available for translation in all languages.\n\n### Screenshots\n\n_No response_\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nweblate.org service\n\n### Weblate versions\n\n_No response_\n\n### Weblate deploy checks\n\n_No response_\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import UpdateBaseAddon\nfrom weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT\nfrom weblate.trans.exceptions import FileParseError\n\n\nclass BaseCleanupAddon(UpdateBaseAddon):\n @classmethod\n def can_install(cls, component, user):\n if not component.has_template():\n return False\n return super().can_install(component, user)\n\n\nclass CleanupAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.generic\"\n verbose = gettext_lazy(\"Cleanup translation files\")\n description = gettext_lazy(\n \"Update all translation files to match the monolingual base file. 
\"\n \"For most file formats, this means removing stale translation keys \"\n \"no longer present in the base file.\"\n )\n icon = \"eraser.svg\"\n events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE)\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_unused()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n return\n try:\n filenames = translation.store.cleanup_unused()\n except FileParseError:\n return\n if filenames is not None:\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n\nclass RemoveBlankAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.blank\"\n verbose = gettext_lazy(\"Remove blank strings\")\n description = gettext_lazy(\n \"Removes strings without a translation from translation files.\"\n )\n events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE)\n icon = \"eraser.svg\"\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_blank()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n def post_commit(self, component):\n self.post_update(component, None, skip_push=True)\n", "path": "weblate/addons/cleanup.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom django.utils.translation import gettext_lazy\n\nfrom weblate.addons.base import UpdateBaseAddon\nfrom weblate.addons.events import EVENT_POST_COMMIT, EVENT_POST_UPDATE, EVENT_PRE_COMMIT\nfrom weblate.trans.exceptions import FileParseError\n\n\nclass BaseCleanupAddon(UpdateBaseAddon):\n @classmethod\n def can_install(cls, component, user):\n if not component.has_template():\n return False\n return super().can_install(component, user)\n\n\nclass CleanupAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.generic\"\n verbose = gettext_lazy(\"Cleanup translation files\")\n description = gettext_lazy(\n \"Update all translation files to match the monolingual base file. 
\"\n \"For most file formats, this means removing stale translation keys \"\n \"no longer present in the base file.\"\n )\n icon = \"eraser.svg\"\n events = (EVENT_PRE_COMMIT, EVENT_POST_UPDATE)\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_unused()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n # Do not update hash here as this is just before parsing updated files\n\n def pre_commit(self, translation, author):\n if translation.is_source and not translation.component.intermediate:\n return\n try:\n filenames = translation.store.cleanup_unused()\n except FileParseError:\n return\n if filenames is not None:\n self.extra_files.extend(filenames)\n translation.store_hash()\n\n\nclass RemoveBlankAddon(BaseCleanupAddon):\n name = \"weblate.cleanup.blank\"\n verbose = gettext_lazy(\"Remove blank strings\")\n description = gettext_lazy(\n \"Removes strings without a translation from translation files.\"\n )\n events = (EVENT_POST_COMMIT, EVENT_POST_UPDATE)\n icon = \"eraser.svg\"\n\n def update_translations(self, component, previous_head):\n for translation in self.iterate_translations(component):\n filenames = translation.store.cleanup_blank()\n if filenames is None:\n continue\n self.extra_files.extend(filenames)\n # Do not update hash in post_update, only in post_commit\n if previous_head == \"weblate:post-commit\":\n translation.store_hash()\n\n def post_commit(self, component):\n self.post_update(component, \"weblate:post-commit\", skip_push=True)\n", "path": "weblate/addons/cleanup.py"}]}
1143
230
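The patch above only refreshes the stored file hash when the cleanup was triggered from the post-commit hook, passing a sentinel value through `previous_head` so the shared update routine can tell the two call sites apart. Below is a minimal, self-contained sketch of that sentinel-argument pattern; the class and method names are illustrative, not Weblate's API, and the translation objects are assumed to expose `cleanup_blank()` and `store_hash()`.

```python
POST_COMMIT_SENTINEL = "app:post-commit"


class BlankCleanupHook:
    def __init__(self, translations):
        self.translations = translations

    def update_translations(self, previous_head):
        for translation in self.translations:
            translation.cleanup_blank()
            # Refreshing the hash just before the updated file is re-parsed
            # would make the importer believe nothing changed, so only do it
            # when we arrived here from the post-commit hook.
            if previous_head == POST_COMMIT_SENTINEL:
                translation.store_hash()

    def post_update(self, previous_head):
        self.update_translations(previous_head)

    def post_commit(self):
        self.update_translations(POST_COMMIT_SENTINEL)
```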
gh_patches_debug_37951
rasdani/github-patches
git_diff
scrapy__scrapy-5760
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fixes issue with calling fetch in scrapy shell. Reference Issue Fixes #5740 , #5742 You can recreate the issue with the following script. ``` import asyncio import threading from twisted.internet import asyncioreactor from scrapy.utils.defer import deferred_from_coro from scrapy.utils.reactor import get_asyncio_event_loop_policy async def test_coro(): pass def test_deferred_from_coro(): return deferred_from_coro(test_coro()) def trigger_warning_message(): event_loop = get_asyncio_event_loop_policy().new_event_loop() asyncio.set_event_loop(event_loop) asyncioreactor.install() thread = threading.Thread(target=test_deferred_from_coro) thread.start() thread.join() trigger_warning_message() ``` I was able to recreate this issue using `scrapy shell` and `fetch` on both windows and linux. However it only occurs inside of a project with the `TWISTED_REACTOR` setting set to AsyncioSelectorReactor. What causes the issue is when `get_asyncio_event_loop_policy().get_event_loop()` is called, there is no event loop in the thread that the function is called from. Which raises an exception. asyncio exception in scrapy shell We got a bug report that using `fetch(<url>)` in `scrapy shell` on Windows in a new project (so with `TWISTED_REACTOR` set to the asyncio one) raises "There is no current event loop" in `deferred_from_coro()`. The Python version is 3.9 and 3.10 and the Scrapy version is 2.7.1. It is possible that the recenty changed loop code indeed doesn't work correctly on Windows but I cannot test or fix it at this time. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/shell.py` Content: ``` 1 """Scrapy Shell 2 3 See documentation in docs/topics/shell.rst 4 5 """ 6 import os 7 import signal 8 9 from itemadapter import is_item 10 from twisted.internet import threads, defer 11 from twisted.python import threadable 12 from w3lib.url import any_to_uri 13 14 from scrapy.crawler import Crawler 15 from scrapy.exceptions import IgnoreRequest 16 from scrapy.http import Request, Response 17 from scrapy.settings import Settings 18 from scrapy.spiders import Spider 19 from scrapy.utils.conf import get_config 20 from scrapy.utils.console import DEFAULT_PYTHON_SHELLS, start_python_console 21 from scrapy.utils.datatypes import SequenceExclude 22 from scrapy.utils.misc import load_object 23 from scrapy.utils.response import open_in_browser 24 25 26 class Shell: 27 28 relevant_classes = (Crawler, Spider, Request, Response, Settings) 29 30 def __init__(self, crawler, update_vars=None, code=None): 31 self.crawler = crawler 32 self.update_vars = update_vars or (lambda x: None) 33 self.item_class = load_object(crawler.settings['DEFAULT_ITEM_CLASS']) 34 self.spider = None 35 self.inthread = not threadable.isInIOThread() 36 self.code = code 37 self.vars = {} 38 39 def start(self, url=None, request=None, response=None, spider=None, redirect=True): 40 # disable accidental Ctrl-C key press from shutting down the engine 41 signal.signal(signal.SIGINT, signal.SIG_IGN) 42 if url: 43 self.fetch(url, spider, redirect=redirect) 44 elif request: 45 self.fetch(request, spider) 46 elif response: 47 request = response.request 48 self.populate_vars(response, request, spider) 49 else: 50 self.populate_vars() 51 if self.code: 52 print(eval(self.code, globals(), self.vars)) 53 else: 54 """ 55 Detect interactive shell setting 
in scrapy.cfg 56 e.g.: ~/.config/scrapy.cfg or ~/.scrapy.cfg 57 [settings] 58 # shell can be one of ipython, bpython or python; 59 # to be used as the interactive python console, if available. 60 # (default is ipython, fallbacks in the order listed above) 61 shell = python 62 """ 63 cfg = get_config() 64 section, option = 'settings', 'shell' 65 env = os.environ.get('SCRAPY_PYTHON_SHELL') 66 shells = [] 67 if env: 68 shells += env.strip().lower().split(',') 69 elif cfg.has_option(section, option): 70 shells += [cfg.get(section, option).strip().lower()] 71 else: # try all by default 72 shells += DEFAULT_PYTHON_SHELLS.keys() 73 # always add standard shell as fallback 74 shells += ['python'] 75 start_python_console(self.vars, shells=shells, 76 banner=self.vars.pop('banner', '')) 77 78 def _schedule(self, request, spider): 79 spider = self._open_spider(request, spider) 80 d = _request_deferred(request) 81 d.addCallback(lambda x: (x, spider)) 82 self.crawler.engine.crawl(request) 83 return d 84 85 def _open_spider(self, request, spider): 86 if self.spider: 87 return self.spider 88 89 if spider is None: 90 spider = self.crawler.spider or self.crawler._create_spider() 91 92 self.crawler.spider = spider 93 self.crawler.engine.open_spider(spider, close_if_idle=False) 94 self.spider = spider 95 return spider 96 97 def fetch(self, request_or_url, spider=None, redirect=True, **kwargs): 98 from twisted.internet import reactor 99 if isinstance(request_or_url, Request): 100 request = request_or_url 101 else: 102 url = any_to_uri(request_or_url) 103 request = Request(url, dont_filter=True, **kwargs) 104 if redirect: 105 request.meta['handle_httpstatus_list'] = SequenceExclude(range(300, 400)) 106 else: 107 request.meta['handle_httpstatus_all'] = True 108 response = None 109 try: 110 response, spider = threads.blockingCallFromThread( 111 reactor, self._schedule, request, spider) 112 except IgnoreRequest: 113 pass 114 self.populate_vars(response, request, spider) 115 116 def populate_vars(self, response=None, request=None, spider=None): 117 import scrapy 118 119 self.vars['scrapy'] = scrapy 120 self.vars['crawler'] = self.crawler 121 self.vars['item'] = self.item_class() 122 self.vars['settings'] = self.crawler.settings 123 self.vars['spider'] = spider 124 self.vars['request'] = request 125 self.vars['response'] = response 126 if self.inthread: 127 self.vars['fetch'] = self.fetch 128 self.vars['view'] = open_in_browser 129 self.vars['shelp'] = self.print_help 130 self.update_vars(self.vars) 131 if not self.code: 132 self.vars['banner'] = self.get_help() 133 134 def print_help(self): 135 print(self.get_help()) 136 137 def get_help(self): 138 b = [] 139 b.append("Available Scrapy objects:") 140 b.append(" scrapy scrapy module (contains scrapy.Request, scrapy.Selector, etc)") 141 for k, v in sorted(self.vars.items()): 142 if self._is_relevant(v): 143 b.append(f" {k:<10} {v}") 144 b.append("Useful shortcuts:") 145 if self.inthread: 146 b.append(" fetch(url[, redirect=True]) " 147 "Fetch URL and update local objects (by default, redirects are followed)") 148 b.append(" fetch(req) " 149 "Fetch a scrapy.Request and update local objects ") 150 b.append(" shelp() Shell help (print this help)") 151 b.append(" view(response) View response in a browser") 152 153 return "\n".join(f"[s] {line}" for line in b) 154 155 def _is_relevant(self, value): 156 return isinstance(value, self.relevant_classes) or is_item(value) 157 158 159 def inspect_response(response, spider): 160 """Open a shell to inspect the given response""" 
161 Shell(spider.crawler).start(response=response, spider=spider) 162 163 164 def _request_deferred(request): 165 """Wrap a request inside a Deferred. 166 167 This function is harmful, do not use it until you know what you are doing. 168 169 This returns a Deferred whose first pair of callbacks are the request 170 callback and errback. The Deferred also triggers when the request 171 callback/errback is executed (i.e. when the request is downloaded) 172 173 WARNING: Do not call request.replace() until after the deferred is called. 174 """ 175 request_callback = request.callback 176 request_errback = request.errback 177 178 def _restore_callbacks(result): 179 request.callback = request_callback 180 request.errback = request_errback 181 return result 182 183 d = defer.Deferred() 184 d.addBoth(_restore_callbacks) 185 if request.callback: 186 d.addCallbacks(request.callback, request.errback) 187 188 request.callback, request.errback = d.callback, d.errback 189 return d 190 ``` Path: `scrapy/utils/reactor.py` Content: ``` 1 import asyncio 2 import sys 3 from contextlib import suppress 4 5 from twisted.internet import asyncioreactor, error 6 7 from scrapy.utils.misc import load_object 8 9 10 def listen_tcp(portrange, host, factory): 11 """Like reactor.listenTCP but tries different ports in a range.""" 12 from twisted.internet import reactor 13 if len(portrange) > 2: 14 raise ValueError(f"invalid portrange: {portrange}") 15 if not portrange: 16 return reactor.listenTCP(0, factory, interface=host) 17 if not hasattr(portrange, '__iter__'): 18 return reactor.listenTCP(portrange, factory, interface=host) 19 if len(portrange) == 1: 20 return reactor.listenTCP(portrange[0], factory, interface=host) 21 for x in range(portrange[0], portrange[1] + 1): 22 try: 23 return reactor.listenTCP(x, factory, interface=host) 24 except error.CannotListenError: 25 if x == portrange[1]: 26 raise 27 28 29 class CallLaterOnce: 30 """Schedule a function to be called in the next reactor loop, but only if 31 it hasn't been already scheduled since the last time it ran. 32 """ 33 34 def __init__(self, func, *a, **kw): 35 self._func = func 36 self._a = a 37 self._kw = kw 38 self._call = None 39 40 def schedule(self, delay=0): 41 from twisted.internet import reactor 42 if self._call is None: 43 self._call = reactor.callLater(delay, self) 44 45 def cancel(self): 46 if self._call: 47 self._call.cancel() 48 49 def __call__(self): 50 self._call = None 51 return self._func(*self._a, **self._kw) 52 53 54 def get_asyncio_event_loop_policy(): 55 policy = asyncio.get_event_loop_policy() 56 if ( 57 sys.version_info >= (3, 8) 58 and sys.platform == "win32" 59 and not isinstance(policy, asyncio.WindowsSelectorEventLoopPolicy) 60 ): 61 policy = asyncio.WindowsSelectorEventLoopPolicy() 62 asyncio.set_event_loop_policy(policy) 63 64 return policy 65 66 67 def install_reactor(reactor_path, event_loop_path=None): 68 """Installs the :mod:`~twisted.internet.reactor` with the specified 69 import path. 
Also installs the asyncio event loop with the specified import 70 path if the asyncio reactor is enabled""" 71 reactor_class = load_object(reactor_path) 72 if reactor_class is asyncioreactor.AsyncioSelectorReactor: 73 with suppress(error.ReactorAlreadyInstalledError): 74 policy = get_asyncio_event_loop_policy() 75 if event_loop_path is not None: 76 event_loop_class = load_object(event_loop_path) 77 event_loop = event_loop_class() 78 asyncio.set_event_loop(event_loop) 79 else: 80 event_loop = policy.get_event_loop() 81 82 asyncioreactor.install(eventloop=event_loop) 83 else: 84 *module, _ = reactor_path.split(".") 85 installer_path = module + ["install"] 86 installer = load_object(".".join(installer_path)) 87 with suppress(error.ReactorAlreadyInstalledError): 88 installer() 89 90 91 def verify_installed_reactor(reactor_path): 92 """Raises :exc:`Exception` if the installed 93 :mod:`~twisted.internet.reactor` does not match the specified import 94 path.""" 95 from twisted.internet import reactor 96 reactor_class = load_object(reactor_path) 97 if not reactor.__class__ == reactor_class: 98 msg = ("The installed reactor " 99 f"({reactor.__module__}.{reactor.__class__.__name__}) does not " 100 f"match the requested one ({reactor_path})") 101 raise Exception(msg) 102 103 104 def verify_installed_asyncio_event_loop(loop_path): 105 from twisted.internet import reactor 106 loop_class = load_object(loop_path) 107 if isinstance(reactor._asyncioEventloop, loop_class): 108 return 109 installed = ( 110 f"{reactor._asyncioEventloop.__class__.__module__}" 111 f".{reactor._asyncioEventloop.__class__.__qualname__}" 112 ) 113 specified = f"{loop_class.__module__}.{loop_class.__qualname__}" 114 raise Exception( 115 "Scrapy found an asyncio Twisted reactor already " 116 f"installed, and its event loop class ({installed}) does " 117 "not match the one specified in the ASYNCIO_EVENT_LOOP " 118 f"setting ({specified})" 119 ) 120 121 122 def is_asyncio_reactor_installed(): 123 from twisted.internet import reactor 124 return isinstance(reactor, asyncioreactor.AsyncioSelectorReactor) 125 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/shell.py b/scrapy/shell.py
--- a/scrapy/shell.py
+++ b/scrapy/shell.py
@@ -21,6 +21,7 @@
 from scrapy.utils.datatypes import SequenceExclude
 from scrapy.utils.misc import load_object
 from scrapy.utils.response import open_in_browser
+from scrapy.utils.reactor import is_asyncio_reactor_installed, set_asyncio_event_loop
 
 
 class Shell:
@@ -76,6 +77,10 @@
                                  banner=self.vars.pop('banner', ''))
 
     def _schedule(self, request, spider):
+        if is_asyncio_reactor_installed():
+            # set the asyncio event loop for the current thread
+            event_loop_path = self.crawler.settings['ASYNCIO_EVENT_LOOP']
+            set_asyncio_event_loop(event_loop_path)
         spider = self._open_spider(request, spider)
         d = _request_deferred(request)
         d.addCallback(lambda x: (x, spider))
diff --git a/scrapy/utils/reactor.py b/scrapy/utils/reactor.py
--- a/scrapy/utils/reactor.py
+++ b/scrapy/utils/reactor.py
@@ -71,14 +71,7 @@
     reactor_class = load_object(reactor_path)
     if reactor_class is asyncioreactor.AsyncioSelectorReactor:
        with suppress(error.ReactorAlreadyInstalledError):
-            policy = get_asyncio_event_loop_policy()
-            if event_loop_path is not None:
-                event_loop_class = load_object(event_loop_path)
-                event_loop = event_loop_class()
-                asyncio.set_event_loop(event_loop)
-            else:
-                event_loop = policy.get_event_loop()
-
+            event_loop = set_asyncio_event_loop(event_loop_path)
             asyncioreactor.install(eventloop=event_loop)
     else:
         *module, _ = reactor_path.split(".")
@@ -88,6 +81,25 @@
             installer()
 
 
+def set_asyncio_event_loop(event_loop_path):
+    """Sets and returns the event loop with specified import path."""
+    policy = get_asyncio_event_loop_policy()
+    if event_loop_path is not None:
+        event_loop_class = load_object(event_loop_path)
+        event_loop = event_loop_class()
+        asyncio.set_event_loop(event_loop)
+    else:
+        try:
+            event_loop = policy.get_event_loop()
+        except RuntimeError:
+            # `get_event_loop` is expected to fail when called from a new thread
+            # with no asyncio event loop yet installed. Such is the case when
+            # called from `scrapy shell`
+            event_loop = policy.new_event_loop()
+            asyncio.set_event_loop(event_loop)
+    return event_loop
+
+
 def verify_installed_reactor(reactor_path):
     """Raises :exc:`Exception` if the installed
     :mod:`~twisted.internet.reactor` does not match the specified import
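The core of the fix above is that `policy.get_event_loop()` raises `RuntimeError` when called from a thread that has no asyncio event loop installed, which is exactly the situation `fetch()` creates in `scrapy shell`. The standalone sketch below shows that handling on its own; it is not Scrapy's actual helper, just a minimal version of the same idea.

```python
import asyncio
import threading


def ensure_thread_event_loop() -> asyncio.AbstractEventLoop:
    policy = asyncio.get_event_loop_policy()
    try:
        loop = policy.get_event_loop()
    except RuntimeError:
        # Expected in a freshly spawned thread where no loop has been set yet.
        loop = policy.new_event_loop()
        asyncio.set_event_loop(loop)
    return loop


# Example: calling it from a worker thread now succeeds instead of raising.
thread = threading.Thread(target=ensure_thread_event_loop)
thread.start()
thread.join()
```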
{"golden_diff": "diff --git a/scrapy/shell.py b/scrapy/shell.py\n--- a/scrapy/shell.py\n+++ b/scrapy/shell.py\n@@ -21,6 +21,7 @@\n from scrapy.utils.datatypes import SequenceExclude\n from scrapy.utils.misc import load_object\n from scrapy.utils.response import open_in_browser\n+from scrapy.utils.reactor import is_asyncio_reactor_installed, set_asyncio_event_loop\n \n \n class Shell:\n@@ -76,6 +77,10 @@\n banner=self.vars.pop('banner', ''))\n \n def _schedule(self, request, spider):\n+ if is_asyncio_reactor_installed():\n+ # set the asyncio event loop for the current thread\n+ event_loop_path = self.crawler.settings['ASYNCIO_EVENT_LOOP']\n+ set_asyncio_event_loop(event_loop_path)\n spider = self._open_spider(request, spider)\n d = _request_deferred(request)\n d.addCallback(lambda x: (x, spider))\ndiff --git a/scrapy/utils/reactor.py b/scrapy/utils/reactor.py\n--- a/scrapy/utils/reactor.py\n+++ b/scrapy/utils/reactor.py\n@@ -71,14 +71,7 @@\n reactor_class = load_object(reactor_path)\n if reactor_class is asyncioreactor.AsyncioSelectorReactor:\n with suppress(error.ReactorAlreadyInstalledError):\n- policy = get_asyncio_event_loop_policy()\n- if event_loop_path is not None:\n- event_loop_class = load_object(event_loop_path)\n- event_loop = event_loop_class()\n- asyncio.set_event_loop(event_loop)\n- else:\n- event_loop = policy.get_event_loop()\n-\n+ event_loop = set_asyncio_event_loop(event_loop_path)\n asyncioreactor.install(eventloop=event_loop)\n else:\n *module, _ = reactor_path.split(\".\")\n@@ -88,6 +81,25 @@\n installer()\n \n \n+def set_asyncio_event_loop(event_loop_path):\n+ \"\"\"Sets and returns the event loop with specified import path.\"\"\"\n+ policy = get_asyncio_event_loop_policy()\n+ if event_loop_path is not None:\n+ event_loop_class = load_object(event_loop_path)\n+ event_loop = event_loop_class()\n+ asyncio.set_event_loop(event_loop)\n+ else:\n+ try:\n+ event_loop = policy.get_event_loop()\n+ except RuntimeError:\n+ # `get_event_loop` is expected to fail when called from a new thread\n+ # with no asyncio event loop yet installed. Such is the case when\n+ # called from `scrapy shell`\n+ event_loop = policy.new_event_loop()\n+ asyncio.set_event_loop(event_loop)\n+ return event_loop\n+\n+\n def verify_installed_reactor(reactor_path):\n \"\"\"Raises :exc:`Exception` if the installed\n :mod:`~twisted.internet.reactor` does not match the specified import\n", "issue": "Fixes issue with calling fetch in scrapy shell.\nReference Issue Fixes #5740 , #5742\r\n\r\nYou can recreate the issue with the following script.\r\n\r\n```\r\nimport asyncio\r\nimport threading\r\nfrom twisted.internet import asyncioreactor\r\nfrom scrapy.utils.defer import deferred_from_coro\r\nfrom scrapy.utils.reactor import get_asyncio_event_loop_policy\r\n\r\nasync def test_coro():\r\n pass\r\n\r\ndef test_deferred_from_coro():\r\n return deferred_from_coro(test_coro())\r\n\r\ndef trigger_warning_message():\r\n event_loop = get_asyncio_event_loop_policy().new_event_loop()\r\n asyncio.set_event_loop(event_loop)\r\n asyncioreactor.install()\r\n thread = threading.Thread(target=test_deferred_from_coro)\r\n thread.start()\r\n thread.join()\r\n\r\ntrigger_warning_message()\r\n```\r\n\r\nI was able to recreate this issue using `scrapy shell` and `fetch` on both windows and linux. However it only occurs inside of a project with the `TWISTED_REACTOR` setting set to AsyncioSelectorReactor. 
\r\n\r\nWhat causes the issue is when `get_asyncio_event_loop_policy().get_event_loop()` is called, there is no event loop in the thread that the function is called from. Which raises an exception. \nasyncio exception in scrapy shell\nWe got a bug report that using `fetch(<url>)` in `scrapy shell` on Windows in a new project (so with `TWISTED_REACTOR` set to the asyncio one) raises \"There is no current event loop\" in `deferred_from_coro()`. The Python version is 3.9 and 3.10 and the Scrapy version is 2.7.1. It is possible that the recenty changed loop code indeed doesn't work correctly on Windows but I cannot test or fix it at this time.\n", "before_files": [{"content": "\"\"\"Scrapy Shell\n\nSee documentation in docs/topics/shell.rst\n\n\"\"\"\nimport os\nimport signal\n\nfrom itemadapter import is_item\nfrom twisted.internet import threads, defer\nfrom twisted.python import threadable\nfrom w3lib.url import any_to_uri\n\nfrom scrapy.crawler import Crawler\nfrom scrapy.exceptions import IgnoreRequest\nfrom scrapy.http import Request, Response\nfrom scrapy.settings import Settings\nfrom scrapy.spiders import Spider\nfrom scrapy.utils.conf import get_config\nfrom scrapy.utils.console import DEFAULT_PYTHON_SHELLS, start_python_console\nfrom scrapy.utils.datatypes import SequenceExclude\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.response import open_in_browser\n\n\nclass Shell:\n\n relevant_classes = (Crawler, Spider, Request, Response, Settings)\n\n def __init__(self, crawler, update_vars=None, code=None):\n self.crawler = crawler\n self.update_vars = update_vars or (lambda x: None)\n self.item_class = load_object(crawler.settings['DEFAULT_ITEM_CLASS'])\n self.spider = None\n self.inthread = not threadable.isInIOThread()\n self.code = code\n self.vars = {}\n\n def start(self, url=None, request=None, response=None, spider=None, redirect=True):\n # disable accidental Ctrl-C key press from shutting down the engine\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n if url:\n self.fetch(url, spider, redirect=redirect)\n elif request:\n self.fetch(request, spider)\n elif response:\n request = response.request\n self.populate_vars(response, request, spider)\n else:\n self.populate_vars()\n if self.code:\n print(eval(self.code, globals(), self.vars))\n else:\n \"\"\"\n Detect interactive shell setting in scrapy.cfg\n e.g.: ~/.config/scrapy.cfg or ~/.scrapy.cfg\n [settings]\n # shell can be one of ipython, bpython or python;\n # to be used as the interactive python console, if available.\n # (default is ipython, fallbacks in the order listed above)\n shell = python\n \"\"\"\n cfg = get_config()\n section, option = 'settings', 'shell'\n env = os.environ.get('SCRAPY_PYTHON_SHELL')\n shells = []\n if env:\n shells += env.strip().lower().split(',')\n elif cfg.has_option(section, option):\n shells += [cfg.get(section, option).strip().lower()]\n else: # try all by default\n shells += DEFAULT_PYTHON_SHELLS.keys()\n # always add standard shell as fallback\n shells += ['python']\n start_python_console(self.vars, shells=shells,\n banner=self.vars.pop('banner', ''))\n\n def _schedule(self, request, spider):\n spider = self._open_spider(request, spider)\n d = _request_deferred(request)\n d.addCallback(lambda x: (x, spider))\n self.crawler.engine.crawl(request)\n return d\n\n def _open_spider(self, request, spider):\n if self.spider:\n return self.spider\n\n if spider is None:\n spider = self.crawler.spider or self.crawler._create_spider()\n\n self.crawler.spider = spider\n 
self.crawler.engine.open_spider(spider, close_if_idle=False)\n self.spider = spider\n return spider\n\n def fetch(self, request_or_url, spider=None, redirect=True, **kwargs):\n from twisted.internet import reactor\n if isinstance(request_or_url, Request):\n request = request_or_url\n else:\n url = any_to_uri(request_or_url)\n request = Request(url, dont_filter=True, **kwargs)\n if redirect:\n request.meta['handle_httpstatus_list'] = SequenceExclude(range(300, 400))\n else:\n request.meta['handle_httpstatus_all'] = True\n response = None\n try:\n response, spider = threads.blockingCallFromThread(\n reactor, self._schedule, request, spider)\n except IgnoreRequest:\n pass\n self.populate_vars(response, request, spider)\n\n def populate_vars(self, response=None, request=None, spider=None):\n import scrapy\n\n self.vars['scrapy'] = scrapy\n self.vars['crawler'] = self.crawler\n self.vars['item'] = self.item_class()\n self.vars['settings'] = self.crawler.settings\n self.vars['spider'] = spider\n self.vars['request'] = request\n self.vars['response'] = response\n if self.inthread:\n self.vars['fetch'] = self.fetch\n self.vars['view'] = open_in_browser\n self.vars['shelp'] = self.print_help\n self.update_vars(self.vars)\n if not self.code:\n self.vars['banner'] = self.get_help()\n\n def print_help(self):\n print(self.get_help())\n\n def get_help(self):\n b = []\n b.append(\"Available Scrapy objects:\")\n b.append(\" scrapy scrapy module (contains scrapy.Request, scrapy.Selector, etc)\")\n for k, v in sorted(self.vars.items()):\n if self._is_relevant(v):\n b.append(f\" {k:<10} {v}\")\n b.append(\"Useful shortcuts:\")\n if self.inthread:\n b.append(\" fetch(url[, redirect=True]) \"\n \"Fetch URL and update local objects (by default, redirects are followed)\")\n b.append(\" fetch(req) \"\n \"Fetch a scrapy.Request and update local objects \")\n b.append(\" shelp() Shell help (print this help)\")\n b.append(\" view(response) View response in a browser\")\n\n return \"\\n\".join(f\"[s] {line}\" for line in b)\n\n def _is_relevant(self, value):\n return isinstance(value, self.relevant_classes) or is_item(value)\n\n\ndef inspect_response(response, spider):\n \"\"\"Open a shell to inspect the given response\"\"\"\n Shell(spider.crawler).start(response=response, spider=spider)\n\n\ndef _request_deferred(request):\n \"\"\"Wrap a request inside a Deferred.\n\n This function is harmful, do not use it until you know what you are doing.\n\n This returns a Deferred whose first pair of callbacks are the request\n callback and errback. The Deferred also triggers when the request\n callback/errback is executed (i.e. 
when the request is downloaded)\n\n WARNING: Do not call request.replace() until after the deferred is called.\n \"\"\"\n request_callback = request.callback\n request_errback = request.errback\n\n def _restore_callbacks(result):\n request.callback = request_callback\n request.errback = request_errback\n return result\n\n d = defer.Deferred()\n d.addBoth(_restore_callbacks)\n if request.callback:\n d.addCallbacks(request.callback, request.errback)\n\n request.callback, request.errback = d.callback, d.errback\n return d\n", "path": "scrapy/shell.py"}, {"content": "import asyncio\nimport sys\nfrom contextlib import suppress\n\nfrom twisted.internet import asyncioreactor, error\n\nfrom scrapy.utils.misc import load_object\n\n\ndef listen_tcp(portrange, host, factory):\n \"\"\"Like reactor.listenTCP but tries different ports in a range.\"\"\"\n from twisted.internet import reactor\n if len(portrange) > 2:\n raise ValueError(f\"invalid portrange: {portrange}\")\n if not portrange:\n return reactor.listenTCP(0, factory, interface=host)\n if not hasattr(portrange, '__iter__'):\n return reactor.listenTCP(portrange, factory, interface=host)\n if len(portrange) == 1:\n return reactor.listenTCP(portrange[0], factory, interface=host)\n for x in range(portrange[0], portrange[1] + 1):\n try:\n return reactor.listenTCP(x, factory, interface=host)\n except error.CannotListenError:\n if x == portrange[1]:\n raise\n\n\nclass CallLaterOnce:\n \"\"\"Schedule a function to be called in the next reactor loop, but only if\n it hasn't been already scheduled since the last time it ran.\n \"\"\"\n\n def __init__(self, func, *a, **kw):\n self._func = func\n self._a = a\n self._kw = kw\n self._call = None\n\n def schedule(self, delay=0):\n from twisted.internet import reactor\n if self._call is None:\n self._call = reactor.callLater(delay, self)\n\n def cancel(self):\n if self._call:\n self._call.cancel()\n\n def __call__(self):\n self._call = None\n return self._func(*self._a, **self._kw)\n\n\ndef get_asyncio_event_loop_policy():\n policy = asyncio.get_event_loop_policy()\n if (\n sys.version_info >= (3, 8)\n and sys.platform == \"win32\"\n and not isinstance(policy, asyncio.WindowsSelectorEventLoopPolicy)\n ):\n policy = asyncio.WindowsSelectorEventLoopPolicy()\n asyncio.set_event_loop_policy(policy)\n\n return policy\n\n\ndef install_reactor(reactor_path, event_loop_path=None):\n \"\"\"Installs the :mod:`~twisted.internet.reactor` with the specified\n import path. 
Also installs the asyncio event loop with the specified import\n path if the asyncio reactor is enabled\"\"\"\n reactor_class = load_object(reactor_path)\n if reactor_class is asyncioreactor.AsyncioSelectorReactor:\n with suppress(error.ReactorAlreadyInstalledError):\n policy = get_asyncio_event_loop_policy()\n if event_loop_path is not None:\n event_loop_class = load_object(event_loop_path)\n event_loop = event_loop_class()\n asyncio.set_event_loop(event_loop)\n else:\n event_loop = policy.get_event_loop()\n\n asyncioreactor.install(eventloop=event_loop)\n else:\n *module, _ = reactor_path.split(\".\")\n installer_path = module + [\"install\"]\n installer = load_object(\".\".join(installer_path))\n with suppress(error.ReactorAlreadyInstalledError):\n installer()\n\n\ndef verify_installed_reactor(reactor_path):\n \"\"\"Raises :exc:`Exception` if the installed\n :mod:`~twisted.internet.reactor` does not match the specified import\n path.\"\"\"\n from twisted.internet import reactor\n reactor_class = load_object(reactor_path)\n if not reactor.__class__ == reactor_class:\n msg = (\"The installed reactor \"\n f\"({reactor.__module__}.{reactor.__class__.__name__}) does not \"\n f\"match the requested one ({reactor_path})\")\n raise Exception(msg)\n\n\ndef verify_installed_asyncio_event_loop(loop_path):\n from twisted.internet import reactor\n loop_class = load_object(loop_path)\n if isinstance(reactor._asyncioEventloop, loop_class):\n return\n installed = (\n f\"{reactor._asyncioEventloop.__class__.__module__}\"\n f\".{reactor._asyncioEventloop.__class__.__qualname__}\"\n )\n specified = f\"{loop_class.__module__}.{loop_class.__qualname__}\"\n raise Exception(\n \"Scrapy found an asyncio Twisted reactor already \"\n f\"installed, and its event loop class ({installed}) does \"\n \"not match the one specified in the ASYNCIO_EVENT_LOOP \"\n f\"setting ({specified})\"\n )\n\n\ndef is_asyncio_reactor_installed():\n from twisted.internet import reactor\n return isinstance(reactor, asyncioreactor.AsyncioSelectorReactor)\n", "path": "scrapy/utils/reactor.py"}], "after_files": [{"content": "\"\"\"Scrapy Shell\n\nSee documentation in docs/topics/shell.rst\n\n\"\"\"\nimport os\nimport signal\n\nfrom itemadapter import is_item\nfrom twisted.internet import threads, defer\nfrom twisted.python import threadable\nfrom w3lib.url import any_to_uri\n\nfrom scrapy.crawler import Crawler\nfrom scrapy.exceptions import IgnoreRequest\nfrom scrapy.http import Request, Response\nfrom scrapy.settings import Settings\nfrom scrapy.spiders import Spider\nfrom scrapy.utils.conf import get_config\nfrom scrapy.utils.console import DEFAULT_PYTHON_SHELLS, start_python_console\nfrom scrapy.utils.datatypes import SequenceExclude\nfrom scrapy.utils.misc import load_object\nfrom scrapy.utils.response import open_in_browser\nfrom scrapy.utils.reactor import is_asyncio_reactor_installed, set_asyncio_event_loop\n\n\nclass Shell:\n\n relevant_classes = (Crawler, Spider, Request, Response, Settings)\n\n def __init__(self, crawler, update_vars=None, code=None):\n self.crawler = crawler\n self.update_vars = update_vars or (lambda x: None)\n self.item_class = load_object(crawler.settings['DEFAULT_ITEM_CLASS'])\n self.spider = None\n self.inthread = not threadable.isInIOThread()\n self.code = code\n self.vars = {}\n\n def start(self, url=None, request=None, response=None, spider=None, redirect=True):\n # disable accidental Ctrl-C key press from shutting down the engine\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n if url:\n 
self.fetch(url, spider, redirect=redirect)\n elif request:\n self.fetch(request, spider)\n elif response:\n request = response.request\n self.populate_vars(response, request, spider)\n else:\n self.populate_vars()\n if self.code:\n print(eval(self.code, globals(), self.vars))\n else:\n \"\"\"\n Detect interactive shell setting in scrapy.cfg\n e.g.: ~/.config/scrapy.cfg or ~/.scrapy.cfg\n [settings]\n # shell can be one of ipython, bpython or python;\n # to be used as the interactive python console, if available.\n # (default is ipython, fallbacks in the order listed above)\n shell = python\n \"\"\"\n cfg = get_config()\n section, option = 'settings', 'shell'\n env = os.environ.get('SCRAPY_PYTHON_SHELL')\n shells = []\n if env:\n shells += env.strip().lower().split(',')\n elif cfg.has_option(section, option):\n shells += [cfg.get(section, option).strip().lower()]\n else: # try all by default\n shells += DEFAULT_PYTHON_SHELLS.keys()\n # always add standard shell as fallback\n shells += ['python']\n start_python_console(self.vars, shells=shells,\n banner=self.vars.pop('banner', ''))\n\n def _schedule(self, request, spider):\n if is_asyncio_reactor_installed():\n # set the asyncio event loop for the current thread\n event_loop_path = self.crawler.settings['ASYNCIO_EVENT_LOOP']\n set_asyncio_event_loop(event_loop_path)\n spider = self._open_spider(request, spider)\n d = _request_deferred(request)\n d.addCallback(lambda x: (x, spider))\n self.crawler.engine.crawl(request)\n return d\n\n def _open_spider(self, request, spider):\n if self.spider:\n return self.spider\n\n if spider is None:\n spider = self.crawler.spider or self.crawler._create_spider()\n\n self.crawler.spider = spider\n self.crawler.engine.open_spider(spider, close_if_idle=False)\n self.spider = spider\n return spider\n\n def fetch(self, request_or_url, spider=None, redirect=True, **kwargs):\n from twisted.internet import reactor\n if isinstance(request_or_url, Request):\n request = request_or_url\n else:\n url = any_to_uri(request_or_url)\n request = Request(url, dont_filter=True, **kwargs)\n if redirect:\n request.meta['handle_httpstatus_list'] = SequenceExclude(range(300, 400))\n else:\n request.meta['handle_httpstatus_all'] = True\n response = None\n try:\n response, spider = threads.blockingCallFromThread(\n reactor, self._schedule, request, spider)\n except IgnoreRequest:\n pass\n self.populate_vars(response, request, spider)\n\n def populate_vars(self, response=None, request=None, spider=None):\n import scrapy\n\n self.vars['scrapy'] = scrapy\n self.vars['crawler'] = self.crawler\n self.vars['item'] = self.item_class()\n self.vars['settings'] = self.crawler.settings\n self.vars['spider'] = spider\n self.vars['request'] = request\n self.vars['response'] = response\n if self.inthread:\n self.vars['fetch'] = self.fetch\n self.vars['view'] = open_in_browser\n self.vars['shelp'] = self.print_help\n self.update_vars(self.vars)\n if not self.code:\n self.vars['banner'] = self.get_help()\n\n def print_help(self):\n print(self.get_help())\n\n def get_help(self):\n b = []\n b.append(\"Available Scrapy objects:\")\n b.append(\" scrapy scrapy module (contains scrapy.Request, scrapy.Selector, etc)\")\n for k, v in sorted(self.vars.items()):\n if self._is_relevant(v):\n b.append(f\" {k:<10} {v}\")\n b.append(\"Useful shortcuts:\")\n if self.inthread:\n b.append(\" fetch(url[, redirect=True]) \"\n \"Fetch URL and update local objects (by default, redirects are followed)\")\n b.append(\" fetch(req) \"\n \"Fetch a scrapy.Request and update 
local objects \")\n b.append(\" shelp() Shell help (print this help)\")\n b.append(\" view(response) View response in a browser\")\n\n return \"\\n\".join(f\"[s] {line}\" for line in b)\n\n def _is_relevant(self, value):\n return isinstance(value, self.relevant_classes) or is_item(value)\n\n\ndef inspect_response(response, spider):\n \"\"\"Open a shell to inspect the given response\"\"\"\n Shell(spider.crawler).start(response=response, spider=spider)\n\n\ndef _request_deferred(request):\n \"\"\"Wrap a request inside a Deferred.\n\n This function is harmful, do not use it until you know what you are doing.\n\n This returns a Deferred whose first pair of callbacks are the request\n callback and errback. The Deferred also triggers when the request\n callback/errback is executed (i.e. when the request is downloaded)\n\n WARNING: Do not call request.replace() until after the deferred is called.\n \"\"\"\n request_callback = request.callback\n request_errback = request.errback\n\n def _restore_callbacks(result):\n request.callback = request_callback\n request.errback = request_errback\n return result\n\n d = defer.Deferred()\n d.addBoth(_restore_callbacks)\n if request.callback:\n d.addCallbacks(request.callback, request.errback)\n\n request.callback, request.errback = d.callback, d.errback\n return d\n", "path": "scrapy/shell.py"}, {"content": "import asyncio\nimport sys\nfrom contextlib import suppress\n\nfrom twisted.internet import asyncioreactor, error\n\nfrom scrapy.utils.misc import load_object\n\n\ndef listen_tcp(portrange, host, factory):\n \"\"\"Like reactor.listenTCP but tries different ports in a range.\"\"\"\n from twisted.internet import reactor\n if len(portrange) > 2:\n raise ValueError(f\"invalid portrange: {portrange}\")\n if not portrange:\n return reactor.listenTCP(0, factory, interface=host)\n if not hasattr(portrange, '__iter__'):\n return reactor.listenTCP(portrange, factory, interface=host)\n if len(portrange) == 1:\n return reactor.listenTCP(portrange[0], factory, interface=host)\n for x in range(portrange[0], portrange[1] + 1):\n try:\n return reactor.listenTCP(x, factory, interface=host)\n except error.CannotListenError:\n if x == portrange[1]:\n raise\n\n\nclass CallLaterOnce:\n \"\"\"Schedule a function to be called in the next reactor loop, but only if\n it hasn't been already scheduled since the last time it ran.\n \"\"\"\n\n def __init__(self, func, *a, **kw):\n self._func = func\n self._a = a\n self._kw = kw\n self._call = None\n\n def schedule(self, delay=0):\n from twisted.internet import reactor\n if self._call is None:\n self._call = reactor.callLater(delay, self)\n\n def cancel(self):\n if self._call:\n self._call.cancel()\n\n def __call__(self):\n self._call = None\n return self._func(*self._a, **self._kw)\n\n\ndef get_asyncio_event_loop_policy():\n policy = asyncio.get_event_loop_policy()\n if (\n sys.version_info >= (3, 8)\n and sys.platform == \"win32\"\n and not isinstance(policy, asyncio.WindowsSelectorEventLoopPolicy)\n ):\n policy = asyncio.WindowsSelectorEventLoopPolicy()\n asyncio.set_event_loop_policy(policy)\n\n return policy\n\n\ndef install_reactor(reactor_path, event_loop_path=None):\n \"\"\"Installs the :mod:`~twisted.internet.reactor` with the specified\n import path. 
Also installs the asyncio event loop with the specified import\n path if the asyncio reactor is enabled\"\"\"\n reactor_class = load_object(reactor_path)\n if reactor_class is asyncioreactor.AsyncioSelectorReactor:\n with suppress(error.ReactorAlreadyInstalledError):\n event_loop = set_asyncio_event_loop(event_loop_path)\n asyncioreactor.install(eventloop=event_loop)\n else:\n *module, _ = reactor_path.split(\".\")\n installer_path = module + [\"install\"]\n installer = load_object(\".\".join(installer_path))\n with suppress(error.ReactorAlreadyInstalledError):\n installer()\n\n\ndef set_asyncio_event_loop(event_loop_path):\n \"\"\"Sets and returns the event loop with specified import path.\"\"\"\n policy = get_asyncio_event_loop_policy()\n if event_loop_path is not None:\n event_loop_class = load_object(event_loop_path)\n event_loop = event_loop_class()\n asyncio.set_event_loop(event_loop)\n else:\n try:\n event_loop = policy.get_event_loop()\n except RuntimeError:\n # `get_event_loop` is expected to fail when called from a new thread\n # with no asyncio event loop yet installed. Such is the case when\n # called from `scrapy shell`\n event_loop = policy.new_event_loop()\n asyncio.set_event_loop(event_loop)\n return event_loop\n\n\ndef verify_installed_reactor(reactor_path):\n \"\"\"Raises :exc:`Exception` if the installed\n :mod:`~twisted.internet.reactor` does not match the specified import\n path.\"\"\"\n from twisted.internet import reactor\n reactor_class = load_object(reactor_path)\n if not reactor.__class__ == reactor_class:\n msg = (\"The installed reactor \"\n f\"({reactor.__module__}.{reactor.__class__.__name__}) does not \"\n f\"match the requested one ({reactor_path})\")\n raise Exception(msg)\n\n\ndef verify_installed_asyncio_event_loop(loop_path):\n from twisted.internet import reactor\n loop_class = load_object(loop_path)\n if isinstance(reactor._asyncioEventloop, loop_class):\n return\n installed = (\n f\"{reactor._asyncioEventloop.__class__.__module__}\"\n f\".{reactor._asyncioEventloop.__class__.__qualname__}\"\n )\n specified = f\"{loop_class.__module__}.{loop_class.__qualname__}\"\n raise Exception(\n \"Scrapy found an asyncio Twisted reactor already \"\n f\"installed, and its event loop class ({installed}) does \"\n \"not match the one specified in the ASYNCIO_EVENT_LOOP \"\n f\"setting ({specified})\"\n )\n\n\ndef is_asyncio_reactor_installed():\n from twisted.internet import reactor\n return isinstance(reactor, asyncioreactor.AsyncioSelectorReactor)\n", "path": "scrapy/utils/reactor.py"}]}
3,877
629
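A brief aside on the asyncio behaviour the scrapy record above hinges on: asyncio tracks its event loop per thread, and `get_event_loop()` only auto-creates a loop in the main thread, so calling it from a worker thread that never installed one raises `RuntimeError`. The sketch below is illustrative only — it uses plain `asyncio` calls rather than scrapy's event-loop-policy helper — and mirrors the `new_event_loop()` fallback the patch adds:

```python
import asyncio
import threading

def worker():
    try:
        # In a non-main thread with no loop installed, this raises
        # RuntimeError("There is no current event loop in thread ...").
        loop = asyncio.get_event_loop()
    except RuntimeError:
        # Fallback equivalent to the patched behaviour: build a fresh loop
        # and register it for this thread before using it.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    print("worker got", type(loop).__name__)
    loop.close()

t = threading.Thread(target=worker)
t.start()
t.join()
```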
gh_patches_debug_32100
rasdani/github-patches
git_diff
cocotb__cocotb-1420
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Simulator timestamps are broken in logs Looks like gh-1411 didn't work properly. I'll try and find the fix shortly, but if we want to do a release before I do we should revert it. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cocotb/log.py` Content: ``` 1 # Copyright (c) 2013, 2018 Potential Ventures Ltd 2 # Copyright (c) 2013 SolarFlare Communications Inc 3 # All rights reserved. 4 # 5 # Redistribution and use in source and binary forms, with or without 6 # modification, are permitted provided that the following conditions are met: 7 # * Redistributions of source code must retain the above copyright 8 # notice, this list of conditions and the following disclaimer. 9 # * Redistributions in binary form must reproduce the above copyright 10 # notice, this list of conditions and the following disclaimer in the 11 # documentation and/or other materials provided with the distribution. 12 # * Neither the name of Potential Ventures Ltd, 13 # SolarFlare Communications Inc nor the 14 # names of its contributors may be used to endorse or promote products 15 # derived from this software without specific prior written permission. 16 # 17 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 18 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 20 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY 21 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 22 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 23 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 24 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 26 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 28 """ 29 Everything related to logging 30 """ 31 32 import os 33 import sys 34 import logging 35 import warnings 36 37 from cocotb.utils import ( 38 get_sim_time, get_time_from_sim_steps, want_color_output 39 ) 40 41 import cocotb.ANSI as ANSI 42 43 if "COCOTB_REDUCED_LOG_FMT" in os.environ: 44 _suppress = True 45 else: 46 _suppress = False 47 48 # Column alignment 49 _LEVEL_CHARS = len("CRITICAL") # noqa 50 _RECORD_CHARS = 35 # noqa 51 _FILENAME_CHARS = 20 # noqa 52 _LINENO_CHARS = 4 # noqa 53 _FUNCNAME_CHARS = 31 # noqa 54 55 56 def default_config(): 57 """ Apply the default cocotb log formatting to the root logger. 58 59 This hooks up the logger to write to stdout, using either 60 :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending 61 on whether colored output is requested. It also adds a 62 :class:`SimTimeContextFilter` filter so that 63 :attr:`~logging.LogRecord.created_sim_time` is available to the formatter. 64 65 The logging level for cocotb logs is set based on the 66 :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``. 67 68 If desired, this logging configuration can be overwritten by calling 69 ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by 70 manually resetting the root logger instance, for which examples can be 71 found online. 
72 """ 73 # construct an appropriate handler 74 hdlr = logging.StreamHandler(sys.stdout) 75 if want_color_output(): 76 hdlr.setFormatter(SimColourLogFormatter()) 77 else: 78 hdlr.setFormatter(SimLogFormatter()) 79 80 filter = SimTimeContextFilter() 81 82 logging.setLoggerClass(SimBaseLog) # For backwards compatibility 83 logging.basicConfig() 84 logging.getLogger().handlers = [hdlr] # overwrite default handlers 85 logging.getLogger().filters = [filter] 86 87 # apply level settings for cocotb 88 log = logging.getLogger('cocotb') 89 level = os.getenv("COCOTB_LOG_LEVEL", "INFO") 90 try: 91 _default_log = getattr(logging, level) 92 except AttributeError: 93 log.error("Unable to set logging level to %r" % level) 94 _default_log = logging.INFO 95 log.setLevel(_default_log) 96 97 # Notify GPI of log level, which it uses as an optimization to avoid 98 # calling into Python. 99 if "COCOTB_SIM" in os.environ: 100 import simulator 101 simulator.log_level(_default_log) 102 103 104 class SimBaseLog(logging.getLoggerClass()): 105 """ This class only exists for backwards compatibility """ 106 107 @property 108 def logger(self): 109 warnings.warn( 110 "the .logger attribute should not be used now that `SimLog` " 111 "returns a native logger instance directly.", 112 DeprecationWarning, stacklevel=2) 113 return self 114 115 @property 116 def colour(self): 117 warnings.warn( 118 "the .colour attribute may be removed in future, use the " 119 "equivalent `cocotb.utils.want_color_output()` instead", 120 DeprecationWarning, stacklevel=2) 121 return want_color_output() 122 123 124 # this used to be a class, hence the unusual capitalization 125 def SimLog(name, ident=None): 126 """ Like logging.getLogger, but append a numeric identifier to the name """ 127 if ident is not None: 128 name = "%s.0x%x" % (name, ident) 129 return logging.getLogger(name) 130 131 132 class SimTimeContextFilter(logging.Filter): 133 """ 134 A filter to inject simulator times into the log records. 135 136 This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`. 137 138 This adds the :attr:`~logging.LogRecord.created_sim_time` attribute. 139 """ 140 141 # needed to make our docs render well 142 def __init__(self, *args, **kwargs): 143 """ See :class:`logging.Filter` for argument descriptions """ 144 super().__init__(*args, **kwargs) 145 146 def filter(self, record): 147 try: 148 record.created_sim_time = get_sim_time() 149 except RecursionError: 150 # get_sim_time may try to log - if that happens, we can't 151 # attach a simulator time to this message. 152 record.created_sim_time = None 153 return True 154 155 156 class SimLogFormatter(logging.Formatter): 157 """Log formatter to provide consistent log message handling. 158 159 This will only add simulator timestamps if the logger object has a 160 :class:`SimTimeContextFilter` filter attached, which cocotb ensures by 161 default. 162 """ 163 164 # Removes the arguments from the base class. Docstring needed to make 165 # sphinx happy. 166 def __init__(self): 167 """ Takes no arguments. """ 168 super().__init__() 169 170 # Justify and truncate 171 @staticmethod 172 def ljust(string, chars): 173 if len(string) > chars: 174 return ".." + string[(chars - 2) * -1:] 175 return string.ljust(chars) 176 177 @staticmethod 178 def rjust(string, chars): 179 if len(string) > chars: 180 return ".." 
+ string[(chars - 2) * -1:] 181 return string.rjust(chars) 182 183 def _format(self, level, record, msg, coloured=False): 184 sim_time = getattr(record, 'created_sim_time', None) 185 if sim_time is None: 186 sim_time_str = " -.--ns" 187 else: 188 time_ns = get_time_from_sim_steps(sim_time, 'ns') 189 sim_time_str = "{:6.2f}ns".format(time_ns) 190 prefix = sim_time_str.rjust(11) + ' ' + level + ' ' 191 if not _suppress: 192 prefix += self.ljust(record.name, _RECORD_CHARS) + \ 193 self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \ 194 ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \ 195 ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' ' 196 197 # these lines are copied from the builtin logger 198 if record.exc_info: 199 # Cache the traceback text to avoid converting it multiple times 200 # (it's constant anyway) 201 if not record.exc_text: 202 record.exc_text = self.formatException(record.exc_info) 203 if record.exc_text: 204 if msg[-1:] != "\n": 205 msg = msg + "\n" 206 msg = msg + record.exc_text 207 208 prefix_len = len(prefix) 209 if coloured: 210 prefix_len -= (len(level) - _LEVEL_CHARS) 211 pad = "\n" + " " * (prefix_len) 212 return prefix + pad.join(msg.split('\n')) 213 214 def format(self, record): 215 """Prettify the log output, annotate with simulation time""" 216 217 msg = record.getMessage() 218 level = record.levelname.ljust(_LEVEL_CHARS) 219 220 return self._format(level, record, msg) 221 222 223 class SimColourLogFormatter(SimLogFormatter): 224 """Log formatter to provide consistent log message handling.""" 225 226 loglevel2colour = { 227 logging.DEBUG : "%s", 228 logging.INFO : ANSI.COLOR_INFO + "%s" + ANSI.COLOR_DEFAULT, 229 logging.WARNING : ANSI.COLOR_WARNING + "%s" + ANSI.COLOR_DEFAULT, 230 logging.ERROR : ANSI.COLOR_ERROR + "%s" + ANSI.COLOR_DEFAULT, 231 logging.CRITICAL: ANSI.COLOR_CRITICAL + "%s" + ANSI.COLOR_DEFAULT, 232 } 233 234 def format(self, record): 235 """Prettify the log output, annotate with simulation time""" 236 237 msg = record.getMessage() 238 239 # Need to colour each line in case coloring is applied in the message 240 msg = '\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\n')]) 241 level = (SimColourLogFormatter.loglevel2colour[record.levelno] % 242 record.levelname.ljust(_LEVEL_CHARS)) 243 244 return self._format(level, record, msg, coloured=True) 245 246 247 def _filter_from_c(logger_name, level): 248 return logging.getLogger(logger_name).isEnabledFor(level) 249 250 251 def _log_from_c(logger_name, level, filename, lineno, msg, function_name): 252 """ 253 This is for use from the C world, and allows us to insert C stack 254 information. 255 """ 256 logger = logging.getLogger(logger_name) 257 if logger.isEnabledFor(level): 258 record = logger.makeRecord( 259 logger.name, 260 level, 261 filename, 262 lineno, 263 msg, 264 None, 265 None, 266 function_name 267 ) 268 logger.handle(record) 269 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cocotb/log.py b/cocotb/log.py --- a/cocotb/log.py +++ b/cocotb/log.py @@ -72,17 +72,16 @@ """ # construct an appropriate handler hdlr = logging.StreamHandler(sys.stdout) + hdlr.addFilter(SimTimeContextFilter()) if want_color_output(): hdlr.setFormatter(SimColourLogFormatter()) else: hdlr.setFormatter(SimLogFormatter()) - filter = SimTimeContextFilter() logging.setLoggerClass(SimBaseLog) # For backwards compatibility logging.basicConfig() logging.getLogger().handlers = [hdlr] # overwrite default handlers - logging.getLogger().filters = [filter] # apply level settings for cocotb log = logging.getLogger('cocotb') @@ -139,9 +138,9 @@ """ # needed to make our docs render well - def __init__(self, *args, **kwargs): - """ See :class:`logging.Filter` for argument descriptions """ - super().__init__(*args, **kwargs) + def __init__(self): + """ Takes no arguments """ + super().__init__() def filter(self, record): try: @@ -156,9 +155,9 @@ class SimLogFormatter(logging.Formatter): """Log formatter to provide consistent log message handling. - This will only add simulator timestamps if the logger object has a - :class:`SimTimeContextFilter` filter attached, which cocotb ensures by - default. + This will only add simulator timestamps if the handler object this + formatter is attached to has a :class:`SimTimeContextFilter` filter + attached, which cocotb ensures by default. """ # Removes the arguments from the base class. Docstring needed to make
{"golden_diff": "diff --git a/cocotb/log.py b/cocotb/log.py\n--- a/cocotb/log.py\n+++ b/cocotb/log.py\n@@ -72,17 +72,16 @@\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n+ hdlr.addFilter(SimTimeContextFilter())\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n \n- filter = SimTimeContextFilter()\n \n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n- logging.getLogger().filters = [filter]\n \n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n@@ -139,9 +138,9 @@\n \"\"\"\n \n # needed to make our docs render well\n- def __init__(self, *args, **kwargs):\n- \"\"\" See :class:`logging.Filter` for argument descriptions \"\"\"\n- super().__init__(*args, **kwargs)\n+ def __init__(self):\n+ \"\"\" Takes no arguments \"\"\"\n+ super().__init__()\n \n def filter(self, record):\n try:\n@@ -156,9 +155,9 @@\n class SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n \n- This will only add simulator timestamps if the logger object has a\n- :class:`SimTimeContextFilter` filter attached, which cocotb ensures by\n- default.\n+ This will only add simulator timestamps if the handler object this\n+ formatter is attached to has a :class:`SimTimeContextFilter` filter\n+ attached, which cocotb ensures by default.\n \"\"\"\n \n # Removes the arguments from the base class. Docstring needed to make\n", "issue": "Simulator timestamps are broken in logs\nLooks like gh-1411 didn't work properly. I'll try and find the fix shortly, but if we want to do a release before I do we should revert it.\n", "before_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import (\n get_sim_time, get_time_from_sim_steps, want_color_output\n)\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested. It also adds a\n :class:`SimTimeContextFilter` filter so that\n :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance, for which examples can be\n found online.\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n filter = SimTimeContextFilter()\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n logging.getLogger().filters = [filter]\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError:\n log.error(\"Unable to set logging level to %r\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n if \"COCOTB_SIM\" in os.environ:\n import simulator\n simulator.log_level(_default_log)\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % 
(name, ident)\n return logging.getLogger(name)\n\n\nclass SimTimeContextFilter(logging.Filter):\n \"\"\"\n A filter to inject simulator times into the log records.\n\n This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.\n\n This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.\n \"\"\"\n\n # needed to make our docs render well\n def __init__(self, *args, **kwargs):\n \"\"\" See :class:`logging.Filter` for argument descriptions \"\"\"\n super().__init__(*args, **kwargs)\n\n def filter(self, record):\n try:\n record.created_sim_time = get_sim_time()\n except RecursionError:\n # get_sim_time may try to log - if that happens, we can't\n # attach a simulator time to this message.\n record.created_sim_time = None\n return True\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n\n This will only add simulator timestamps if the logger object has a\n :class:`SimTimeContextFilter` filter attached, which cocotb ensures by\n default.\n \"\"\"\n\n # Removes the arguments from the base class. Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\" Takes no arguments. \"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n sim_time = getattr(record, 'created_sim_time', None)\n if sim_time is None:\n sim_time_str = \" -.--ns\"\n else:\n time_ns = get_time_from_sim_steps(sim_time, 'ns')\n sim_time_str = \"{:6.2f}ns\".format(time_ns)\n prefix = sim_time_str.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n 
msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}], "after_files": [{"content": "# Copyright (c) 2013, 2018 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\"\"\"\nEverything related to logging\n\"\"\"\n\nimport os\nimport sys\nimport logging\nimport warnings\n\nfrom cocotb.utils import (\n get_sim_time, get_time_from_sim_steps, want_color_output\n)\n\nimport cocotb.ANSI as ANSI\n\nif \"COCOTB_REDUCED_LOG_FMT\" in os.environ:\n _suppress = True\nelse:\n _suppress = False\n\n# Column alignment\n_LEVEL_CHARS = len(\"CRITICAL\") # noqa\n_RECORD_CHARS = 35 # noqa\n_FILENAME_CHARS = 20 # noqa\n_LINENO_CHARS = 4 # noqa\n_FUNCNAME_CHARS = 31 # noqa\n\n\ndef default_config():\n \"\"\" Apply the default cocotb log formatting to the root logger.\n\n This hooks up the logger to write to stdout, using either\n :class:`SimColourLogFormatter` or :class:`SimLogFormatter` depending\n on whether colored output is requested. 
It also adds a\n :class:`SimTimeContextFilter` filter so that\n :attr:`~logging.LogRecord.created_sim_time` is available to the formatter.\n\n The logging level for cocotb logs is set based on the\n :envvar:`COCOTB_LOG_LEVEL` environment variable, which defaults to ``INFO``.\n\n If desired, this logging configuration can be overwritten by calling\n ``logging.basicConfig(..., force=True)`` (in Python 3.8 onwards), or by\n manually resetting the root logger instance, for which examples can be\n found online.\n \"\"\"\n # construct an appropriate handler\n hdlr = logging.StreamHandler(sys.stdout)\n hdlr.addFilter(SimTimeContextFilter())\n if want_color_output():\n hdlr.setFormatter(SimColourLogFormatter())\n else:\n hdlr.setFormatter(SimLogFormatter())\n\n\n logging.setLoggerClass(SimBaseLog) # For backwards compatibility\n logging.basicConfig()\n logging.getLogger().handlers = [hdlr] # overwrite default handlers\n\n # apply level settings for cocotb\n log = logging.getLogger('cocotb')\n level = os.getenv(\"COCOTB_LOG_LEVEL\", \"INFO\")\n try:\n _default_log = getattr(logging, level)\n except AttributeError:\n log.error(\"Unable to set logging level to %r\" % level)\n _default_log = logging.INFO\n log.setLevel(_default_log)\n\n # Notify GPI of log level, which it uses as an optimization to avoid\n # calling into Python.\n if \"COCOTB_SIM\" in os.environ:\n import simulator\n simulator.log_level(_default_log)\n\n\nclass SimBaseLog(logging.getLoggerClass()):\n \"\"\" This class only exists for backwards compatibility \"\"\"\n\n @property\n def logger(self):\n warnings.warn(\n \"the .logger attribute should not be used now that `SimLog` \"\n \"returns a native logger instance directly.\",\n DeprecationWarning, stacklevel=2)\n return self\n\n @property\n def colour(self):\n warnings.warn(\n \"the .colour attribute may be removed in future, use the \"\n \"equivalent `cocotb.utils.want_color_output()` instead\",\n DeprecationWarning, stacklevel=2)\n return want_color_output()\n\n\n# this used to be a class, hence the unusual capitalization\ndef SimLog(name, ident=None):\n \"\"\" Like logging.getLogger, but append a numeric identifier to the name \"\"\"\n if ident is not None:\n name = \"%s.0x%x\" % (name, ident)\n return logging.getLogger(name)\n\n\nclass SimTimeContextFilter(logging.Filter):\n \"\"\"\n A filter to inject simulator times into the log records.\n\n This uses the approach described in the :ref:`Python logging cookbook <python:filters-contextual>`.\n\n This adds the :attr:`~logging.LogRecord.created_sim_time` attribute.\n \"\"\"\n\n # needed to make our docs render well\n def __init__(self):\n \"\"\" Takes no arguments \"\"\"\n super().__init__()\n\n def filter(self, record):\n try:\n record.created_sim_time = get_sim_time()\n except RecursionError:\n # get_sim_time may try to log - if that happens, we can't\n # attach a simulator time to this message.\n record.created_sim_time = None\n return True\n\n\nclass SimLogFormatter(logging.Formatter):\n \"\"\"Log formatter to provide consistent log message handling.\n\n This will only add simulator timestamps if the handler object this\n formatter is attached to has a :class:`SimTimeContextFilter` filter\n attached, which cocotb ensures by default.\n \"\"\"\n\n # Removes the arguments from the base class. Docstring needed to make\n # sphinx happy.\n def __init__(self):\n \"\"\" Takes no arguments. 
\"\"\"\n super().__init__()\n\n # Justify and truncate\n @staticmethod\n def ljust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.ljust(chars)\n\n @staticmethod\n def rjust(string, chars):\n if len(string) > chars:\n return \"..\" + string[(chars - 2) * -1:]\n return string.rjust(chars)\n\n def _format(self, level, record, msg, coloured=False):\n sim_time = getattr(record, 'created_sim_time', None)\n if sim_time is None:\n sim_time_str = \" -.--ns\"\n else:\n time_ns = get_time_from_sim_steps(sim_time, 'ns')\n sim_time_str = \"{:6.2f}ns\".format(time_ns)\n prefix = sim_time_str.rjust(11) + ' ' + level + ' '\n if not _suppress:\n prefix += self.ljust(record.name, _RECORD_CHARS) + \\\n self.rjust(os.path.split(record.filename)[1], _FILENAME_CHARS) + \\\n ':' + self.ljust(str(record.lineno), _LINENO_CHARS) + \\\n ' in ' + self.ljust(str(record.funcName), _FUNCNAME_CHARS) + ' '\n\n # these lines are copied from the builtin logger\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if msg[-1:] != \"\\n\":\n msg = msg + \"\\n\"\n msg = msg + record.exc_text\n\n prefix_len = len(prefix)\n if coloured:\n prefix_len -= (len(level) - _LEVEL_CHARS)\n pad = \"\\n\" + \" \" * (prefix_len)\n return prefix + pad.join(msg.split('\\n'))\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n level = record.levelname.ljust(_LEVEL_CHARS)\n\n return self._format(level, record, msg)\n\n\nclass SimColourLogFormatter(SimLogFormatter):\n \"\"\"Log formatter to provide consistent log message handling.\"\"\"\n\n loglevel2colour = {\n logging.DEBUG : \"%s\",\n logging.INFO : ANSI.COLOR_INFO + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.WARNING : ANSI.COLOR_WARNING + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.ERROR : ANSI.COLOR_ERROR + \"%s\" + ANSI.COLOR_DEFAULT,\n logging.CRITICAL: ANSI.COLOR_CRITICAL + \"%s\" + ANSI.COLOR_DEFAULT,\n }\n\n def format(self, record):\n \"\"\"Prettify the log output, annotate with simulation time\"\"\"\n\n msg = record.getMessage()\n\n # Need to colour each line in case coloring is applied in the message\n msg = '\\n'.join([SimColourLogFormatter.loglevel2colour[record.levelno] % line for line in msg.split('\\n')])\n level = (SimColourLogFormatter.loglevel2colour[record.levelno] %\n record.levelname.ljust(_LEVEL_CHARS))\n\n return self._format(level, record, msg, coloured=True)\n\n\ndef _filter_from_c(logger_name, level):\n return logging.getLogger(logger_name).isEnabledFor(level)\n\n\ndef _log_from_c(logger_name, level, filename, lineno, msg, function_name):\n \"\"\"\n This is for use from the C world, and allows us to insert C stack\n information.\n \"\"\"\n logger = logging.getLogger(logger_name)\n if logger.isEnabledFor(level):\n record = logger.makeRecord(\n logger.name,\n level,\n filename,\n lineno,\n msg,\n None,\n None,\n function_name\n )\n logger.handle(record)\n", "path": "cocotb/log.py"}]}
3,248
426
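The cocotb record above turns on a stdlib `logging` detail worth spelling out: a filter attached to a logger runs only for records logged through that logger itself, so records that merely propagate up from child loggers (here, anything under `cocotb.*`) never pass through the root logger's filters, whereas a filter attached to a handler runs for every record that handler emits. A minimal, self-contained illustration with generic names (not cocotb code):

```python
import logging
import sys

class MarkFilter(logging.Filter):
    """Injects an extra attribute, much as SimTimeContextFilter injects created_sim_time."""
    def filter(self, record):
        record.mark = "stamped"
        return True

handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter("%(name)s %(mark)s %(message)s"))

root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)

# Broken arrangement (the pre-fix shape): root.addFilter(MarkFilter())
# -- a record logged via logging.getLogger("child.module") propagates to the
# root handler without running root's own filters, so %(mark)s is missing and
# the handler reports a "Logging error".
handler.addFilter(MarkFilter())  # fixed arrangement: every emitted record gets .mark

logging.getLogger("child.module").info("hello")  # prints: child.module stamped hello
```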
gh_patches_debug_3202
rasdani/github-patches
git_diff
hylang__hy-2190
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add `project_urls` to `setup.py` This would allow us to provide links to our GitHub repository etc. in a sidebar on PyPI. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import find_packages, setup 4 import fastentrypoints # Monkey-patches setuptools. 5 6 from get_version import __version__ 7 8 os.chdir(os.path.split(os.path.abspath(__file__))[0]) 9 10 PKG = "hy" 11 12 long_description = """Hy is a Python <--> Lisp layer. It helps 13 make things work nicer, and lets Python and the Hy lisp variant play 14 nice together. """ 15 16 setup( 17 name=PKG, 18 version=__version__, 19 install_requires=[ 20 'rply>=0.7.7', 21 'funcparserlib>=1.0.0a0', 22 'colorama', 23 'astor>=0.8 ; python_version < "3.9"', 24 ], 25 python_requires = '>= 3.7, <= 3.10', 26 entry_points={ 27 'console_scripts': [ 28 'hy = hy.cmdline:hy_main', 29 'hy3 = hy.cmdline:hy_main', 30 'hyc = hy.cmdline:hyc_main', 31 'hyc3 = hy.cmdline:hyc_main', 32 'hy2py = hy.cmdline:hy2py_main', 33 'hy2py3 = hy.cmdline:hy2py_main', 34 ] 35 }, 36 packages=find_packages(exclude=['tests*']), 37 package_data={ 38 'hy': ['*.hy', '__pycache__/*'], 39 'hy.contrib': ['*.hy', '__pycache__/*'], 40 'hy.core': ['*.hy', '__pycache__/*'], 41 'hy.extra': ['*.hy', '__pycache__/*'], 42 }, 43 data_files=[ 44 ('get_version', ['get_version.py']) 45 ], 46 author="Paul Tagliamonte", 47 author_email="[email protected]", 48 long_description=long_description, 49 description='Lisp and Python love each other.', 50 license="Expat", 51 url="http://hylang.org/", 52 platforms=['any'], 53 classifiers=[ 54 "Development Status :: 4 - Beta", 55 "Intended Audience :: Developers", 56 "License :: DFSG approved", 57 "License :: OSI Approved :: MIT License", # Really "Expat". Ugh. 58 "Operating System :: OS Independent", 59 "Programming Language :: Lisp", 60 "Programming Language :: Python", 61 "Programming Language :: Python :: 3", 62 "Programming Language :: Python :: 3.7", 63 "Programming Language :: Python :: 3.8", 64 "Programming Language :: Python :: 3.9", 65 "Programming Language :: Python :: 3.10", 66 "Topic :: Software Development :: Code Generators", 67 "Topic :: Software Development :: Compilers", 68 "Topic :: Software Development :: Libraries", 69 ] 70 ) 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -66,5 +66,9 @@ "Topic :: Software Development :: Code Generators", "Topic :: Software Development :: Compilers", "Topic :: Software Development :: Libraries", - ] + ], + project_urls={ + "Documentation": "https://docs.hylang.org/", + "Source": "https://github.com/hylang/hy", + } )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -66,5 +66,9 @@\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n- ]\n+ ],\n+ project_urls={\n+ \"Documentation\": \"https://docs.hylang.org/\",\n+ \"Source\": \"https://github.com/hylang/hy\",\n+ }\n )\n", "issue": "Add `project_urls` to `setup.py`\nThis would allow us to provide links to our GitHub repository etc. in a sidebar on PyPI.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\nimport fastentrypoints # Monkey-patches setuptools.\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. \"\"\"\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=[\n 'rply>=0.7.7',\n 'funcparserlib>=1.0.0a0',\n 'colorama',\n 'astor>=0.8 ; python_version < \"3.9\"',\n ],\n python_requires = '>= 3.7, <= 3.10',\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy3 = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hyc3 = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py3 = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy': ['*.hy', '__pycache__/*'],\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\nimport fastentrypoints # Monkey-patches setuptools.\n\nfrom get_version import __version__\n\nos.chdir(os.path.split(os.path.abspath(__file__))[0])\n\nPKG = \"hy\"\n\nlong_description = \"\"\"Hy is a Python <--> Lisp layer. It helps\nmake things work nicer, and lets Python and the Hy lisp variant play\nnice together. 
\"\"\"\n\nsetup(\n name=PKG,\n version=__version__,\n install_requires=[\n 'rply>=0.7.7',\n 'funcparserlib>=1.0.0a0',\n 'colorama',\n 'astor>=0.8 ; python_version < \"3.9\"',\n ],\n python_requires = '>= 3.7, <= 3.10',\n entry_points={\n 'console_scripts': [\n 'hy = hy.cmdline:hy_main',\n 'hy3 = hy.cmdline:hy_main',\n 'hyc = hy.cmdline:hyc_main',\n 'hyc3 = hy.cmdline:hyc_main',\n 'hy2py = hy.cmdline:hy2py_main',\n 'hy2py3 = hy.cmdline:hy2py_main',\n ]\n },\n packages=find_packages(exclude=['tests*']),\n package_data={\n 'hy': ['*.hy', '__pycache__/*'],\n 'hy.contrib': ['*.hy', '__pycache__/*'],\n 'hy.core': ['*.hy', '__pycache__/*'],\n 'hy.extra': ['*.hy', '__pycache__/*'],\n },\n data_files=[\n ('get_version', ['get_version.py'])\n ],\n author=\"Paul Tagliamonte\",\n author_email=\"[email protected]\",\n long_description=long_description,\n description='Lisp and Python love each other.',\n license=\"Expat\",\n url=\"http://hylang.org/\",\n platforms=['any'],\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: DFSG approved\",\n \"License :: OSI Approved :: MIT License\", # Really \"Expat\". Ugh.\n \"Operating System :: OS Independent\",\n \"Programming Language :: Lisp\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Software Development :: Code Generators\",\n \"Topic :: Software Development :: Compilers\",\n \"Topic :: Software Development :: Libraries\",\n ],\n project_urls={\n \"Documentation\": \"https://docs.hylang.org/\",\n \"Source\": \"https://github.com/hylang/hy\",\n }\n)\n", "path": "setup.py"}]}
1,002
107
gh_patches_debug_22425
rasdani/github-patches
git_diff
liqd__a4-meinberlin-68
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- HTML editor in create-text process not fully working The HTML editor in create-text-process not working. I currently can't add links to other website or upload pictures ;) <img width="803" alt="bildschirmfoto 2017-02-13 um 17 24 40" src="https://cloud.githubusercontent.com/assets/11075214/22891998/5bc735ac-f211-11e6-8766-a7588873b436.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/urls.py` Content: ``` 1 """meinberlin URL Configuration.""" 2 3 from allauth import urls as allauth_urls 4 from ckeditor_uploader import views as ck_views 5 from django.conf.urls import include 6 from django.conf.urls import url 7 from django.contrib import admin 8 from django.contrib.auth.decorators import login_required 9 from django.views.decorators.cache import never_cache 10 from django.views.i18n import javascript_catalog 11 from rest_framework import routers 12 from wagtail.wagtailadmin import urls as wagtailadmin_urls 13 from wagtail.wagtailcore import urls as wagtail_urls 14 from wagtail.wagtaildocs import urls as wagtaildocs_urls 15 16 from adhocracy4.comments.api import CommentViewSet 17 from adhocracy4.projects import urls as projects_urls 18 from adhocracy4.ratings.api import RatingViewSet 19 20 from apps.dashboard import urls as dashboard_urls 21 from apps.documents import urls as paragraph_urls 22 from apps.documents.api import DocumentViewSet 23 from apps.ideas import urls as ideas_urls 24 25 js_info_dict = { 26 'packages': ('adhocracy4.comments',), 27 } 28 29 router = routers.DefaultRouter() 30 router.register(r'ratings', RatingViewSet, base_name='ratings') 31 router.register(r'comments', CommentViewSet, base_name='comments') 32 router.register(r'documents', DocumentViewSet, base_name='documents') 33 34 35 urlpatterns = [ 36 url(r'^django-admin/', include(admin.site.urls)), 37 url(r'^dashboard/', include(dashboard_urls)), 38 39 url(r'^admin/', include(wagtailadmin_urls)), 40 url(r'^accounts/', include(allauth_urls)), 41 url(r'^documents/', include(wagtaildocs_urls)), 42 url(r'^projects/', include(projects_urls)), 43 44 url(r'^ideas/', include(ideas_urls)), 45 url(r'^paragraphs/', include(paragraph_urls)), 46 47 url(r'^api/', include(router.urls)), 48 49 url(r'^upload/', 50 login_required(ck_views.upload), name='ckeditor_upload'), 51 url(r'^browse/', 52 never_cache(login_required(ck_views.browse)), name='ckeditor_browse'), 53 54 url(r'^jsi18n/$', javascript_catalog, 55 js_info_dict, name='javascript-catalog'), 56 url(r'', include(wagtail_urls)), 57 ] 58 ``` Path: `meinberlin/settings/base.py` Content: ``` 1 """ 2 Django settings for meinberlin project. 3 4 Generated by 'django-admin startproject' using Django 1.8.17. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/1.8/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/1.8/ref/settings/ 11 """ 12 13 # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 
14 import os 15 16 PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 BASE_DIR = os.path.dirname(PROJECT_DIR) 18 19 # Application definition 20 21 INSTALLED_APPS = ( 22 'django.contrib.sites', 23 'django.contrib.admin', 24 'django.contrib.auth', 25 'django.contrib.contenttypes', 26 'django.contrib.sessions', 27 'django.contrib.messages', 28 'django.contrib.staticfiles', 29 30 'wagtail.wagtailforms', 31 'wagtail.wagtailredirects', 32 'wagtail.wagtailembeds', 33 'wagtail.wagtailsites', 34 'wagtail.wagtailusers', 35 'wagtail.wagtailsnippets', 36 'wagtail.wagtaildocs', 37 'wagtail.wagtailimages', 38 'wagtail.wagtailsearch', 39 'wagtail.wagtailadmin', 40 'wagtail.wagtailcore', 41 'wagtail.contrib.wagtailstyleguide', 42 43 'taggit', # wagtail dependency 44 'rest_framework', 45 'allauth', 46 'allauth.account', 47 'allauth.socialaccount', 48 'rules.apps.AutodiscoverRulesConfig', 49 'easy_thumbnails', 50 'ckeditor', 51 'ckeditor_uploader', 52 53 'adhocracy4.contrib.apps.ContribConfig', 54 'adhocracy4.organisations.apps.OrganisationsConfig', 55 'adhocracy4.projects.apps.ProjectsConfig', 56 'adhocracy4.phases.apps.PhasesConfig', 57 'adhocracy4.modules.apps.ModulesConfig', 58 'adhocracy4.ratings.apps.RatingsConfig', 59 'adhocracy4.comments.apps.CommentsConfig', 60 61 'apps.contrib.apps.Config', 62 'apps.cms.apps.Config', 63 'apps.users.apps.Config', 64 'apps.projects.apps.Config', 65 'apps.organisations.apps.Config', 66 'apps.dashboard.apps.Config', 67 68 'apps.ideas.apps.Config', 69 'apps.documents.apps.Config', 70 ) 71 72 MIDDLEWARE_CLASSES = ( 73 'django.contrib.sessions.middleware.SessionMiddleware', 74 'django.middleware.common.CommonMiddleware', 75 'django.middleware.csrf.CsrfViewMiddleware', 76 'django.contrib.auth.middleware.AuthenticationMiddleware', 77 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 78 'django.contrib.messages.middleware.MessageMiddleware', 79 'django.middleware.clickjacking.XFrameOptionsMiddleware', 80 'django.middleware.security.SecurityMiddleware', 81 'django.middleware.locale.LocaleMiddleware', 82 83 'wagtail.wagtailcore.middleware.SiteMiddleware', 84 'wagtail.wagtailredirects.middleware.RedirectMiddleware', 85 ) 86 87 SITE_ID = 1 88 89 ROOT_URLCONF = 'meinberlin.urls' 90 91 LOCALE_PATHS = [os.path.join(PROJECT_DIR, 'locale')] 92 93 TEMPLATES = [ 94 { 95 'BACKEND': 'django.template.backends.django.DjangoTemplates', 96 'DIRS': [ 97 os.path.join(PROJECT_DIR, 'templates'), 98 ], 99 'APP_DIRS': True, 100 'OPTIONS': { 101 'context_processors': [ 102 'django.template.context_processors.debug', 103 'django.template.context_processors.request', 104 'django.contrib.auth.context_processors.auth', 105 'django.contrib.messages.context_processors.messages', 106 ], 107 }, 108 }, 109 ] 110 111 WSGI_APPLICATION = 'meinberlin.wsgi.application' 112 113 114 # Database 115 # https://docs.djangoproject.com/en/1.8/ref/settings/#databases 116 117 DATABASES = { 118 'default': { 119 'ENGINE': 'django.db.backends.sqlite3', 120 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 121 } 122 } 123 124 125 # Internationalization 126 # https://docs.djangoproject.com/en/1.8/topics/i18n/ 127 128 LANGUAGE_CODE = 'en-us' 129 130 TIME_ZONE = 'UTC' 131 132 USE_I18N = True 133 134 USE_L10N = True 135 136 USE_TZ = True 137 138 139 # Static files (CSS, JavaScript, Images) 140 # https://docs.djangoproject.com/en/1.8/howto/static-files/ 141 142 STATICFILES_DIRS = [ 143 os.path.join(PROJECT_DIR, 'static'), 144 ] 145 146 STATIC_ROOT = os.path.join(BASE_DIR, 'static') 147 
STATIC_URL = '/static/' 148 149 IMAGE_ALIASES = { 150 '*': { 151 'max_size': 5*10**6, 152 'fileformats': ('image/png', 'image/jpeg', 'image/gif') 153 }, 154 'heroimage': {'min_resolution': (1300, 600)}, 155 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)}, 156 'avatar': {'min_resolution': (200, 200)}, 157 'idea_image': {'min_resolution': (800, 200)}, 158 } 159 160 ALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif') 161 162 163 # Wagtail settings 164 165 WAGTAIL_SITE_NAME = 'meinberlin' 166 167 # Base URL to use when referring to full URLs within the Wagtail admin backend - 168 # e.g. in notification emails. Don't include '/admin' or a trailing slash 169 BASE_URL = 'http://localhost:8000' 170 171 # Authentication 172 173 AUTH_USER_MODEL = 'meinberlin_users.User' 174 175 AUTHENTICATION_BACKENDS = ( 176 'rules.permissions.ObjectPermissionBackend', 177 'django.contrib.auth.backends.ModelBackend', 178 'allauth.account.auth_backends.AuthenticationBackend', 179 ) 180 181 ACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter' 182 ACCOUNT_AUTHENTICATION_METHOD = 'email' 183 ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3 184 ACCOUNT_EMAIL_REQUIRED = True 185 ACCOUNT_EMAIL_VERIFICATION = 'mandatory' 186 ACCOUNT_USERNAME_REQUIRED = True 187 ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10 188 ACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds 189 ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True 190 ACCOUNT_LOGIN_ON_PASSWORD_RESET = True 191 192 LOGIN_REDIRECT_URL = '/' 193 194 EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' 195 196 197 # ckeditor 198 199 CKEDITOR_UPLOAD_PATH = "uploads/" 200 CKEDITOR_RESTRICT_BY_USER = True 201 CKEDITOR_ALLOW_NONIMAGE_FILES = False 202 203 CKEDITOR_CONFIGS = { 204 'default': { 205 'width': '100%', 206 'toolbar': 'Custom', 207 'toolbar_Custom': [ 208 ['Bold', 'Italic', 'Underline'], 209 ['NumberedList', 'BulletedList'], 210 ['Link', 'Unlink'] 211 ] 212 }, 213 'image-editor': { 214 'width': '100%', 215 'toolbar': 'Custom', 216 'toolbar_Custom': [ 217 ['Bold', 'Italic', 'Underline'], 218 ['Image'], 219 ['NumberedList', 'BulletedList'], 220 ['Link', 'Unlink'] 221 ] 222 } 223 } 224 225 BLEACH_LIST = { 226 'default' : { 227 'tags': ['p','strong','em','u','ol','li','ul','a'], 228 'attributes': { 229 'a': ['href', 'rel'], 230 }, 231 }, 232 'image-editor': { 233 'tags': ['p','strong','em','u','ol','li','ul','a','img'], 234 'attributes': { 235 'a': ['href', 'rel'], 236 'img': ['src', 'alt', 'style'] 237 }, 238 'styles': [ 239 'float', 240 'margin', 241 'padding', 242 'width', 243 'height', 244 'margin-bottom', 245 'margin-top', 246 'margin-left', 247 'margin-right', 248 ], 249 } 250 } 251 252 253 # adhocracy4 254 255 A4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation' 256 257 A4_RATEABLES = ( 258 ('meinberlin_ideas', 'idea'), 259 ) 260 261 A4_COMMENTABLES = ( 262 ('meinberlin_ideas', 'idea'), 263 ) 264 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/settings/base.py b/meinberlin/settings/base.py --- a/meinberlin/settings/base.py +++ b/meinberlin/settings/base.py @@ -146,6 +146,9 @@ STATIC_ROOT = os.path.join(BASE_DIR, 'static') STATIC_URL = '/static/' +MEDIA_ROOT = os.path.join(BASE_DIR, 'media') +MEDIA_URL = '/media/' + IMAGE_ALIASES = { '*': { 'max_size': 5*10**6, diff --git a/meinberlin/urls.py b/meinberlin/urls.py --- a/meinberlin/urls.py +++ b/meinberlin/urls.py @@ -2,6 +2,7 @@ from allauth import urls as allauth_urls from ckeditor_uploader import views as ck_views +from django.conf import settings from django.conf.urls import include from django.conf.urls import url from django.contrib import admin @@ -55,3 +56,12 @@ js_info_dict, name='javascript-catalog'), url(r'', include(wagtail_urls)), ] + +if settings.DEBUG: + from django.conf.urls.static import static + from django.contrib.staticfiles.urls import staticfiles_urlpatterns + + # Serve static and media locally + urlpatterns += staticfiles_urlpatterns() + urlpatterns += static(settings.MEDIA_URL, + document_root=settings.MEDIA_ROOT)
{"golden_diff": "diff --git a/meinberlin/settings/base.py b/meinberlin/settings/base.py\n--- a/meinberlin/settings/base.py\n+++ b/meinberlin/settings/base.py\n@@ -146,6 +146,9 @@\n STATIC_ROOT = os.path.join(BASE_DIR, 'static')\n STATIC_URL = '/static/'\n \n+MEDIA_ROOT = os.path.join(BASE_DIR, 'media')\n+MEDIA_URL = '/media/'\n+\n IMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\ndiff --git a/meinberlin/urls.py b/meinberlin/urls.py\n--- a/meinberlin/urls.py\n+++ b/meinberlin/urls.py\n@@ -2,6 +2,7 @@\n \n from allauth import urls as allauth_urls\n from ckeditor_uploader import views as ck_views\n+from django.conf import settings\n from django.conf.urls import include\n from django.conf.urls import url\n from django.contrib import admin\n@@ -55,3 +56,12 @@\n js_info_dict, name='javascript-catalog'),\n url(r'', include(wagtail_urls)),\n ]\n+\n+if settings.DEBUG:\n+ from django.conf.urls.static import static\n+ from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n+\n+ # Serve static and media locally\n+ urlpatterns += staticfiles_urlpatterns()\n+ urlpatterns += static(settings.MEDIA_URL,\n+ document_root=settings.MEDIA_ROOT)\n", "issue": "HTML editor in create-text process not fully working\nThe HTML editor in create-text-process not working. I currently can't add links to other website or upload pictures ;)\r\n\r\n<img width=\"803\" alt=\"bildschirmfoto 2017-02-13 um 17 24 40\" src=\"https://cloud.githubusercontent.com/assets/11075214/22891998/5bc735ac-f211-11e6-8766-a7588873b436.png\">\r\n\n", "before_files": [{"content": "\"\"\"meinberlin URL Configuration.\"\"\"\n\nfrom allauth import urls as allauth_urls\nfrom ckeditor_uploader import views as ck_views\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import never_cache\nfrom django.views.i18n import javascript_catalog\nfrom rest_framework import routers\nfrom wagtail.wagtailadmin import urls as wagtailadmin_urls\nfrom wagtail.wagtailcore import urls as wagtail_urls\nfrom wagtail.wagtaildocs import urls as wagtaildocs_urls\n\nfrom adhocracy4.comments.api import CommentViewSet\nfrom adhocracy4.projects import urls as projects_urls\nfrom adhocracy4.ratings.api import RatingViewSet\n\nfrom apps.dashboard import urls as dashboard_urls\nfrom apps.documents import urls as paragraph_urls\nfrom apps.documents.api import DocumentViewSet\nfrom apps.ideas import urls as ideas_urls\n\njs_info_dict = {\n 'packages': ('adhocracy4.comments',),\n}\n\nrouter = routers.DefaultRouter()\nrouter.register(r'ratings', RatingViewSet, base_name='ratings')\nrouter.register(r'comments', CommentViewSet, base_name='comments')\nrouter.register(r'documents', DocumentViewSet, base_name='documents')\n\n\nurlpatterns = [\n url(r'^django-admin/', include(admin.site.urls)),\n url(r'^dashboard/', include(dashboard_urls)),\n\n url(r'^admin/', include(wagtailadmin_urls)),\n url(r'^accounts/', include(allauth_urls)),\n url(r'^documents/', include(wagtaildocs_urls)),\n url(r'^projects/', include(projects_urls)),\n\n url(r'^ideas/', include(ideas_urls)),\n url(r'^paragraphs/', include(paragraph_urls)),\n\n url(r'^api/', include(router.urls)),\n\n url(r'^upload/',\n login_required(ck_views.upload), name='ckeditor_upload'),\n url(r'^browse/',\n never_cache(login_required(ck_views.browse)), name='ckeditor_browse'),\n\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n url(r'', 
include(wagtail_urls)),\n]\n", "path": "meinberlin/urls.py"}, {"content": "\"\"\"\nDjango settings for meinberlin project.\n\nGenerated by 'django-admin startproject' using Django 1.8.17.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'taggit', # wagtail dependency\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n\n 'adhocracy4.contrib.apps.ContribConfig',\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n\n 'apps.contrib.apps.Config',\n 'apps.cms.apps.Config',\n 'apps.users.apps.Config',\n 'apps.projects.apps.Config',\n 'apps.organisations.apps.Config',\n 'apps.dashboard.apps.Config',\n\n 'apps.ideas.apps.Config',\n 'apps.documents.apps.Config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'meinberlin.urls'\n\nLOCALE_PATHS = [os.path.join(PROJECT_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meinberlin.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = 
True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1300, 600)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = 'meinberlin'\n\n# Base URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. Don't include '/admin' or a trailing slash\nBASE_URL = 'http://localhost:8000'\n\n# Authentication\n\nAUTH_USER_MODEL = 'meinberlin_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\n\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# ckeditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'\n\nA4_RATEABLES = (\n ('meinberlin_ideas', 'idea'),\n)\n\nA4_COMMENTABLES = (\n ('meinberlin_ideas', 'idea'),\n)\n", "path": "meinberlin/settings/base.py"}], "after_files": [{"content": "\"\"\"meinberlin URL Configuration.\"\"\"\n\nfrom allauth import urls as allauth_urls\nfrom ckeditor_uploader import views as ck_views\nfrom django.conf import settings\nfrom django.conf.urls import include\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.cache import never_cache\nfrom django.views.i18n import javascript_catalog\nfrom rest_framework import routers\nfrom wagtail.wagtailadmin import urls as wagtailadmin_urls\nfrom wagtail.wagtailcore import urls as wagtail_urls\nfrom 
wagtail.wagtaildocs import urls as wagtaildocs_urls\n\nfrom adhocracy4.comments.api import CommentViewSet\nfrom adhocracy4.projects import urls as projects_urls\nfrom adhocracy4.ratings.api import RatingViewSet\n\nfrom apps.dashboard import urls as dashboard_urls\nfrom apps.documents import urls as paragraph_urls\nfrom apps.documents.api import DocumentViewSet\nfrom apps.ideas import urls as ideas_urls\n\njs_info_dict = {\n 'packages': ('adhocracy4.comments',),\n}\n\nrouter = routers.DefaultRouter()\nrouter.register(r'ratings', RatingViewSet, base_name='ratings')\nrouter.register(r'comments', CommentViewSet, base_name='comments')\nrouter.register(r'documents', DocumentViewSet, base_name='documents')\n\n\nurlpatterns = [\n url(r'^django-admin/', include(admin.site.urls)),\n url(r'^dashboard/', include(dashboard_urls)),\n\n url(r'^admin/', include(wagtailadmin_urls)),\n url(r'^accounts/', include(allauth_urls)),\n url(r'^documents/', include(wagtaildocs_urls)),\n url(r'^projects/', include(projects_urls)),\n\n url(r'^ideas/', include(ideas_urls)),\n url(r'^paragraphs/', include(paragraph_urls)),\n\n url(r'^api/', include(router.urls)),\n\n url(r'^upload/',\n login_required(ck_views.upload), name='ckeditor_upload'),\n url(r'^browse/',\n never_cache(login_required(ck_views.browse)), name='ckeditor_browse'),\n\n url(r'^jsi18n/$', javascript_catalog,\n js_info_dict, name='javascript-catalog'),\n url(r'', include(wagtail_urls)),\n]\n\nif settings.DEBUG:\n from django.conf.urls.static import static\n from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n\n # Serve static and media locally\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n", "path": "meinberlin/urls.py"}, {"content": "\"\"\"\nDjango settings for meinberlin project.\n\nGenerated by 'django-admin startproject' using Django 1.8.17.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/1.8/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/1.8/ref/settings/\n\"\"\"\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nimport os\n\nPROJECT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nBASE_DIR = os.path.dirname(PROJECT_DIR)\n\n# Application definition\n\nINSTALLED_APPS = (\n 'django.contrib.sites',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'wagtail.wagtailforms',\n 'wagtail.wagtailredirects',\n 'wagtail.wagtailembeds',\n 'wagtail.wagtailsites',\n 'wagtail.wagtailusers',\n 'wagtail.wagtailsnippets',\n 'wagtail.wagtaildocs',\n 'wagtail.wagtailimages',\n 'wagtail.wagtailsearch',\n 'wagtail.wagtailadmin',\n 'wagtail.wagtailcore',\n 'wagtail.contrib.wagtailstyleguide',\n\n 'taggit', # wagtail dependency\n 'rest_framework',\n 'allauth',\n 'allauth.account',\n 'allauth.socialaccount',\n 'rules.apps.AutodiscoverRulesConfig',\n 'easy_thumbnails',\n 'ckeditor',\n 'ckeditor_uploader',\n\n 'adhocracy4.contrib.apps.ContribConfig',\n 'adhocracy4.organisations.apps.OrganisationsConfig',\n 'adhocracy4.projects.apps.ProjectsConfig',\n 'adhocracy4.phases.apps.PhasesConfig',\n 'adhocracy4.modules.apps.ModulesConfig',\n 'adhocracy4.ratings.apps.RatingsConfig',\n 'adhocracy4.comments.apps.CommentsConfig',\n\n 'apps.contrib.apps.Config',\n 'apps.cms.apps.Config',\n 'apps.users.apps.Config',\n 
'apps.projects.apps.Config',\n 'apps.organisations.apps.Config',\n 'apps.dashboard.apps.Config',\n\n 'apps.ideas.apps.Config',\n 'apps.documents.apps.Config',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'django.middleware.security.SecurityMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n\n 'wagtail.wagtailcore.middleware.SiteMiddleware',\n 'wagtail.wagtailredirects.middleware.RedirectMiddleware',\n)\n\nSITE_ID = 1\n\nROOT_URLCONF = 'meinberlin.urls'\n\nLOCALE_PATHS = [os.path.join(PROJECT_DIR, 'locale')]\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [\n os.path.join(PROJECT_DIR, 'templates'),\n ],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meinberlin.wsgi.application'\n\n\n# Database\n# https://docs.djangoproject.com/en/1.8/ref/settings/#databases\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),\n }\n}\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/1.8/topics/i18n/\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/1.8/howto/static-files/\n\nSTATICFILES_DIRS = [\n os.path.join(PROJECT_DIR, 'static'),\n]\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static')\nSTATIC_URL = '/static/'\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media')\nMEDIA_URL = '/media/'\n\nIMAGE_ALIASES = {\n '*': {\n 'max_size': 5*10**6,\n 'fileformats': ('image/png', 'image/jpeg', 'image/gif')\n },\n 'heroimage': {'min_resolution': (1300, 600)},\n 'logo': {'min_resolution': (200, 200), 'aspect_ratio': (1, 1)},\n 'avatar': {'min_resolution': (200, 200)},\n 'idea_image': {'min_resolution': (800, 200)},\n}\n\nALLOWED_UPLOAD_IMAGES = ('png', 'jpeg', 'gif')\n\n\n# Wagtail settings\n\nWAGTAIL_SITE_NAME = 'meinberlin'\n\n# Base URL to use when referring to full URLs within the Wagtail admin backend -\n# e.g. in notification emails. 
Don't include '/admin' or a trailing slash\nBASE_URL = 'http://localhost:8000'\n\n# Authentication\n\nAUTH_USER_MODEL = 'meinberlin_users.User'\n\nAUTHENTICATION_BACKENDS = (\n 'rules.permissions.ObjectPermissionBackend',\n 'django.contrib.auth.backends.ModelBackend',\n 'allauth.account.auth_backends.AuthenticationBackend',\n)\n\nACCOUNT_ADAPTER = 'apps.users.adapters.AccountAdapter'\nACCOUNT_AUTHENTICATION_METHOD = 'email'\nACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 3\nACCOUNT_EMAIL_REQUIRED = True\nACCOUNT_EMAIL_VERIFICATION = 'mandatory'\nACCOUNT_USERNAME_REQUIRED = True\nACCOUNT_LOGIN_ATTEMPTS_LIMIT = 10\nACCOUNT_LOGIN_ATTEMPTS_TIMEOUT = 300 # seconds\nACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True\nACCOUNT_LOGIN_ON_PASSWORD_RESET = True\n\nLOGIN_REDIRECT_URL = '/'\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\n\n# ckeditor\n\nCKEDITOR_UPLOAD_PATH = \"uploads/\"\nCKEDITOR_RESTRICT_BY_USER = True\nCKEDITOR_ALLOW_NONIMAGE_FILES = False\n\nCKEDITOR_CONFIGS = {\n 'default': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n },\n 'image-editor': {\n 'width': '100%',\n 'toolbar': 'Custom',\n 'toolbar_Custom': [\n ['Bold', 'Italic', 'Underline'],\n ['Image'],\n ['NumberedList', 'BulletedList'],\n ['Link', 'Unlink']\n ]\n }\n}\n\nBLEACH_LIST = {\n 'default' : {\n 'tags': ['p','strong','em','u','ol','li','ul','a'],\n 'attributes': {\n 'a': ['href', 'rel'],\n },\n },\n 'image-editor': {\n 'tags': ['p','strong','em','u','ol','li','ul','a','img'],\n 'attributes': {\n 'a': ['href', 'rel'],\n 'img': ['src', 'alt', 'style']\n },\n 'styles': [\n 'float',\n 'margin',\n 'padding',\n 'width',\n 'height',\n 'margin-bottom',\n 'margin-top',\n 'margin-left',\n 'margin-right',\n ],\n }\n}\n\n\n# adhocracy4\n\nA4_ORGANISATIONS_MODEL = 'meinberlin_organisations.Organisation'\n\nA4_RATEABLES = (\n ('meinberlin_ideas', 'idea'),\n)\n\nA4_COMMENTABLES = (\n ('meinberlin_ideas', 'idea'),\n)\n", "path": "meinberlin/settings/base.py"}]}
3,418
318
gh_patches_debug_14832
rasdani/github-patches
git_diff
saulpw__visidata-1784
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [capture-col] When nothing is captured, "IndexError: list index out of range" is thrown _Minor user exp bug_ **Small description** When using `capture-col` on a column and the regex does not capture any values, the below error is thrown. No new column is created (as expected) **Expected result** Just to show the "no columns to add" error. Preferably, change the error to something more informative such as "no matches found, didn't add column" **Actual result with screenshot** ``` Traceback (most recent call last): File "/Users/geekscrapy7/lib/python3.7/site-packages/visidata/threads.py", line 200, in _toplevelTryFunc t.status = func(*args, **kwargs) File "/Users/geekscrapy7/lib/python3.7/site-packages/visidata/features/regex.py", line 77, in addRegexColumns vs.addColumnAtCursor(*cols.values()) File "/Users/geekscrapy7/lib/python3.7/site-packages/visidata/sheets.py", line 473, in addColumnAtCursor firstnewcol = [c for c in cols if not c.hidden][0] IndexError: list index out of range ``` I presume this error is from an insufficient catch of an exception and it is attempting to create the column even though there are no groups to show. **Steps to reproduce with sample data and a .vd** `# echo -e 'helloooo\nthere' | vd -f txt --config=/dev/null --visidata_dir=/dev/null` ```tsv sheet col row longname input keystrokes comment - text capture-col (ABC) ; add new column from capture groups of regex; requires example row ``` **Additional context** v2.12dev --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `visidata/features/regex.py` Content: ``` 1 import re 2 import random 3 4 from visidata import asyncthread, options, vd 5 from visidata import VisiData, BaseSheet, Sheet, Column, Progress 6 7 8 @Sheet.api 9 def setSubst(sheet, cols, rows): 10 if not rows: 11 vd.warning('no %s selected' % sheet.rowtype) 12 return 13 modified = 'column' if len(cols) == 1 else 'columns' 14 rex = vd.input("transform %s by regex: " % modified, type="regex-subst") 15 setValuesFromRegex(cols, rows, rex) 16 17 18 vd.option('regex_flags', 'I', 'flags to pass to re.compile() [AILMSUX]', replay=True) 19 vd.option('regex_maxsplit', 0, 'maxsplit to pass to regex.split', replay=True) 20 21 @VisiData.api 22 def makeRegexSplitter(vd, regex, origcol): 23 return lambda row, regex=regex, origcol=origcol, maxsplit=options.regex_maxsplit: regex.split(origcol.getDisplayValue(row), maxsplit=maxsplit) 24 25 @VisiData.api 26 def makeRegexMatcher(vd, regex, origcol): 27 def _regexMatcher(row): 28 m = regex.search(origcol.getDisplayValue(row)) 29 if m: 30 return m.groupdict() if m.groupdict() else m.groups() 31 return _regexMatcher 32 33 34 @Sheet.api 35 def RegexColumn(vs, regexMaker, origcol, regexstr): 36 regex = re.compile(regexstr, vs.regex_flags()) 37 func = regexMaker(regex, origcol) 38 return Column(origcol.name+'_re', 39 getter=lambda col,row,func=func: func(row), 40 origCol=origcol) 41 42 43 @Sheet.api 44 @asyncthread 45 def addRegexColumns(vs, regexMaker, origcol, regexstr): 46 regexstr or vd.fail('regex required') 47 48 regex = re.compile(regexstr, vs.regex_flags()) 49 50 func = regexMaker(regex, origcol) 51 52 cols = {} 53 ncols = 0 # number of new columns added already 54 for r in Progress(vs.getSampleRows()): 55 try: 56 m = func(r) 57 if not m: 58 continue 59 except Exception as e: 60 vd.exceptionCaught(e) 61 62 if 
isinstance(m, dict): 63 for name in m: 64 if name in cols: 65 continue 66 cols[name] = Column(origcol.name+'_'+str(name), 67 getter=lambda col,row,name=name,func=func: func(row)[name], 68 origCol=origcol) 69 elif isinstance(m, (tuple, list)): 70 for _ in range(len(m)-len(cols)): 71 cols[len(cols)] = Column(origcol.name+'_re'+str(len(cols)), 72 getter=lambda col,row,i=len(cols),func=func: func(row)[i], 73 origCol=origcol) 74 else: 75 raise TypeError("addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a "+type(m).__name__) 76 77 vs.addColumnAtCursor(*cols.values()) 78 79 80 @VisiData.api 81 def regexTransform(vd, origcol, instr): 82 before, after = vd.parse_sed_transform(instr) 83 return lambda col,row,origcol=origcol,before=before,after=after,flags=origcol.sheet.regex_flags(): re.sub(before, after, origcol.getDisplayValue(row), flags=flags) 84 85 86 @VisiData.api 87 def parse_sed_transform(vd, instr): 88 i = indexWithEscape(instr, '/') 89 if i is None: 90 return instr, '' 91 else: 92 return instr[:i], instr[i+1:] 93 94 95 def indexWithEscape(s, char, escape_char='\\'): 96 i=0 97 while i < len(s): 98 if s[i] == escape_char: 99 i += 1 100 elif s[i] == char: 101 return i 102 i += 1 103 104 return None 105 106 107 @asyncthread 108 def setValuesFromRegex(cols, rows, rex): 109 transforms = [vd.regexTransform(col, rex) for col in cols] 110 vd.addUndoSetValues(cols, rows) 111 for r in Progress(rows, 'replacing'): 112 for col, transform in zip(cols, transforms): 113 col.setValueSafe(r, transform(col, r)) 114 for col in cols: 115 col.recalc() 116 117 118 @BaseSheet.api 119 def regex_flags(sheet): 120 'Return flags to pass to regex functions from options' 121 return sum(getattr(re, f.upper()) for f in sheet.options.regex_flags) 122 123 124 Sheet.addCommand(':', 'split-col', 'addRegexColumns(makeRegexSplitter, cursorCol, input("split regex: ", type="regex-split"))', 'Add new columns from regex split') 125 Sheet.addCommand(';', 'capture-col', 'addRegexColumns(makeRegexMatcher, cursorCol, input("capture regex: ", type="regex-capture"))', 'add new column from capture groups of regex; requires example row') 126 Sheet.addCommand('', 'addcol-split', 'addColumnAtCursor(RegexColumn(makeRegexSplitter, cursorCol, input("split regex: ", type="regex-split")))', 'Add column split by regex') 127 Sheet.addCommand('', 'addcol-capture', 'addColumnAtCursor(RegexColumn(makeRegexMatcher, cursorCol, input("capture regex: ", type="regex-capture")))', 'Add column captured by regex') 128 Sheet.addCommand('*', 'addcol-subst', 'addColumnAtCursor(Column(cursorCol.name + "_re", getter=regexTransform(cursorCol, input("transform column by regex: ", type="regex-subst"))))', 'add column derived from current column, replacing regex with subst (may include \1 backrefs)') 129 Sheet.addCommand('g*', 'setcol-subst', 'setSubst([cursorCol], someSelectedRows)', 'regex/subst - modify selected rows in current column, replacing regex with subst, (may include backreferences \\1 etc)') 130 Sheet.addCommand('gz*', 'setcol-subst-all', 'setSubst(visibleCols, someSelectedRows)', 'modify selected rows in all visible columns, replacing regex with subst (may include \\1 backrefs)') 131 132 133 vd.addMenuItems(''' 134 Edit > Modify > selected cells > regex substitution > setcol-subst 135 Column > Add column > capture by regex > addcol-capture 136 Column > Add column > split by regex > addcol-split 137 Column > Add column > subst by regex > addcol-subst 138 Column > Split > split-col 139 Row > Select > by regex > current column > 
select-col-regex 140 Row > Select > by regex > all columns > select-cols-regex 141 Row > Unselect > by regex > current column > unselect-col-regex 142 Row > Unselect > by regex > all columns > unselect-cols-regex 143 ''') 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/visidata/features/regex.py b/visidata/features/regex.py --- a/visidata/features/regex.py +++ b/visidata/features/regex.py @@ -24,6 +24,8 @@ @VisiData.api def makeRegexMatcher(vd, regex, origcol): + if not regex.groups: + vd.fail('specify a capture group') #1778 def _regexMatcher(row): m = regex.search(origcol.getDisplayValue(row)) if m: @@ -74,6 +76,10 @@ else: raise TypeError("addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a "+type(m).__name__) + if not cols: + vd.warning("no regex matches found, didn't add column") + return + vs.addColumnAtCursor(*cols.values())
{"golden_diff": "diff --git a/visidata/features/regex.py b/visidata/features/regex.py\n--- a/visidata/features/regex.py\n+++ b/visidata/features/regex.py\n@@ -24,6 +24,8 @@\n \n @VisiData.api\n def makeRegexMatcher(vd, regex, origcol):\n+ if not regex.groups:\n+ vd.fail('specify a capture group') #1778\n def _regexMatcher(row):\n m = regex.search(origcol.getDisplayValue(row))\n if m:\n@@ -74,6 +76,10 @@\n else:\n raise TypeError(\"addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a \"+type(m).__name__)\n \n+ if not cols:\n+ vd.warning(\"no regex matches found, didn't add column\")\n+ return\n+\n vs.addColumnAtCursor(*cols.values())\n", "issue": "[capture-col] When nothing is captured, \"IndexError: list index out of range\" is thrown\n_Minor user exp bug_\r\n\r\n**Small description**\r\nWhen using `capture-col` on a column and the regex does not capture any values, the below error is thrown. No new column is created (as expected)\r\n\r\n**Expected result**\r\nJust to show the \"no columns to add\" error. Preferably, change the error to something more informative such as \"no matches found, didn't add column\"\r\n\r\n**Actual result with screenshot**\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/geekscrapy7/lib/python3.7/site-packages/visidata/threads.py\", line 200, in _toplevelTryFunc\r\n t.status = func(*args, **kwargs)\r\n File \"/Users/geekscrapy7/lib/python3.7/site-packages/visidata/features/regex.py\", line 77, in addRegexColumns\r\n vs.addColumnAtCursor(*cols.values())\r\n File \"/Users/geekscrapy7/lib/python3.7/site-packages/visidata/sheets.py\", line 473, in addColumnAtCursor\r\n firstnewcol = [c for c in cols if not c.hidden][0]\r\nIndexError: list index out of range\r\n```\r\nI presume this error is from an insufficient catch of an exception and it is attempting to create the column even though there are no groups to show.\r\n\r\n**Steps to reproduce with sample data and a .vd**\r\n`# echo -e 'helloooo\\nthere' | vd -f txt --config=/dev/null --visidata_dir=/dev/null`\r\n\r\n```tsv\r\nsheet\tcol\trow\tlongname\tinput\tkeystrokes\tcomment\r\n-\ttext\t\tcapture-col\t(ABC)\t;\tadd new column from capture groups of regex; requires example row\r\n```\r\n\r\n**Additional context**\r\nv2.12dev\n", "before_files": [{"content": "import re\nimport random\n\nfrom visidata import asyncthread, options, vd\nfrom visidata import VisiData, BaseSheet, Sheet, Column, Progress\n\n\[email protected]\ndef setSubst(sheet, cols, rows):\n if not rows:\n vd.warning('no %s selected' % sheet.rowtype)\n return\n modified = 'column' if len(cols) == 1 else 'columns'\n rex = vd.input(\"transform %s by regex: \" % modified, type=\"regex-subst\")\n setValuesFromRegex(cols, rows, rex)\n\n\nvd.option('regex_flags', 'I', 'flags to pass to re.compile() [AILMSUX]', replay=True)\nvd.option('regex_maxsplit', 0, 'maxsplit to pass to regex.split', replay=True)\n\[email protected]\ndef makeRegexSplitter(vd, regex, origcol):\n return lambda row, regex=regex, origcol=origcol, maxsplit=options.regex_maxsplit: regex.split(origcol.getDisplayValue(row), maxsplit=maxsplit)\n\[email protected]\ndef makeRegexMatcher(vd, regex, origcol):\n def _regexMatcher(row):\n m = regex.search(origcol.getDisplayValue(row))\n if m:\n return m.groupdict() if m.groupdict() else m.groups()\n return _regexMatcher\n\n\[email protected]\ndef RegexColumn(vs, regexMaker, origcol, regexstr):\n regex = re.compile(regexstr, vs.regex_flags())\n func = regexMaker(regex, origcol)\n return Column(origcol.name+'_re',\n 
getter=lambda col,row,func=func: func(row),\n origCol=origcol)\n\n\[email protected]\n@asyncthread\ndef addRegexColumns(vs, regexMaker, origcol, regexstr):\n regexstr or vd.fail('regex required')\n\n regex = re.compile(regexstr, vs.regex_flags())\n\n func = regexMaker(regex, origcol)\n\n cols = {}\n ncols = 0 # number of new columns added already\n for r in Progress(vs.getSampleRows()):\n try:\n m = func(r)\n if not m:\n continue\n except Exception as e:\n vd.exceptionCaught(e)\n\n if isinstance(m, dict):\n for name in m:\n if name in cols:\n continue\n cols[name] = Column(origcol.name+'_'+str(name),\n getter=lambda col,row,name=name,func=func: func(row)[name],\n origCol=origcol)\n elif isinstance(m, (tuple, list)):\n for _ in range(len(m)-len(cols)):\n cols[len(cols)] = Column(origcol.name+'_re'+str(len(cols)),\n getter=lambda col,row,i=len(cols),func=func: func(row)[i],\n origCol=origcol)\n else:\n raise TypeError(\"addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a \"+type(m).__name__)\n\n vs.addColumnAtCursor(*cols.values())\n\n\[email protected]\ndef regexTransform(vd, origcol, instr):\n before, after = vd.parse_sed_transform(instr)\n return lambda col,row,origcol=origcol,before=before,after=after,flags=origcol.sheet.regex_flags(): re.sub(before, after, origcol.getDisplayValue(row), flags=flags)\n\n\[email protected]\ndef parse_sed_transform(vd, instr):\n i = indexWithEscape(instr, '/')\n if i is None:\n return instr, ''\n else:\n return instr[:i], instr[i+1:]\n\n\ndef indexWithEscape(s, char, escape_char='\\\\'):\n i=0\n while i < len(s):\n if s[i] == escape_char:\n i += 1\n elif s[i] == char:\n return i\n i += 1\n\n return None\n\n\n@asyncthread\ndef setValuesFromRegex(cols, rows, rex):\n transforms = [vd.regexTransform(col, rex) for col in cols]\n vd.addUndoSetValues(cols, rows)\n for r in Progress(rows, 'replacing'):\n for col, transform in zip(cols, transforms):\n col.setValueSafe(r, transform(col, r))\n for col in cols:\n col.recalc()\n\n\[email protected]\ndef regex_flags(sheet):\n 'Return flags to pass to regex functions from options'\n return sum(getattr(re, f.upper()) for f in sheet.options.regex_flags)\n\n\nSheet.addCommand(':', 'split-col', 'addRegexColumns(makeRegexSplitter, cursorCol, input(\"split regex: \", type=\"regex-split\"))', 'Add new columns from regex split')\nSheet.addCommand(';', 'capture-col', 'addRegexColumns(makeRegexMatcher, cursorCol, input(\"capture regex: \", type=\"regex-capture\"))', 'add new column from capture groups of regex; requires example row')\nSheet.addCommand('', 'addcol-split', 'addColumnAtCursor(RegexColumn(makeRegexSplitter, cursorCol, input(\"split regex: \", type=\"regex-split\")))', 'Add column split by regex')\nSheet.addCommand('', 'addcol-capture', 'addColumnAtCursor(RegexColumn(makeRegexMatcher, cursorCol, input(\"capture regex: \", type=\"regex-capture\")))', 'Add column captured by regex')\nSheet.addCommand('*', 'addcol-subst', 'addColumnAtCursor(Column(cursorCol.name + \"_re\", getter=regexTransform(cursorCol, input(\"transform column by regex: \", type=\"regex-subst\"))))', 'add column derived from current column, replacing regex with subst (may include \\1 backrefs)')\nSheet.addCommand('g*', 'setcol-subst', 'setSubst([cursorCol], someSelectedRows)', 'regex/subst - modify selected rows in current column, replacing regex with subst, (may include backreferences \\\\1 etc)')\nSheet.addCommand('gz*', 'setcol-subst-all', 'setSubst(visibleCols, someSelectedRows)', 'modify selected rows in all visible columns, 
replacing regex with subst (may include \\\\1 backrefs)')\n\n\nvd.addMenuItems('''\n Edit > Modify > selected cells > regex substitution > setcol-subst\n Column > Add column > capture by regex > addcol-capture\n Column > Add column > split by regex > addcol-split\n Column > Add column > subst by regex > addcol-subst\n Column > Split > split-col\n Row > Select > by regex > current column > select-col-regex\n Row > Select > by regex > all columns > select-cols-regex\n Row > Unselect > by regex > current column > unselect-col-regex\n Row > Unselect > by regex > all columns > unselect-cols-regex\n''')\n", "path": "visidata/features/regex.py"}], "after_files": [{"content": "import re\nimport random\n\nfrom visidata import asyncthread, options, vd\nfrom visidata import VisiData, BaseSheet, Sheet, Column, Progress\n\n\[email protected]\ndef setSubst(sheet, cols, rows):\n if not rows:\n vd.warning('no %s selected' % sheet.rowtype)\n return\n modified = 'column' if len(cols) == 1 else 'columns'\n rex = vd.input(\"transform %s by regex: \" % modified, type=\"regex-subst\")\n setValuesFromRegex(cols, rows, rex)\n\n\nvd.option('regex_flags', 'I', 'flags to pass to re.compile() [AILMSUX]', replay=True)\nvd.option('regex_maxsplit', 0, 'maxsplit to pass to regex.split', replay=True)\n\[email protected]\ndef makeRegexSplitter(vd, regex, origcol):\n return lambda row, regex=regex, origcol=origcol, maxsplit=options.regex_maxsplit: regex.split(origcol.getDisplayValue(row), maxsplit=maxsplit)\n\[email protected]\ndef makeRegexMatcher(vd, regex, origcol):\n if not regex.groups:\n vd.fail('specify a capture group') #1778\n def _regexMatcher(row):\n m = regex.search(origcol.getDisplayValue(row))\n if m:\n return m.groupdict() if m.groupdict() else m.groups()\n return _regexMatcher\n\n\[email protected]\ndef RegexColumn(vs, regexMaker, origcol, regexstr):\n regex = re.compile(regexstr, vs.regex_flags())\n func = regexMaker(regex, origcol)\n return Column(origcol.name+'_re',\n getter=lambda col,row,func=func: func(row),\n origCol=origcol)\n\n\[email protected]\n@asyncthread\ndef addRegexColumns(vs, regexMaker, origcol, regexstr):\n regexstr or vd.fail('regex required')\n\n regex = re.compile(regexstr, vs.regex_flags())\n\n func = regexMaker(regex, origcol)\n\n cols = {}\n ncols = 0 # number of new columns added already\n for r in Progress(vs.getSampleRows()):\n try:\n m = func(r)\n if not m:\n continue\n except Exception as e:\n vd.exceptionCaught(e)\n\n if isinstance(m, dict):\n for name in m:\n if name in cols:\n continue\n cols[name] = Column(origcol.name+'_'+str(name),\n getter=lambda col,row,name=name,func=func: func(row)[name],\n origCol=origcol)\n elif isinstance(m, (tuple, list)):\n for _ in range(len(m)-len(cols)):\n cols[len(cols)] = Column(origcol.name+'_re'+str(len(cols)),\n getter=lambda col,row,i=len(cols),func=func: func(row)[i],\n origCol=origcol)\n else:\n raise TypeError(\"addRegexColumns() expects a dict, list, or tuple from regexMaker, but got a \"+type(m).__name__)\n\n if not cols:\n vd.warning(\"no regex matches found, didn't add column\")\n return\n\n vs.addColumnAtCursor(*cols.values())\n\n\[email protected]\ndef regexTransform(vd, origcol, instr):\n before, after = vd.parse_sed_transform(instr)\n return lambda col,row,origcol=origcol,before=before,after=after,flags=origcol.sheet.regex_flags(): re.sub(before, after, origcol.getDisplayValue(row), flags=flags)\n\n\[email protected]\ndef parse_sed_transform(vd, instr):\n i = indexWithEscape(instr, '/')\n if i is None:\n return instr, ''\n 
else:\n return instr[:i], instr[i+1:]\n\n\ndef indexWithEscape(s, char, escape_char='\\\\'):\n i=0\n while i < len(s):\n if s[i] == escape_char:\n i += 1\n elif s[i] == char:\n return i\n i += 1\n\n return None\n\n\n@asyncthread\ndef setValuesFromRegex(cols, rows, rex):\n transforms = [vd.regexTransform(col, rex) for col in cols]\n vd.addUndoSetValues(cols, rows)\n for r in Progress(rows, 'replacing'):\n for col, transform in zip(cols, transforms):\n col.setValueSafe(r, transform(col, r))\n for col in cols:\n col.recalc()\n\n\[email protected]\ndef regex_flags(sheet):\n 'Return flags to pass to regex functions from options'\n return sum(getattr(re, f.upper()) for f in sheet.options.regex_flags)\n\n\nSheet.addCommand(':', 'split-col', 'addRegexColumns(makeRegexSplitter, cursorCol, input(\"split regex: \", type=\"regex-split\"))', 'Add new columns from regex split')\nSheet.addCommand(';', 'capture-col', 'addRegexColumns(makeRegexMatcher, cursorCol, input(\"capture regex: \", type=\"regex-capture\"))', 'add new column from capture groups of regex; requires example row')\nSheet.addCommand('', 'addcol-split', 'addColumnAtCursor(RegexColumn(makeRegexSplitter, cursorCol, input(\"split regex: \", type=\"regex-split\")))', 'Add column split by regex')\nSheet.addCommand('', 'addcol-capture', 'addColumnAtCursor(RegexColumn(makeRegexMatcher, cursorCol, input(\"capture regex: \", type=\"regex-capture\")))', 'Add column captured by regex')\nSheet.addCommand('*', 'addcol-subst', 'addColumnAtCursor(Column(cursorCol.name + \"_re\", getter=regexTransform(cursorCol, input(\"transform column by regex: \", type=\"regex-subst\"))))', 'add column derived from current column, replacing regex with subst (may include \\1 backrefs)')\nSheet.addCommand('g*', 'setcol-subst', 'setSubst([cursorCol], someSelectedRows)', 'regex/subst - modify selected rows in current column, replacing regex with subst, (may include backreferences \\\\1 etc)')\nSheet.addCommand('gz*', 'setcol-subst-all', 'setSubst(visibleCols, someSelectedRows)', 'modify selected rows in all visible columns, replacing regex with subst (may include \\\\1 backrefs)')\n\n\nvd.addMenuItems('''\n Edit > Modify > selected cells > regex substitution > setcol-subst\n Column > Add column > capture by regex > addcol-capture\n Column > Add column > split by regex > addcol-split\n Column > Add column > subst by regex > addcol-subst\n Column > Split > split-col\n Row > Select > by regex > current column > select-col-regex\n Row > Select > by regex > all columns > select-cols-regex\n Row > Unselect > by regex > current column > unselect-col-regex\n Row > Unselect > by regex > all columns > unselect-cols-regex\n''')\n", "path": "visidata/features/regex.py"}]}
2,437
200
gh_patches_debug_39739
rasdani/github-patches
git_diff
streamlink__streamlink-1878
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Problem with live.russia.tv I have Problem with the Plugin live.russia.tv : ``` #SERVICE 4097:0:1:0:0:0:224:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/76:Москва 24 HD #DESCRIPTION Москва 24 HD #SERVICE 4097:0:1:0:0:0:449:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/1:Rossija 1 HD #DESCRIPTION Rossija 1 HD #SERVICE 4097:0:1:0:0:0:445:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/82:Rossija RTR HD #DESCRIPTION Rossija RTR HD #SERVICE 4097:0:1:0:0:0:447:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/3:Rossija 24 HD #DESCRIPTION Rossija 24 HD ``` The Channels not working on streamlink - from PC work the channels ok. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/live_russia_tv.py` Content: ``` 1 import re 2 from streamlink.plugin import Plugin 3 from streamlink.plugin.api import http 4 from streamlink.stream import HLSStream 5 6 class LiveRussia(Plugin): 7 url_re = re.compile(r"https?://(?:www.)?live.russia.tv/index/index/channel_id/") 8 iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""") 9 stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""") 10 11 @classmethod 12 def can_handle_url(cls, url): 13 return cls.url_re.match(url) is not None 14 15 def _get_streams(self): 16 res = http.get(self.url) 17 iframe_result = re.search(self.iframe_re, res.text) 18 19 if not iframe_result: 20 self.logger.error("The requested content is unavailable.") 21 return 22 23 res = http.get(iframe_result.group(1)) 24 stream_url_result = re.search(self.stream_re, res.text) 25 26 if not stream_url_result: 27 self.logger.error("The requested content is unavailable.") 28 return 29 30 return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1)) 31 32 33 __plugin__ = LiveRussia ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/live_russia_tv.py b/src/streamlink/plugins/live_russia_tv.py --- a/src/streamlink/plugins/live_russia_tv.py +++ b/src/streamlink/plugins/live_russia_tv.py @@ -1,33 +1,81 @@ +import logging import re + from streamlink.plugin import Plugin -from streamlink.plugin.api import http -from streamlink.stream import HLSStream +from streamlink.plugin.api import http, validate +from streamlink.plugin.api.utils import itertags +from streamlink.stream import HLSStream, HTTPStream + +log = logging.getLogger(__name__) + class LiveRussia(Plugin): - url_re = re.compile(r"https?://(?:www.)?live.russia.tv/index/index/channel_id/") - iframe_re = re.compile(r"""<iframe[^>]*src=["']([^'"]+)["'][^>]*>""") - stream_re = re.compile(r"""window.pl.data.*m3u8":"(.*)"}.*};""") + url_re = re.compile(r"https?://(?:www\.|live\.)?russia.tv") + _data_re = re.compile(r"""window\.pl\.data\.([\w_]+)\s*=\s*['"]?(.*?)['"]?;""") @classmethod def can_handle_url(cls, url): return cls.url_re.match(url) is not None + def _get_iframe_url(self, url): + res = http.get(url) + for iframe in itertags(res.text, 'iframe'): + src = iframe.attributes.get("src") + if src: + return src + + def _get_stream_info_url(self, url): + data = {} + res = http.get(url) + for m in self._data_re.finditer(res.text): + data[m.group(1)] = m.group(2) + + log.debug("Got pl_data={0}".format(data)) + + if data: + if data["isVod"] == '0': + return "https:{domain}/iframe/datalive/id/{id}/sid/{sid}".format(**data) + else: + return "https:{domain}/iframe/datavideo/id/{id}/sid/{sid}".format(**data) + def _get_streams(self): - res = http.get(self.url) - iframe_result = re.search(self.iframe_re, res.text) + iframe_url = self._get_iframe_url(self.url) + + if iframe_url: + log.debug("Found iframe URL={0}".format(iframe_url)) + info_url = self._get_stream_info_url(iframe_url) + + if info_url: + log.debug("Getting info from URL: {0}".format(info_url)) + res = http.get(info_url, headers={"Referer": iframe_url}) + data = http.json(res) + + if data['status'] == 200: + for media in data['data']['playlist']['medialist']: + if media['errors']: + log.error(media['errors'].replace('\n', '').replace('\r', '')) + + for media_type in media.get('sources', []): + + if media_type == "m3u8": + hls_url = media['sources'][media_type]['auto'] + for s in HLSStream.parse_variant_playlist(self.session, hls_url).items(): + yield s + + if media_type == "http": + for pix, url in media['sources'][media_type].items(): + yield "{0}p".format(pix), HTTPStream(self.session, url) + else: + log.error("An error occurred: {0}".format(data['errors'].replace('\n', '').replace('\r', ''))) + else: + log.error("Unable to get stream info URL") + else: + log.error("Could not find video iframe") + - if not iframe_result: - self.logger.error("The requested content is unavailable.") - return - res = http.get(iframe_result.group(1)) - stream_url_result = re.search(self.stream_re, res.text) - if not stream_url_result: - self.logger.error("The requested content is unavailable.") - return - return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1)) -__plugin__ = LiveRussia \ No newline at end of file +__plugin__ = LiveRussia
{"golden_diff": "diff --git a/src/streamlink/plugins/live_russia_tv.py b/src/streamlink/plugins/live_russia_tv.py\n--- a/src/streamlink/plugins/live_russia_tv.py\n+++ b/src/streamlink/plugins/live_russia_tv.py\n@@ -1,33 +1,81 @@\n+import logging\n import re\n+\n from streamlink.plugin import Plugin\n-from streamlink.plugin.api import http\n-from streamlink.stream import HLSStream\n+from streamlink.plugin.api import http, validate\n+from streamlink.plugin.api.utils import itertags\n+from streamlink.stream import HLSStream, HTTPStream\n+\n+log = logging.getLogger(__name__)\n+\n \n class LiveRussia(Plugin):\n- url_re = re.compile(r\"https?://(?:www.)?live.russia.tv/index/index/channel_id/\")\n- iframe_re = re.compile(r\"\"\"<iframe[^>]*src=[\"']([^'\"]+)[\"'][^>]*>\"\"\")\n- stream_re = re.compile(r\"\"\"window.pl.data.*m3u8\":\"(.*)\"}.*};\"\"\")\n+ url_re = re.compile(r\"https?://(?:www\\.|live\\.)?russia.tv\")\n+ _data_re = re.compile(r\"\"\"window\\.pl\\.data\\.([\\w_]+)\\s*=\\s*['\"]?(.*?)['\"]?;\"\"\")\n \n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n \n+ def _get_iframe_url(self, url):\n+ res = http.get(url)\n+ for iframe in itertags(res.text, 'iframe'):\n+ src = iframe.attributes.get(\"src\")\n+ if src:\n+ return src\n+\n+ def _get_stream_info_url(self, url):\n+ data = {}\n+ res = http.get(url)\n+ for m in self._data_re.finditer(res.text):\n+ data[m.group(1)] = m.group(2)\n+\n+ log.debug(\"Got pl_data={0}\".format(data))\n+\n+ if data:\n+ if data[\"isVod\"] == '0':\n+ return \"https:{domain}/iframe/datalive/id/{id}/sid/{sid}\".format(**data)\n+ else:\n+ return \"https:{domain}/iframe/datavideo/id/{id}/sid/{sid}\".format(**data)\n+\n def _get_streams(self):\n- res = http.get(self.url)\n- iframe_result = re.search(self.iframe_re, res.text)\n+ iframe_url = self._get_iframe_url(self.url)\n+\n+ if iframe_url:\n+ log.debug(\"Found iframe URL={0}\".format(iframe_url))\n+ info_url = self._get_stream_info_url(iframe_url)\n+\n+ if info_url:\n+ log.debug(\"Getting info from URL: {0}\".format(info_url))\n+ res = http.get(info_url, headers={\"Referer\": iframe_url})\n+ data = http.json(res)\n+\n+ if data['status'] == 200:\n+ for media in data['data']['playlist']['medialist']:\n+ if media['errors']:\n+ log.error(media['errors'].replace('\\n', '').replace('\\r', ''))\n+\n+ for media_type in media.get('sources', []):\n+\n+ if media_type == \"m3u8\":\n+ hls_url = media['sources'][media_type]['auto']\n+ for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n+ yield s\n+\n+ if media_type == \"http\":\n+ for pix, url in media['sources'][media_type].items():\n+ yield \"{0}p\".format(pix), HTTPStream(self.session, url)\n+ else:\n+ log.error(\"An error occurred: {0}\".format(data['errors'].replace('\\n', '').replace('\\r', '')))\n+ else:\n+ log.error(\"Unable to get stream info URL\")\n+ else:\n+ log.error(\"Could not find video iframe\")\n+\n \n- if not iframe_result:\n- self.logger.error(\"The requested content is unavailable.\")\n- return\n \n- res = http.get(iframe_result.group(1))\n- stream_url_result = re.search(self.stream_re, res.text)\n \n- if not stream_url_result:\n- self.logger.error(\"The requested content is unavailable.\")\n- return\n \n- return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))\n \n \n-__plugin__ = LiveRussia\n\\ No newline at end of file\n+__plugin__ = LiveRussia\n", "issue": "Problem with live.russia.tv\nI have Problem with the Plugin live.russia.tv : \r\n```\r\n#SERVICE 
4097:0:1:0:0:0:224:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/76:\u041c\u043e\u0441\u043a\u0432\u0430 24 HD\r\n#DESCRIPTION \u041c\u043e\u0441\u043a\u0432\u0430 24 HD\r\n#SERVICE 4097:0:1:0:0:0:449:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/1:Rossija 1 HD\r\n#DESCRIPTION Rossija 1 HD\r\n#SERVICE 4097:0:1:0:0:0:445:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/82:Rossija RTR HD\r\n#DESCRIPTION Rossija RTR HD\r\n#SERVICE 4097:0:1:0:0:0:447:0:0:0:http%3a//127.0.0.1%3a8088/https%3a//live.russia.tv/index/index/channel_id/3:Rossija 24 HD\r\n#DESCRIPTION Rossija 24 HD\r\n```\r\nThe Channels not working on streamlink - from PC work the channels ok.\n", "before_files": [{"content": "import re\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\n\nclass LiveRussia(Plugin):\n url_re = re.compile(r\"https?://(?:www.)?live.russia.tv/index/index/channel_id/\")\n iframe_re = re.compile(r\"\"\"<iframe[^>]*src=[\"']([^'\"]+)[\"'][^>]*>\"\"\")\n stream_re = re.compile(r\"\"\"window.pl.data.*m3u8\":\"(.*)\"}.*};\"\"\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n res = http.get(self.url)\n iframe_result = re.search(self.iframe_re, res.text)\n\n if not iframe_result:\n self.logger.error(\"The requested content is unavailable.\")\n return\n\n res = http.get(iframe_result.group(1))\n stream_url_result = re.search(self.stream_re, res.text)\n\n if not stream_url_result:\n self.logger.error(\"The requested content is unavailable.\")\n return\n\n return HLSStream.parse_variant_playlist(self.session, stream_url_result.group(1))\n\n\n__plugin__ = LiveRussia", "path": "src/streamlink/plugins/live_russia_tv.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.plugin.api.utils import itertags\nfrom streamlink.stream import HLSStream, HTTPStream\n\nlog = logging.getLogger(__name__)\n\n\nclass LiveRussia(Plugin):\n url_re = re.compile(r\"https?://(?:www\\.|live\\.)?russia.tv\")\n _data_re = re.compile(r\"\"\"window\\.pl\\.data\\.([\\w_]+)\\s*=\\s*['\"]?(.*?)['\"]?;\"\"\")\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_iframe_url(self, url):\n res = http.get(url)\n for iframe in itertags(res.text, 'iframe'):\n src = iframe.attributes.get(\"src\")\n if src:\n return src\n\n def _get_stream_info_url(self, url):\n data = {}\n res = http.get(url)\n for m in self._data_re.finditer(res.text):\n data[m.group(1)] = m.group(2)\n\n log.debug(\"Got pl_data={0}\".format(data))\n\n if data:\n if data[\"isVod\"] == '0':\n return \"https:{domain}/iframe/datalive/id/{id}/sid/{sid}\".format(**data)\n else:\n return \"https:{domain}/iframe/datavideo/id/{id}/sid/{sid}\".format(**data)\n\n def _get_streams(self):\n iframe_url = self._get_iframe_url(self.url)\n\n if iframe_url:\n log.debug(\"Found iframe URL={0}\".format(iframe_url))\n info_url = self._get_stream_info_url(iframe_url)\n\n if info_url:\n log.debug(\"Getting info from URL: {0}\".format(info_url))\n res = http.get(info_url, headers={\"Referer\": iframe_url})\n data = http.json(res)\n\n if data['status'] == 200:\n for media in data['data']['playlist']['medialist']:\n if media['errors']:\n log.error(media['errors'].replace('\\n', '').replace('\\r', ''))\n\n for 
media_type in media.get('sources', []):\n\n if media_type == \"m3u8\":\n hls_url = media['sources'][media_type]['auto']\n for s in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n yield s\n\n if media_type == \"http\":\n for pix, url in media['sources'][media_type].items():\n yield \"{0}p\".format(pix), HTTPStream(self.session, url)\n else:\n log.error(\"An error occurred: {0}\".format(data['errors'].replace('\\n', '').replace('\\r', '')))\n else:\n log.error(\"Unable to get stream info URL\")\n else:\n log.error(\"Could not find video iframe\")\n\n\n\n\n\n\n\n__plugin__ = LiveRussia\n", "path": "src/streamlink/plugins/live_russia_tv.py"}]}
939
980
gh_patches_debug_27786
rasdani/github-patches
git_diff
gammapy__gammapy-1033
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Problem of fit with sherpa Hi @cdeil and @joleroi , I've got some problem to do a spectral fit with sherpa on CTA simulated data. I don't really understand why it's happening. Everything worked fine ~2 weeks ago. Here is a small example to illustrate the problem: ``` import astropy.units as u from gammapy.spectrum.models import PowerLaw from gammapy.scripts.cta_utils import CTAObservationSimulation, Target, ObservationParameters from gammapy.scripts import CTAPerf # observation parameters alpha = 0.2 * u.Unit('') livetime = 100 * u.h emin = 0.05 * u.TeV emax = 5. * u.TeV obs_param = ObservationParameters(alpha=alpha, livetime=livetime, emin=emin, emax=emax) # target pwl = PowerLaw(amplitude=1e-11 * u.Unit('1/(s cm2 TeV)'), index=2., reference=1. * u.TeV) target = Target(name='bob', model=pwl) # perf dir_south = './CTA-Performance-South-20170323/' perf = CTAPerf.read('$GAMMAPY_EXTRA/datasets/cta/perf_prod2/point_like_non_smoothed/South_5h.fits.gz') # simu obs_id = 0 simu = CTAObservationSimulation.simulate_obs(perf=perf, target=target, obs_param=obs_param, obs_id=obs_id) # simu output print(simu) stats = simu.stats_table() print(stats[['energy_min', 'energy_max', 'background', 'excess', 'sigma']]) simu.write(outdir='./', use_sherpa=True) from sherpa.astro.datastack import DataStack import sherpa.astro.datastack as sh from sherpa_model import Absorption simu_output_dir = './' filename = simu_output_dir + 'pha_obs' + str(obs_id) + '.fits' sh.load_data(id=obs_id, filename=filename) sherpa_model = 'powlaw1d.pwl' sh.set_source(obs_id, sherpa_model) pwl.ampl = 1.e-12 * u.Unit('1/(s cm2 TeV)').to('1/(s cm2 keV)') pwl.ref = 1 * u.TeV.to('keV') pwl.gamma = 2. sh.notice(emin.to('keV')*0.99,emax.to('keV')*1.01) print('JLK: threshold for spectra: ({},{})'.format(emin.to('keV')*0.99,emax.to('keV')*1.01)) sh.set_stat('WStat') sh.fit() sh.conf() ``` which results in weird NaN values and implies problem for the fit: ``` Dataset = 0 Method = levmar Statistic = wstat Initial fit statistic = nan Final fit statistic = nan at function evaluation 33 Data points = 12 Degrees of freedom = 10 Reduced statistic = nan Change in statistic = nan pwl.gamma 2 pwl.ampl 1e-21 ``` Do you have any idea what could cause that? OGIP format writting or something like that? Thanks in advance ++ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gammapy/scripts/cta_utils.py` Content: ``` 1 from __future__ import absolute_import, division, print_function, unicode_literals 2 import numpy as np 3 import astropy.units as u 4 from ..spectrum import SpectrumObservation 5 from ..spectrum.utils import CountsPredictor 6 from ..spectrum.core import PHACountsSpectrum 7 from ..utils.random import get_random_state 8 9 __all__ = [ 10 'Target', 11 'ObservationParameters', 12 'CTAObservationSimulation', 13 ] 14 15 16 class Target(object): 17 """Observation target information. 
18 19 Parameters 20 ---------- 21 name : `str` 22 Name of the source 23 model : `~gammapy.spectrum.models.SpectralModel` 24 Model of the source 25 """ 26 27 def __init__(self, name=None, 28 model=None): 29 self.name = name 30 self.model = model 31 32 def __str__(self): 33 """Target report (`str`).""" 34 ss = '*** Target parameters ***\n' 35 ss += 'Name={}\n'.format(self.name) 36 for par in self.model.parameters.parameters: 37 ss += '{}={} {}\n'.format(par.name, str(par.value), par.unit) 38 return ss 39 40 def from_fermi_lat_catalogue(name): 41 raise NotImplementedError 42 43 44 class ObservationParameters(object): 45 """Container for observation parameters. 46 47 Parameters 48 ---------- 49 alpha : `~astropy.units.Quantity` 50 Normalisation between ON and OFF regions 51 livetime : `~astropy.units.Quantity` 52 Observation time 53 emin : `~astropy.units.Quantity` 54 Minimal energy for simulation 55 emax : `~astropy.units.Quantity` 56 Maximal energy for simulation 57 """ 58 59 def __init__(self, alpha=None, livetime=None, 60 emin=None, emax=None): 61 self.alpha = alpha 62 self.livetime = livetime 63 self.emin = emin 64 self.emax = emax 65 66 def __str__(self): 67 """Observation summary report (`str`).""" 68 ss = '*** Observation parameters summary ***\n' 69 ss += 'alpha={} [{}]\n'.format(self.alpha.value, self.alpha.unit) 70 ss += 'livetime={} [{}]\n'.format(self.livetime.value, 71 self.livetime.unit) 72 ss += 'emin={} [{}]\n'.format(self.emin.value, self.emin.unit) 73 ss += 'emax={} [{}]\n'.format(self.emax.value, self.emax.unit) 74 return ss 75 76 77 class CTAObservationSimulation(object): 78 """Simulate observation for one IRF and target. 79 80 TODO : Should be merge with `~gammapy.spectrum.SpectrumSimulation` 81 82 Parameters 83 ---------- 84 perf : `~gammapy.scripts.CTAPerf` 85 CTA performance 86 target : `~gammapy.scripts.Target` 87 Source 88 """ 89 90 @staticmethod 91 def simulate_obs(perf, target, obs_param, obs_id=0): 92 """ 93 Simulate observation with given parameters 94 95 Parameters 96 ---------- 97 perf : `~gammapy.scripts.CTAPerf` 98 CTA performance 99 target : `~gammapy.scripts.Target` 100 Source 101 obs_param : `~gammapy.scripts.ObservationParameters` 102 Observation parameters 103 obs_id : `int`, optional 104 Observation Id 105 """ 106 livetime = obs_param.livetime 107 alpha = obs_param.alpha.value 108 emin = obs_param.emin 109 emax = obs_param.emax 110 111 model = target.model 112 113 # Compute expected counts 114 reco_energy = perf.bkg.energy 115 bkg_rate_values = perf.bkg.data.data * livetime.to('s') 116 predicted_counts = CountsPredictor(model=model, 117 aeff=perf.aeff, 118 livetime=livetime, 119 edisp=perf.rmf) 120 predicted_counts.run() 121 npred = predicted_counts.npred 122 # set negative values to zero (interpolation issue) 123 idx = np.where(npred.data.data < 0.) 124 npred.data.data[idx] = 0 125 126 # Randomise counts 127 rand = get_random_state('random-seed') 128 on_counts = rand.poisson(npred.data.data.value) # excess 129 bkg_counts = rand.poisson(bkg_rate_values.value) # bkg in ON region 130 off_counts = rand.poisson( 131 bkg_rate_values.value / alpha) # bkg in OFF region 132 133 on_counts += bkg_counts # evts in ON region 134 135 meta = dict(EXPOSURE=livetime.to('s').value) 136 137 on_vector = PHACountsSpectrum( 138 data=on_counts, 139 backscal=1, 140 energy_lo=reco_energy.lo, 141 energy_hi=reco_energy.hi, 142 meta=meta, 143 ) 144 145 off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo, 146 energy_hi=reco_energy.hi, 147 data=off_counts, 148 backscal=1. 
/ alpha, 149 is_bkg=True, 150 ) 151 152 obs = SpectrumObservation(on_vector=on_vector, 153 off_vector=off_vector, 154 aeff=perf.aeff, 155 edisp=perf.rmf) 156 obs.obs_id = obs_id 157 158 # Set threshold according to the closest energy reco from bkg bins 159 idx_min = np.abs(reco_energy.lo - emin).argmin() 160 idx_max = np.abs(reco_energy.lo - emax).argmin() 161 obs.lo_threshold = reco_energy.lo[idx_min] 162 obs.hi_threshold = reco_energy.lo[idx_max] 163 164 return obs 165 166 @staticmethod 167 def plot_simu(simu, target): 168 import matplotlib.pyplot as plt 169 fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, 170 figsize=(10, 5)) 171 172 # Spectrum plot 173 energy_range = [0.01 * u.TeV, 100 * u.TeV] 174 target.model.plot(ax=ax1, energy_range=energy_range, 175 label='Model') 176 plt.text(0.55, 0.65, target.__str__(), 177 style='italic', transform=ax1.transAxes, fontsize=7, 178 bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10}) 179 ax1.set_xlim([energy_range[0].value, energy_range[1].value]) 180 ax1.set_ylim(1.e-17, 1.e-5) 181 ax1.grid(which='both') 182 ax1.legend(loc=0) 183 184 # Counts plot 185 on_off = simu.on_vector.data.data.value 186 off = 1. / simu.off_vector.backscal * simu.off_vector.data.data.value 187 excess = on_off - off 188 bins = simu.on_vector.energy.lo.value 189 x = simu.on_vector.energy.nodes.value 190 ax2.hist(x, bins=bins, weights=on_off, 191 facecolor='blue', alpha=1, label='ON') 192 ax2.hist(x, bins=bins, weights=off, 193 facecolor='green', alpha=1, label='OFF') 194 ax2.hist(x, bins=bins, weights=excess, 195 facecolor='red', alpha=1, label='EXCESS') 196 ax2.legend(loc='best') 197 ax2.set_xscale('log') 198 ax2.set_xlabel('Energy [TeV]') 199 ax2.set_ylabel('Expected counts') 200 ax2.set_xlim([energy_range[0].value, energy_range[1].value]) 201 ax2.set_ylim([0.0001, on_off.max() * (1 + 0.05)]) 202 ax2.vlines(simu.lo_threshold.value, 0, 1.1 * on_off.max(), 203 linestyles='dashed') 204 ax2.grid(which='both') 205 plt.text(0.55, 0.05, simu.__str__(), 206 style='italic', transform=ax2.transAxes, fontsize=7, 207 bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10}) 208 plt.tight_layout() 209 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gammapy/scripts/cta_utils.py b/gammapy/scripts/cta_utils.py --- a/gammapy/scripts/cta_utils.py +++ b/gammapy/scripts/cta_utils.py @@ -1,4 +1,4 @@ -from __future__ import absolute_import, division, print_function, unicode_literals + import numpy as np import astropy.units as u from ..spectrum import SpectrumObservation @@ -132,22 +132,21 @@ on_counts += bkg_counts # evts in ON region - meta = dict(EXPOSURE=livetime.to('s').value) - on_vector = PHACountsSpectrum( data=on_counts, backscal=1, energy_lo=reco_energy.lo, energy_hi=reco_energy.hi, - meta=meta, ) + on_vector.livetime = livetime off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo, energy_hi=reco_energy.hi, data=off_counts, backscal=1. / alpha, is_bkg=True, ) + off_vector.livetime = livetime obs = SpectrumObservation(on_vector=on_vector, off_vector=off_vector,
{"golden_diff": "diff --git a/gammapy/scripts/cta_utils.py b/gammapy/scripts/cta_utils.py\n--- a/gammapy/scripts/cta_utils.py\n+++ b/gammapy/scripts/cta_utils.py\n@@ -1,4 +1,4 @@\n-from __future__ import absolute_import, division, print_function, unicode_literals\n+\n import numpy as np\n import astropy.units as u\n from ..spectrum import SpectrumObservation\n@@ -132,22 +132,21 @@\n \n on_counts += bkg_counts # evts in ON region\n \n- meta = dict(EXPOSURE=livetime.to('s').value)\n-\n on_vector = PHACountsSpectrum(\n data=on_counts,\n backscal=1,\n energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n- meta=meta,\n )\n \n+ on_vector.livetime = livetime\n off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n data=off_counts,\n backscal=1. / alpha,\n is_bkg=True,\n )\n+ off_vector.livetime = livetime\n \n obs = SpectrumObservation(on_vector=on_vector,\n off_vector=off_vector,\n", "issue": "Problem of fit with sherpa\nHi @cdeil and @joleroi , \r\nI've got some problem to do a spectral fit with sherpa on CTA simulated data. I don't really understand why it's happening. Everything worked fine ~2 weeks ago.\r\n\r\nHere is a small example to illustrate the problem: \r\n```\r\nimport astropy.units as u\r\n\r\nfrom gammapy.spectrum.models import PowerLaw\r\nfrom gammapy.scripts.cta_utils import CTAObservationSimulation, Target, ObservationParameters\r\n\r\nfrom gammapy.scripts import CTAPerf\r\n\r\n# observation parameters\r\nalpha = 0.2 * u.Unit('')\r\nlivetime = 100 * u.h\r\nemin = 0.05 * u.TeV\r\nemax = 5. * u.TeV\r\nobs_param = ObservationParameters(alpha=alpha,\r\n livetime=livetime,\r\n emin=emin,\r\n emax=emax)\r\n\r\n# target\r\npwl = PowerLaw(amplitude=1e-11 * u.Unit('1/(s cm2 TeV)'),\r\n index=2.,\r\n reference=1. * u.TeV)\r\ntarget = Target(name='bob', model=pwl)\r\n\r\n# perf\r\ndir_south = './CTA-Performance-South-20170323/'\r\nperf = CTAPerf.read('$GAMMAPY_EXTRA/datasets/cta/perf_prod2/point_like_non_smoothed/South_5h.fits.gz')\r\n\r\n# simu\r\nobs_id = 0\r\nsimu = CTAObservationSimulation.simulate_obs(perf=perf,\r\n target=target,\r\n obs_param=obs_param,\r\n obs_id=obs_id)\r\n\r\n# simu output\r\nprint(simu)\r\nstats = simu.stats_table()\r\nprint(stats[['energy_min', 'energy_max', 'background', 'excess', 'sigma']])\r\nsimu.write(outdir='./', use_sherpa=True)\r\n\r\n\r\nfrom sherpa.astro.datastack import DataStack\r\nimport sherpa.astro.datastack as sh\r\nfrom sherpa_model import Absorption\r\n\r\nsimu_output_dir = './'\r\nfilename = simu_output_dir + 'pha_obs' + str(obs_id) + '.fits'\r\nsh.load_data(id=obs_id, filename=filename)\r\nsherpa_model = 'powlaw1d.pwl'\r\nsh.set_source(obs_id, sherpa_model)\r\n \r\npwl.ampl = 1.e-12 * u.Unit('1/(s cm2 TeV)').to('1/(s cm2 keV)')\r\npwl.ref = 1 * u.TeV.to('keV')\r\npwl.gamma = 2.\r\n\r\nsh.notice(emin.to('keV')*0.99,emax.to('keV')*1.01)\r\nprint('JLK: threshold for spectra: ({},{})'.format(emin.to('keV')*0.99,emax.to('keV')*1.01))\r\n\r\nsh.set_stat('WStat')\r\nsh.fit()\r\nsh.conf()\r\n```\r\n\r\nwhich results in weird NaN values and implies problem for the fit:\r\n```\r\nDataset = 0\r\nMethod = levmar\r\nStatistic = wstat\r\nInitial fit statistic = nan\r\nFinal fit statistic = nan at function evaluation 33\r\nData points = 12\r\nDegrees of freedom = 10\r\nReduced statistic = nan\r\nChange in statistic = nan\r\n pwl.gamma 2 \r\n pwl.ampl 1e-21 \r\n```\r\n \r\nDo you have any idea what could cause that? 
OGIP format writting or something like that?\r\n\r\nThanks in advance ++\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function, unicode_literals\nimport numpy as np\nimport astropy.units as u\nfrom ..spectrum import SpectrumObservation\nfrom ..spectrum.utils import CountsPredictor\nfrom ..spectrum.core import PHACountsSpectrum\nfrom ..utils.random import get_random_state\n\n__all__ = [\n 'Target',\n 'ObservationParameters',\n 'CTAObservationSimulation',\n]\n\n\nclass Target(object):\n \"\"\"Observation target information.\n\n Parameters\n ----------\n name : `str`\n Name of the source\n model : `~gammapy.spectrum.models.SpectralModel`\n Model of the source\n \"\"\"\n\n def __init__(self, name=None,\n model=None):\n self.name = name\n self.model = model\n\n def __str__(self):\n \"\"\"Target report (`str`).\"\"\"\n ss = '*** Target parameters ***\\n'\n ss += 'Name={}\\n'.format(self.name)\n for par in self.model.parameters.parameters:\n ss += '{}={} {}\\n'.format(par.name, str(par.value), par.unit)\n return ss\n\n def from_fermi_lat_catalogue(name):\n raise NotImplementedError\n\n\nclass ObservationParameters(object):\n \"\"\"Container for observation parameters.\n\n Parameters\n ----------\n alpha : `~astropy.units.Quantity`\n Normalisation between ON and OFF regions\n livetime : `~astropy.units.Quantity`\n Observation time\n emin : `~astropy.units.Quantity`\n Minimal energy for simulation\n emax : `~astropy.units.Quantity`\n Maximal energy for simulation\n \"\"\"\n\n def __init__(self, alpha=None, livetime=None,\n emin=None, emax=None):\n self.alpha = alpha\n self.livetime = livetime\n self.emin = emin\n self.emax = emax\n\n def __str__(self):\n \"\"\"Observation summary report (`str`).\"\"\"\n ss = '*** Observation parameters summary ***\\n'\n ss += 'alpha={} [{}]\\n'.format(self.alpha.value, self.alpha.unit)\n ss += 'livetime={} [{}]\\n'.format(self.livetime.value,\n self.livetime.unit)\n ss += 'emin={} [{}]\\n'.format(self.emin.value, self.emin.unit)\n ss += 'emax={} [{}]\\n'.format(self.emax.value, self.emax.unit)\n return ss\n\n\nclass CTAObservationSimulation(object):\n \"\"\"Simulate observation for one IRF and target.\n\n TODO : Should be merge with `~gammapy.spectrum.SpectrumSimulation`\n\n Parameters\n ----------\n perf : `~gammapy.scripts.CTAPerf`\n CTA performance\n target : `~gammapy.scripts.Target`\n Source\n \"\"\"\n\n @staticmethod\n def simulate_obs(perf, target, obs_param, obs_id=0):\n \"\"\"\n Simulate observation with given parameters\n\n Parameters\n ----------\n perf : `~gammapy.scripts.CTAPerf`\n CTA performance\n target : `~gammapy.scripts.Target`\n Source\n obs_param : `~gammapy.scripts.ObservationParameters`\n Observation parameters\n obs_id : `int`, optional\n Observation Id\n \"\"\"\n livetime = obs_param.livetime\n alpha = obs_param.alpha.value\n emin = obs_param.emin\n emax = obs_param.emax\n\n model = target.model\n\n # Compute expected counts\n reco_energy = perf.bkg.energy\n bkg_rate_values = perf.bkg.data.data * livetime.to('s')\n predicted_counts = CountsPredictor(model=model,\n aeff=perf.aeff,\n livetime=livetime,\n edisp=perf.rmf)\n predicted_counts.run()\n npred = predicted_counts.npred\n # set negative values to zero (interpolation issue)\n idx = np.where(npred.data.data < 0.)\n npred.data.data[idx] = 0\n\n # Randomise counts\n rand = get_random_state('random-seed')\n on_counts = rand.poisson(npred.data.data.value) # excess\n bkg_counts = rand.poisson(bkg_rate_values.value) # bkg in ON region\n off_counts = 
rand.poisson(\n bkg_rate_values.value / alpha) # bkg in OFF region\n\n on_counts += bkg_counts # evts in ON region\n\n meta = dict(EXPOSURE=livetime.to('s').value)\n\n on_vector = PHACountsSpectrum(\n data=on_counts,\n backscal=1,\n energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n meta=meta,\n )\n\n off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n data=off_counts,\n backscal=1. / alpha,\n is_bkg=True,\n )\n\n obs = SpectrumObservation(on_vector=on_vector,\n off_vector=off_vector,\n aeff=perf.aeff,\n edisp=perf.rmf)\n obs.obs_id = obs_id\n\n # Set threshold according to the closest energy reco from bkg bins\n idx_min = np.abs(reco_energy.lo - emin).argmin()\n idx_max = np.abs(reco_energy.lo - emax).argmin()\n obs.lo_threshold = reco_energy.lo[idx_min]\n obs.hi_threshold = reco_energy.lo[idx_max]\n\n return obs\n\n @staticmethod\n def plot_simu(simu, target):\n import matplotlib.pyplot as plt\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,\n figsize=(10, 5))\n\n # Spectrum plot\n energy_range = [0.01 * u.TeV, 100 * u.TeV]\n target.model.plot(ax=ax1, energy_range=energy_range,\n label='Model')\n plt.text(0.55, 0.65, target.__str__(),\n style='italic', transform=ax1.transAxes, fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})\n ax1.set_xlim([energy_range[0].value, energy_range[1].value])\n ax1.set_ylim(1.e-17, 1.e-5)\n ax1.grid(which='both')\n ax1.legend(loc=0)\n\n # Counts plot\n on_off = simu.on_vector.data.data.value\n off = 1. / simu.off_vector.backscal * simu.off_vector.data.data.value\n excess = on_off - off\n bins = simu.on_vector.energy.lo.value\n x = simu.on_vector.energy.nodes.value\n ax2.hist(x, bins=bins, weights=on_off,\n facecolor='blue', alpha=1, label='ON')\n ax2.hist(x, bins=bins, weights=off,\n facecolor='green', alpha=1, label='OFF')\n ax2.hist(x, bins=bins, weights=excess,\n facecolor='red', alpha=1, label='EXCESS')\n ax2.legend(loc='best')\n ax2.set_xscale('log')\n ax2.set_xlabel('Energy [TeV]')\n ax2.set_ylabel('Expected counts')\n ax2.set_xlim([energy_range[0].value, energy_range[1].value])\n ax2.set_ylim([0.0001, on_off.max() * (1 + 0.05)])\n ax2.vlines(simu.lo_threshold.value, 0, 1.1 * on_off.max(),\n linestyles='dashed')\n ax2.grid(which='both')\n plt.text(0.55, 0.05, simu.__str__(),\n style='italic', transform=ax2.transAxes, fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})\n plt.tight_layout()\n", "path": "gammapy/scripts/cta_utils.py"}], "after_files": [{"content": "\nimport numpy as np\nimport astropy.units as u\nfrom ..spectrum import SpectrumObservation\nfrom ..spectrum.utils import CountsPredictor\nfrom ..spectrum.core import PHACountsSpectrum\nfrom ..utils.random import get_random_state\n\n__all__ = [\n 'Target',\n 'ObservationParameters',\n 'CTAObservationSimulation',\n]\n\n\nclass Target(object):\n \"\"\"Observation target information.\n\n Parameters\n ----------\n name : `str`\n Name of the source\n model : `~gammapy.spectrum.models.SpectralModel`\n Model of the source\n \"\"\"\n\n def __init__(self, name=None,\n model=None):\n self.name = name\n self.model = model\n\n def __str__(self):\n \"\"\"Target report (`str`).\"\"\"\n ss = '*** Target parameters ***\\n'\n ss += 'Name={}\\n'.format(self.name)\n for par in self.model.parameters.parameters:\n ss += '{}={} {}\\n'.format(par.name, str(par.value), par.unit)\n return ss\n\n def from_fermi_lat_catalogue(name):\n raise NotImplementedError\n\n\nclass ObservationParameters(object):\n \"\"\"Container for observation parameters.\n\n 
Parameters\n ----------\n alpha : `~astropy.units.Quantity`\n Normalisation between ON and OFF regions\n livetime : `~astropy.units.Quantity`\n Observation time\n emin : `~astropy.units.Quantity`\n Minimal energy for simulation\n emax : `~astropy.units.Quantity`\n Maximal energy for simulation\n \"\"\"\n\n def __init__(self, alpha=None, livetime=None,\n emin=None, emax=None):\n self.alpha = alpha\n self.livetime = livetime\n self.emin = emin\n self.emax = emax\n\n def __str__(self):\n \"\"\"Observation summary report (`str`).\"\"\"\n ss = '*** Observation parameters summary ***\\n'\n ss += 'alpha={} [{}]\\n'.format(self.alpha.value, self.alpha.unit)\n ss += 'livetime={} [{}]\\n'.format(self.livetime.value,\n self.livetime.unit)\n ss += 'emin={} [{}]\\n'.format(self.emin.value, self.emin.unit)\n ss += 'emax={} [{}]\\n'.format(self.emax.value, self.emax.unit)\n return ss\n\n\nclass CTAObservationSimulation(object):\n \"\"\"Simulate observation for one IRF and target.\n\n TODO : Should be merge with `~gammapy.spectrum.SpectrumSimulation`\n\n Parameters\n ----------\n perf : `~gammapy.scripts.CTAPerf`\n CTA performance\n target : `~gammapy.scripts.Target`\n Source\n \"\"\"\n\n @staticmethod\n def simulate_obs(perf, target, obs_param, obs_id=0):\n \"\"\"\n Simulate observation with given parameters\n\n Parameters\n ----------\n perf : `~gammapy.scripts.CTAPerf`\n CTA performance\n target : `~gammapy.scripts.Target`\n Source\n obs_param : `~gammapy.scripts.ObservationParameters`\n Observation parameters\n obs_id : `int`, optional\n Observation Id\n \"\"\"\n livetime = obs_param.livetime\n alpha = obs_param.alpha.value\n emin = obs_param.emin\n emax = obs_param.emax\n\n model = target.model\n\n # Compute expected counts\n reco_energy = perf.bkg.energy\n bkg_rate_values = perf.bkg.data.data * livetime.to('s')\n predicted_counts = CountsPredictor(model=model,\n aeff=perf.aeff,\n livetime=livetime,\n edisp=perf.rmf)\n predicted_counts.run()\n npred = predicted_counts.npred\n # set negative values to zero (interpolation issue)\n idx = np.where(npred.data.data < 0.)\n npred.data.data[idx] = 0\n\n # Randomise counts\n rand = get_random_state('random-seed')\n on_counts = rand.poisson(npred.data.data.value) # excess\n bkg_counts = rand.poisson(bkg_rate_values.value) # bkg in ON region\n off_counts = rand.poisson(\n bkg_rate_values.value / alpha) # bkg in OFF region\n\n on_counts += bkg_counts # evts in ON region\n\n on_vector = PHACountsSpectrum(\n data=on_counts,\n backscal=1,\n energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n )\n\n on_vector.livetime = livetime\n off_vector = PHACountsSpectrum(energy_lo=reco_energy.lo,\n energy_hi=reco_energy.hi,\n data=off_counts,\n backscal=1. 
/ alpha,\n is_bkg=True,\n )\n off_vector.livetime = livetime\n\n obs = SpectrumObservation(on_vector=on_vector,\n off_vector=off_vector,\n aeff=perf.aeff,\n edisp=perf.rmf)\n obs.obs_id = obs_id\n\n # Set threshold according to the closest energy reco from bkg bins\n idx_min = np.abs(reco_energy.lo - emin).argmin()\n idx_max = np.abs(reco_energy.lo - emax).argmin()\n obs.lo_threshold = reco_energy.lo[idx_min]\n obs.hi_threshold = reco_energy.lo[idx_max]\n\n return obs\n\n @staticmethod\n def plot_simu(simu, target):\n import matplotlib.pyplot as plt\n fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2,\n figsize=(10, 5))\n\n # Spectrum plot\n energy_range = [0.01 * u.TeV, 100 * u.TeV]\n target.model.plot(ax=ax1, energy_range=energy_range,\n label='Model')\n plt.text(0.55, 0.65, target.__str__(),\n style='italic', transform=ax1.transAxes, fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})\n ax1.set_xlim([energy_range[0].value, energy_range[1].value])\n ax1.set_ylim(1.e-17, 1.e-5)\n ax1.grid(which='both')\n ax1.legend(loc=0)\n\n # Counts plot\n on_off = simu.on_vector.data.data.value\n off = 1. / simu.off_vector.backscal * simu.off_vector.data.data.value\n excess = on_off - off\n bins = simu.on_vector.energy.lo.value\n x = simu.on_vector.energy.nodes.value\n ax2.hist(x, bins=bins, weights=on_off,\n facecolor='blue', alpha=1, label='ON')\n ax2.hist(x, bins=bins, weights=off,\n facecolor='green', alpha=1, label='OFF')\n ax2.hist(x, bins=bins, weights=excess,\n facecolor='red', alpha=1, label='EXCESS')\n ax2.legend(loc='best')\n ax2.set_xscale('log')\n ax2.set_xlabel('Energy [TeV]')\n ax2.set_ylabel('Expected counts')\n ax2.set_xlim([energy_range[0].value, energy_range[1].value])\n ax2.set_ylim([0.0001, on_off.max() * (1 + 0.05)])\n ax2.vlines(simu.lo_threshold.value, 0, 1.1 * on_off.max(),\n linestyles='dashed')\n ax2.grid(which='both')\n plt.text(0.55, 0.05, simu.__str__(),\n style='italic', transform=ax2.transAxes, fontsize=7,\n bbox={'facecolor': 'white', 'alpha': 1, 'pad': 10})\n plt.tight_layout()\n", "path": "gammapy/scripts/cta_utils.py"}]}
3,332
286
gh_patches_debug_11877
rasdani/github-patches
git_diff
CTFd__CTFd-1048
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- import will crash ctfd <!-- If this is a bug report please fill out the template below. If this is a feature request please describe the behavior that you'd like to see. --> **Environment**: - CTFd Version/Commit: 2.1.3 - Operating System: ubuntu 18.04 - Web Browser and Version: Opera 60.0.3255.170 **What happened?** trying to import db (zip file) **What did you expect to happen?** it would import db (zip file) **How to reproduce your issue** **Any associated stack traces or error logs** Failed to disable foreign key checks. Continuing. Error: No support for ALTER of constraints in SQLite dialect I believe it's Alembic fault --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `migrations/versions/b5551cd26764_add_captain_column_to_teams.py` Content: ``` 1 """Add captain column to Teams 2 3 Revision ID: b5551cd26764 4 Revises: 4e4d5a9ea000 5 Create Date: 2019-04-12 00:29:08.021141 6 7 """ 8 from CTFd.models import db 9 from alembic import op 10 import sqlalchemy as sa 11 from sqlalchemy.sql import text, table, column, and_ 12 13 # revision identifiers, used by Alembic. 14 revision = 'b5551cd26764' 15 down_revision = '4e4d5a9ea000' 16 branch_labels = None 17 depends_on = None 18 19 teams_table = table('teams', 20 column('id', db.Integer), 21 column('captain_id', db.Integer), 22 ) 23 24 users_table = table('users', 25 column('id', db.Integer), 26 column('team_id', db.Integer), 27 ) 28 29 30 def upgrade(): 31 # ### commands auto generated by Alembic - please adjust! ### 32 op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True)) 33 op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id']) 34 35 connection = op.get_bind() 36 for team in connection.execute(teams_table.select()): 37 users = connection.execute( 38 users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1) 39 ) 40 for user in users: 41 connection.execute( 42 teams_table.update().where( 43 teams_table.c.id == team.id 44 ).values( 45 captain_id=user.id 46 ) 47 ) 48 # ### end Alembic commands ### 49 50 51 def downgrade(): 52 # ### commands auto generated by Alembic - please adjust! ### 53 op.drop_constraint('team_captain_id', 'teams', type_='foreignkey') 54 op.drop_column('teams', 'captain_id') 55 # ### end Alembic commands ### 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py --- a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py +++ b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py @@ -30,7 +30,11 @@ def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True)) - op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id']) + + bind = op.get_bind() + url = str(bind.engine.url) + if url.startswith('sqlite') is False: + op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id']) connection = op.get_bind() for team in connection.execute(teams_table.select()):
{"golden_diff": "diff --git a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n--- a/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n+++ b/migrations/versions/b5551cd26764_add_captain_column_to_teams.py\n@@ -30,7 +30,11 @@\n def upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n- op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n+\n+ bind = op.get_bind()\n+ url = str(bind.engine.url)\n+ if url.startswith('sqlite') is False:\n+ op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n \n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n", "issue": "import will crash ctfd\n<!--\r\nIf this is a bug report please fill out the template below.\r\n\r\nIf this is a feature request please describe the behavior that you'd like to see.\r\n-->\r\n\r\n**Environment**:\r\n\r\n - CTFd Version/Commit: 2.1.3\r\n - Operating System: ubuntu 18.04\r\n - Web Browser and Version: Opera 60.0.3255.170\r\n\r\n**What happened?**\r\ntrying to import db (zip file)\r\n**What did you expect to happen?**\r\nit would import db (zip file)\r\n**How to reproduce your issue**\r\n\r\n**Any associated stack traces or error logs**\r\nFailed to disable foreign key checks. Continuing.\r\nError: No support for ALTER of constraints in SQLite dialect\r\n\r\nI believe it's Alembic fault \n", "before_files": [{"content": "\"\"\"Add captain column to Teams\n\nRevision ID: b5551cd26764\nRevises: 4e4d5a9ea000\nCreate Date: 2019-04-12 00:29:08.021141\n\n\"\"\"\nfrom CTFd.models import db\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import text, table, column, and_\n\n# revision identifiers, used by Alembic.\nrevision = 'b5551cd26764'\ndown_revision = '4e4d5a9ea000'\nbranch_labels = None\ndepends_on = None\n\nteams_table = table('teams',\n column('id', db.Integer),\n column('captain_id', db.Integer),\n)\n\nusers_table = table('users',\n column('id', db.Integer),\n column('team_id', db.Integer),\n)\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n\n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n users = connection.execute(\n users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1)\n )\n for user in users:\n connection.execute(\n teams_table.update().where(\n teams_table.c.id == team.id\n ).values(\n captain_id=user.id\n )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint('team_captain_id', 'teams', type_='foreignkey')\n op.drop_column('teams', 'captain_id')\n # ### end Alembic commands ###\n", "path": "migrations/versions/b5551cd26764_add_captain_column_to_teams.py"}], "after_files": [{"content": "\"\"\"Add captain column to Teams\n\nRevision ID: b5551cd26764\nRevises: 4e4d5a9ea000\nCreate Date: 2019-04-12 00:29:08.021141\n\n\"\"\"\nfrom CTFd.models import db\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.sql import text, table, column, and_\n\n# revision identifiers, used by Alembic.\nrevision = 'b5551cd26764'\ndown_revision = '4e4d5a9ea000'\nbranch_labels = None\ndepends_on = None\n\nteams_table = table('teams',\n column('id', db.Integer),\n column('captain_id', db.Integer),\n)\n\nusers_table = table('users',\n column('id', db.Integer),\n column('team_id', db.Integer),\n)\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('teams', sa.Column('captain_id', sa.Integer(), nullable=True))\n\n bind = op.get_bind()\n url = str(bind.engine.url)\n if url.startswith('sqlite') is False:\n op.create_foreign_key('team_captain_id', 'teams', 'users', ['captain_id'], ['id'])\n\n connection = op.get_bind()\n for team in connection.execute(teams_table.select()):\n users = connection.execute(\n users_table.select().where(users_table.c.team_id == team.id).order_by(users_table.c.id).limit(1)\n )\n for user in users:\n connection.execute(\n teams_table.update().where(\n teams_table.c.id == team.id\n ).values(\n captain_id=user.id\n )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint('team_captain_id', 'teams', type_='foreignkey')\n op.drop_column('teams', 'captain_id')\n # ### end Alembic commands ###\n", "path": "migrations/versions/b5551cd26764_add_captain_column_to_teams.py"}]}
991
257
gh_patches_debug_7915
rasdani/github-patches
git_diff
psychopy__psychopy-3457
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Windows seg fault when defaultView set to Runner --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `psychopy/app/pavlovia_ui/menu.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 # Part of the PsychoPy library 5 # Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd. 6 # Distributed under the terms of the GNU General Public License (GPL). 7 8 import wx 9 import requests 10 11 from psychopy import logging 12 from .. import dialogs 13 from .functions import logInPavlovia 14 from psychopy.app.pavlovia_ui.project import syncProject 15 from .search import SearchFrame 16 from .project import ProjectEditor 17 from psychopy.localization import _translate 18 from psychopy.projects import pavlovia 19 20 21 class PavloviaMenu(wx.Menu): 22 app = None 23 appData = None 24 currentUser = None 25 knownUsers = None 26 searchDlg = None 27 28 def __init__(self, parent): 29 wx.Menu.__init__(self) 30 self.parent = parent # type: BuilderFrame 31 PavloviaMenu.app = parent.app 32 keys = self.app.keys 33 # from prefs fetch info about prev usernames and projects 34 PavloviaMenu.appData = self.app.prefs.appData['projects'] 35 36 # item = self.Append(wx.ID_ANY, _translate("Tell me more...")) 37 # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId()) 38 39 PavloviaMenu.knownUsers = pavlovia.knownUsers 40 41 # sub-menu for usernames and login 42 self.userMenu = wx.Menu() 43 # if a user was previously logged in then set them as current 44 lastPavUser = PavloviaMenu.appData['pavloviaUser'] 45 if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers): 46 lastPavUser = None 47 # if lastPavUser and not PavloviaMenu.currentUser: 48 # self.setUser(PavloviaMenu.appData['pavloviaUser']) 49 for name in self.knownUsers: 50 self.addToSubMenu(name, self.userMenu, self.onSetUser) 51 self.userMenu.AppendSeparator() 52 self.loginBtn = self.userMenu.Append(wx.ID_ANY, 53 _translate("Log in to Pavlovia...\t{}") 54 .format(keys['pavlovia_logIn'])) 55 parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId()) 56 self.AppendSubMenu(self.userMenu, _translate("User")) 57 58 # search 59 self.searchBtn = self.Append(wx.ID_ANY, 60 _translate("Search Pavlovia\t{}") 61 .format(keys['projectsFind'])) 62 parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId()) 63 64 # new 65 self.newBtn = self.Append(wx.ID_ANY, 66 _translate("New...\t{}").format(keys['projectsNew'])) 67 parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId()) 68 69 self.syncBtn = self.Append(wx.ID_ANY, 70 _translate("Sync\t{}").format(keys['projectsSync'])) 71 parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId()) 72 73 def addToSubMenu(self, name, menu, function): 74 item = menu.Append(wx.ID_ANY, name) 75 self.parent.Bind(wx.EVT_MENU, function, id=item.GetId()) 76 77 def onAbout(self, event): 78 wx.GetApp().followLink(event) 79 80 def onSetUser(self, event): 81 user = self.userMenu.GetLabelText(event.GetId()) 82 self.setUser(user) 83 84 def setUser(self, user=None): 85 86 if user is None and PavloviaMenu.appData['pavloviaUser']: 87 user = PavloviaMenu.appData['pavloviaUser'] 88 89 if user in [PavloviaMenu.currentUser, None]: 90 return # nothing to do here. Move along please. 
91 92 PavloviaMenu.currentUser = user 93 PavloviaMenu.appData['pavloviaUser'] = user 94 if user in pavlovia.knownUsers: 95 token = pavlovia.knownUsers[user]['token'] 96 try: 97 pavlovia.getCurrentSession().setToken(token) 98 except requests.exceptions.ConnectionError: 99 logging.warning("Tried to log in to Pavlovia but no network " 100 "connection") 101 return 102 else: 103 if hasattr(self, 'onLogInPavlovia'): 104 self.onLogInPavlovia() 105 106 if PavloviaMenu.searchDlg: 107 PavloviaMenu.searchDlg.updateUserProjs() 108 109 def onSync(self, event): 110 retVal = syncProject(parent=self.parent, project=self.parent.project) 111 if hasattr(self.parent, 'gitFeedback'): 112 self.parent.gitFeedback(retVal) 113 114 def onSearch(self, event): 115 PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app) 116 PavloviaMenu.searchDlg.Show() 117 118 def onLogInPavlovia(self, event=None): 119 logInPavlovia(parent=self.parent) 120 121 def onNew(self, event): 122 """Create a new project 123 """ 124 if pavlovia.getCurrentSession().user.username: 125 projEditor = ProjectEditor() 126 if projEditor.ShowModal() == wx.ID_OK: 127 self.parent.project = projEditor.project 128 # do a first sync as well 129 retVal = syncProject(parent=self.parent, project=projEditor.project) 130 self.parent.gitFeedback(retVal) 131 else: 132 infoDlg = dialogs.MessageDialog(parent=None, type='Info', 133 message=_translate( 134 "You need to log in" 135 " to create a project")) 136 infoDlg.Show() 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/psychopy/app/pavlovia_ui/menu.py b/psychopy/app/pavlovia_ui/menu.py --- a/psychopy/app/pavlovia_ui/menu.py +++ b/psychopy/app/pavlovia_ui/menu.py @@ -82,9 +82,9 @@ self.setUser(user) def setUser(self, user=None): - - if user is None and PavloviaMenu.appData['pavloviaUser']: - user = PavloviaMenu.appData['pavloviaUser'] + if PavloviaMenu.appData: + if user is None and PavloviaMenu.appData['pavloviaUser']: + user = PavloviaMenu.appData['pavloviaUser'] if user in [PavloviaMenu.currentUser, None]: return # nothing to do here. Move along please.
{"golden_diff": "diff --git a/psychopy/app/pavlovia_ui/menu.py b/psychopy/app/pavlovia_ui/menu.py\n--- a/psychopy/app/pavlovia_ui/menu.py\n+++ b/psychopy/app/pavlovia_ui/menu.py\n@@ -82,9 +82,9 @@\n self.setUser(user)\n \n def setUser(self, user=None):\n-\n- if user is None and PavloviaMenu.appData['pavloviaUser']:\n- user = PavloviaMenu.appData['pavloviaUser']\n+ if PavloviaMenu.appData:\n+ if user is None and PavloviaMenu.appData['pavloviaUser']:\n+ user = PavloviaMenu.appData['pavloviaUser']\n \n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. Move along please.\n", "issue": "Windows seg fault when defaultView set to Runner\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport wx\nimport requests\n\nfrom psychopy import logging\nfrom .. import dialogs\nfrom .functions import logInPavlovia\nfrom psychopy.app.pavlovia_ui.project import syncProject\nfrom .search import SearchFrame\nfrom .project import ProjectEditor\nfrom psychopy.localization import _translate\nfrom psychopy.projects import pavlovia\n\n\nclass PavloviaMenu(wx.Menu):\n app = None\n appData = None\n currentUser = None\n knownUsers = None\n searchDlg = None\n\n def __init__(self, parent):\n wx.Menu.__init__(self)\n self.parent = parent # type: BuilderFrame\n PavloviaMenu.app = parent.app\n keys = self.app.keys\n # from prefs fetch info about prev usernames and projects\n PavloviaMenu.appData = self.app.prefs.appData['projects']\n\n # item = self.Append(wx.ID_ANY, _translate(\"Tell me more...\"))\n # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId())\n\n PavloviaMenu.knownUsers = pavlovia.knownUsers\n\n # sub-menu for usernames and login\n self.userMenu = wx.Menu()\n # if a user was previously logged in then set them as current\n lastPavUser = PavloviaMenu.appData['pavloviaUser']\n if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers):\n lastPavUser = None\n # if lastPavUser and not PavloviaMenu.currentUser:\n # self.setUser(PavloviaMenu.appData['pavloviaUser'])\n for name in self.knownUsers:\n self.addToSubMenu(name, self.userMenu, self.onSetUser)\n self.userMenu.AppendSeparator()\n self.loginBtn = self.userMenu.Append(wx.ID_ANY,\n _translate(\"Log in to Pavlovia...\\t{}\")\n .format(keys['pavlovia_logIn']))\n parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId())\n self.AppendSubMenu(self.userMenu, _translate(\"User\"))\n\n # search\n self.searchBtn = self.Append(wx.ID_ANY,\n _translate(\"Search Pavlovia\\t{}\")\n .format(keys['projectsFind']))\n parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId())\n\n # new\n self.newBtn = self.Append(wx.ID_ANY,\n _translate(\"New...\\t{}\").format(keys['projectsNew']))\n parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId())\n\n self.syncBtn = self.Append(wx.ID_ANY,\n _translate(\"Sync\\t{}\").format(keys['projectsSync']))\n parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId())\n\n def addToSubMenu(self, name, menu, function):\n item = menu.Append(wx.ID_ANY, name)\n self.parent.Bind(wx.EVT_MENU, function, id=item.GetId())\n\n def onAbout(self, event):\n wx.GetApp().followLink(event)\n\n def onSetUser(self, event):\n user = self.userMenu.GetLabelText(event.GetId())\n self.setUser(user)\n\n def setUser(self, user=None):\n\n if user is None and 
PavloviaMenu.appData['pavloviaUser']:\n user = PavloviaMenu.appData['pavloviaUser']\n\n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. Move along please.\n\n PavloviaMenu.currentUser = user\n PavloviaMenu.appData['pavloviaUser'] = user\n if user in pavlovia.knownUsers:\n token = pavlovia.knownUsers[user]['token']\n try:\n pavlovia.getCurrentSession().setToken(token)\n except requests.exceptions.ConnectionError:\n logging.warning(\"Tried to log in to Pavlovia but no network \"\n \"connection\")\n return\n else:\n if hasattr(self, 'onLogInPavlovia'):\n self.onLogInPavlovia()\n\n if PavloviaMenu.searchDlg:\n PavloviaMenu.searchDlg.updateUserProjs()\n\n def onSync(self, event):\n retVal = syncProject(parent=self.parent, project=self.parent.project)\n if hasattr(self.parent, 'gitFeedback'):\n self.parent.gitFeedback(retVal)\n\n def onSearch(self, event):\n PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app)\n PavloviaMenu.searchDlg.Show()\n\n def onLogInPavlovia(self, event=None):\n logInPavlovia(parent=self.parent)\n\n def onNew(self, event):\n \"\"\"Create a new project\n \"\"\"\n if pavlovia.getCurrentSession().user.username:\n projEditor = ProjectEditor()\n if projEditor.ShowModal() == wx.ID_OK:\n self.parent.project = projEditor.project\n # do a first sync as well\n retVal = syncProject(parent=self.parent, project=projEditor.project)\n self.parent.gitFeedback(retVal)\n else:\n infoDlg = dialogs.MessageDialog(parent=None, type='Info',\n message=_translate(\n \"You need to log in\"\n \" to create a project\"))\n infoDlg.Show()\n", "path": "psychopy/app/pavlovia_ui/menu.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Part of the PsychoPy library\n# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.\n# Distributed under the terms of the GNU General Public License (GPL).\n\nimport wx\nimport requests\n\nfrom psychopy import logging\nfrom .. 
import dialogs\nfrom .functions import logInPavlovia\nfrom psychopy.app.pavlovia_ui.project import syncProject\nfrom .search import SearchFrame\nfrom .project import ProjectEditor\nfrom psychopy.localization import _translate\nfrom psychopy.projects import pavlovia\n\n\nclass PavloviaMenu(wx.Menu):\n app = None\n appData = None\n currentUser = None\n knownUsers = None\n searchDlg = None\n\n def __init__(self, parent):\n wx.Menu.__init__(self)\n self.parent = parent # type: BuilderFrame\n PavloviaMenu.app = parent.app\n keys = self.app.keys\n # from prefs fetch info about prev usernames and projects\n PavloviaMenu.appData = self.app.prefs.appData['projects']\n\n # item = self.Append(wx.ID_ANY, _translate(\"Tell me more...\"))\n # parent.Bind(wx.EVT_MENU, self.onAbout, id=item.GetId())\n\n PavloviaMenu.knownUsers = pavlovia.knownUsers\n\n # sub-menu for usernames and login\n self.userMenu = wx.Menu()\n # if a user was previously logged in then set them as current\n lastPavUser = PavloviaMenu.appData['pavloviaUser']\n if pavlovia.knownUsers and (lastPavUser not in pavlovia.knownUsers):\n lastPavUser = None\n # if lastPavUser and not PavloviaMenu.currentUser:\n # self.setUser(PavloviaMenu.appData['pavloviaUser'])\n for name in self.knownUsers:\n self.addToSubMenu(name, self.userMenu, self.onSetUser)\n self.userMenu.AppendSeparator()\n self.loginBtn = self.userMenu.Append(wx.ID_ANY,\n _translate(\"Log in to Pavlovia...\\t{}\")\n .format(keys['pavlovia_logIn']))\n parent.Bind(wx.EVT_MENU, self.onLogInPavlovia, id=self.loginBtn.GetId())\n self.AppendSubMenu(self.userMenu, _translate(\"User\"))\n\n # search\n self.searchBtn = self.Append(wx.ID_ANY,\n _translate(\"Search Pavlovia\\t{}\")\n .format(keys['projectsFind']))\n parent.Bind(wx.EVT_MENU, self.onSearch, id=self.searchBtn.GetId())\n\n # new\n self.newBtn = self.Append(wx.ID_ANY,\n _translate(\"New...\\t{}\").format(keys['projectsNew']))\n parent.Bind(wx.EVT_MENU, self.onNew, id=self.newBtn.GetId())\n\n self.syncBtn = self.Append(wx.ID_ANY,\n _translate(\"Sync\\t{}\").format(keys['projectsSync']))\n parent.Bind(wx.EVT_MENU, self.onSync, id=self.syncBtn.GetId())\n\n def addToSubMenu(self, name, menu, function):\n item = menu.Append(wx.ID_ANY, name)\n self.parent.Bind(wx.EVT_MENU, function, id=item.GetId())\n\n def onAbout(self, event):\n wx.GetApp().followLink(event)\n\n def onSetUser(self, event):\n user = self.userMenu.GetLabelText(event.GetId())\n self.setUser(user)\n\n def setUser(self, user=None):\n if PavloviaMenu.appData:\n if user is None and PavloviaMenu.appData['pavloviaUser']:\n user = PavloviaMenu.appData['pavloviaUser']\n\n if user in [PavloviaMenu.currentUser, None]:\n return # nothing to do here. 
Move along please.\n\n PavloviaMenu.currentUser = user\n PavloviaMenu.appData['pavloviaUser'] = user\n if user in pavlovia.knownUsers:\n token = pavlovia.knownUsers[user]['token']\n try:\n pavlovia.getCurrentSession().setToken(token)\n except requests.exceptions.ConnectionError:\n logging.warning(\"Tried to log in to Pavlovia but no network \"\n \"connection\")\n return\n else:\n if hasattr(self, 'onLogInPavlovia'):\n self.onLogInPavlovia()\n\n if PavloviaMenu.searchDlg:\n PavloviaMenu.searchDlg.updateUserProjs()\n\n def onSync(self, event):\n retVal = syncProject(parent=self.parent, project=self.parent.project)\n if hasattr(self.parent, 'gitFeedback'):\n self.parent.gitFeedback(retVal)\n\n def onSearch(self, event):\n PavloviaMenu.searchDlg = SearchFrame(app=self.parent.app)\n PavloviaMenu.searchDlg.Show()\n\n def onLogInPavlovia(self, event=None):\n logInPavlovia(parent=self.parent)\n\n def onNew(self, event):\n \"\"\"Create a new project\n \"\"\"\n if pavlovia.getCurrentSession().user.username:\n projEditor = ProjectEditor()\n if projEditor.ShowModal() == wx.ID_OK:\n self.parent.project = projEditor.project\n # do a first sync as well\n retVal = syncProject(parent=self.parent, project=projEditor.project)\n self.parent.gitFeedback(retVal)\n else:\n infoDlg = dialogs.MessageDialog(parent=None, type='Info',\n message=_translate(\n \"You need to log in\"\n \" to create a project\"))\n infoDlg.Show()\n", "path": "psychopy/app/pavlovia_ui/menu.py"}]}
1796
195
gh_patches_debug_13301
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-219
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Core Agent inheriting and holding open sockets from parent Reported via Slack: ``` Mike O'Toole 9:12 AM hey there -- are there any instructions available on installing/starting the core-agent separately from a django app? cschneid:scout-orange: 9:13 AM @Mike O'Toole - the binary gets downloaded to /tmp/scout_apm_core/… and has a help command. Mike O'Toole 9:13 AM we're running into an issue where the core-agent process seems to hang on to the ports that are uWSGI is binding when it first starts up. Then when we try to restart our uWSGI process, it tries to bind the ports again, and it can't because the core-agent process has still got them cschneid:scout-orange: 9:14 AM That is odd, it’s a totally different process, so shouldn’t ever have access to port file descriptors. Mike O'Toole 9:15 AM yeah -- that's what i thought too, but looking at lsof this is what im seeing: core-agen 23301 chuck-uwsgi 8u IPv4 7817953 0t0 TCP *:3031 (LISTEN) core-agen 23301 chuck-uwsgi 9u IPv4 7817954 0t0 TCP *:8080 (LISTEN) cschneid:scout-orange: 9:15 AM ok, well that’s not intended… :slightly_smiling_face: Mike O'Toole 9:15 AM 3031/8080 are the ports that uWSGI is binding cschneid:scout-orange: 9:16 AM For now, you’re looking for core-agent start --socket-path <match the default, or the explicit config> Mike O'Toole 9:16 AM sweet, thanks cschneid:scout-orange: 9:17 AM the default socket path is: def derive_socket_path(self): return "{}/{}/core-agent.sock".format( self.config.value("core_agent_dir"), self.config.value("core_agent_full_name"), which would be something like: /tmp/scout_apm_core/scout_apm_core-latest-x86_64-apple-darwin/core-agent.sock I’m really confused by the core agent holding onto those ports. We launch via subprocess.check_call( ... which I suppose would maintain file descriptors? If close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. (POSIX only). The default varies by platform: Always true on POSIX. from the docs. sounds like it should have closed the socket handles? Mike O'Toole 9:22 AM hmmm weird cschneid:scout-orange: 9:22 AM What version of python are you on? I can try to reproduce Mike O'Toole 9:24 AM python 2.7.6 ahh, here's someone having a related issue with uWSGI https://stackoverflow.com/questions/48376664/uwsgi-subprocess-create-a-new-process-the-socket-close-wait sounds like that config option is what we need cschneid:scout-orange: 9:25 AM ok, the docs I shared were from 3.5, I wonder if that behavior changed ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `src/scout_apm/core/core_agent_manager.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 import hashlib 5 import json 6 import logging 7 import os 8 import subprocess 9 import tarfile 10 import time 11 12 import requests 13 14 from scout_apm.core.context import AgentContext 15 16 logger = logging.getLogger(__name__) 17 18 19 class CoreAgentManager(object): 20 def __init__(self): 21 self.core_agent_bin_path = None 22 self.core_agent_bin_version = None 23 self.core_agent_dir = "{}/{}".format( 24 AgentContext.instance.config.value("core_agent_dir"), 25 AgentContext.instance.config.value("core_agent_full_name"), 26 ) 27 self.downloader = CoreAgentDownloader( 28 self.core_agent_dir, 29 AgentContext.instance.config.value("core_agent_full_name"), 30 ) 31 32 def launch(self): 33 if not AgentContext.instance.config.value("core_agent_launch"): 34 logger.debug( 35 "Not attempting to launch Core Agent " 36 "due to 'core_agent_launch' setting." 37 ) 38 return False 39 40 if not self.verify(): 41 if not AgentContext.instance.config.value("core_agent_download"): 42 logger.debug( 43 "Not attempting to download Core Agent due " 44 "to 'core_agent_download' setting." 45 ) 46 return False 47 48 self.download() 49 50 if not self.verify(): 51 logger.debug("Failed to verify Core Agent. Not launching Core Agent.") 52 return False 53 54 return self.run() 55 56 def download(self): 57 self.downloader.download() 58 59 def run(self): 60 try: 61 subprocess.check_call( 62 self.agent_binary() 63 + self.daemonize_flag() 64 + self.log_level() 65 + self.log_file() 66 + self.config_file() 67 + self.socket_path() 68 ) 69 except Exception: 70 # TODO detect failure of launch properly 71 logger.exception("Error running Core Agent") 72 return False 73 return True 74 75 def agent_binary(self): 76 return [self.core_agent_bin_path, "start"] 77 78 def daemonize_flag(self): 79 return ["--daemonize", "true"] 80 81 def socket_path(self): 82 socket_path = AgentContext.instance.config.value("socket_path") 83 return ["--socket", socket_path] 84 85 def log_level(self): 86 level = AgentContext.instance.config.value("log_level") 87 return ["--log-level", level] 88 89 def log_file(self): 90 path = AgentContext.instance.config.value("log_file") 91 if path is not None: 92 return ["--log-file", path] 93 else: 94 return [] 95 96 def config_file(self): 97 path = AgentContext.instance.config.value("config_file") 98 if path is not None: 99 return ["--config-file", path] 100 else: 101 return [] 102 103 def verify(self): 104 manifest = CoreAgentManifest(self.core_agent_dir + "/manifest.json") 105 if not manifest.is_valid(): 106 logger.debug( 107 "Core Agent verification failed: CoreAgentManifest is not valid." 
108 ) 109 self.core_agent_bin_path = None 110 self.core_agent_bin_version = None 111 return False 112 113 bin_path = self.core_agent_dir + "/" + manifest.bin_name 114 if sha256_digest(bin_path) == manifest.sha256: 115 self.core_agent_bin_path = bin_path 116 self.core_agent_bin_version = manifest.bin_version 117 return True 118 else: 119 logger.debug("Core Agent verification failed: SHA mismatch.") 120 self.core_agent_bin_path = None 121 self.core_agent_bin_version = None 122 return False 123 124 125 class CoreAgentDownloader(object): 126 def __init__(self, download_destination, core_agent_full_name): 127 self.stale_download_secs = 120 128 self.destination = download_destination 129 self.core_agent_full_name = core_agent_full_name 130 self.package_location = self.destination + "/{}.tgz".format( 131 self.core_agent_full_name 132 ) 133 self.download_lock_path = self.destination + "/download.lock" 134 self.download_lock_fd = None 135 136 def download(self): 137 self.create_core_agent_dir() 138 self.obtain_download_lock() 139 if self.download_lock_fd is not None: 140 try: 141 self.download_package() 142 self.untar() 143 except OSError: 144 logger.exception("Exception raised while downloading Core Agent") 145 finally: 146 self.release_download_lock() 147 148 def create_core_agent_dir(self): 149 try: 150 os.makedirs( 151 self.destination, AgentContext.instance.config.core_agent_permissions() 152 ) 153 except OSError: 154 pass 155 156 def obtain_download_lock(self): 157 self.clean_stale_download_lock() 158 try: 159 self.download_lock_fd = os.open( 160 self.download_lock_path, 161 os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK, 162 ) 163 except OSError as e: 164 logger.debug( 165 "Could not obtain download lock on %s: %r", self.download_lock_path, e 166 ) 167 self.download_lock_fd = None 168 169 def clean_stale_download_lock(self): 170 try: 171 delta = time.time() - os.stat(self.download_lock_path).st_ctime 172 if delta > self.stale_download_secs: 173 logger.debug("Clearing stale download lock file.") 174 os.unlink(self.download_lock_path) 175 except OSError: 176 pass 177 178 def release_download_lock(self): 179 if self.download_lock_fd is not None: 180 os.unlink(self.download_lock_path) 181 os.close(self.download_lock_fd) 182 183 def download_package(self): 184 logger.debug("Downloading: %s to %s", self.full_url(), self.package_location) 185 req = requests.get(self.full_url(), stream=True) 186 with open(self.package_location, "wb") as f: 187 for chunk in req.iter_content(1024 * 1000): 188 f.write(chunk) 189 190 def untar(self): 191 t = tarfile.open(self.package_location, "r") 192 t.extractall(self.destination) 193 194 def full_url(self): 195 return "{root_url}/{core_agent_full_name}.tgz".format( 196 root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name 197 ) 198 199 def root_url(self): 200 return AgentContext.instance.config.value("download_url") 201 202 203 class CoreAgentManifest(object): 204 def __init__(self, path): 205 self.manifest_path = path 206 self.bin_name = None 207 self.bin_version = None 208 self.sha256 = None 209 self.valid = False 210 try: 211 self.parse() 212 except (ValueError, TypeError, OSError, IOError) as e: 213 logger.debug("Error parsing Core Agent Manifest: %r", e) 214 215 def parse(self): 216 logger.debug("Parsing Core Agent manifest path: %s", self.manifest_path) 217 with open(self.manifest_path) as manifest_file: 218 self.raw = manifest_file.read() 219 self.json = json.loads(self.raw) 220 self.version = self.json["version"] 221 self.bin_version = 
self.json["core_agent_version"] 222 self.bin_name = self.json["core_agent_binary"] 223 self.sha256 = self.json["core_agent_binary_sha256"] 224 self.valid = True 225 logger.debug("Core Agent manifest json: %s", self.json) 226 227 def is_valid(self): 228 return self.valid 229 230 231 def sha256_digest(filename, block_size=65536): 232 try: 233 sha256 = hashlib.sha256() 234 with open(filename, "rb") as f: 235 for block in iter(lambda: f.read(block_size), b""): 236 sha256.update(block) 237 return sha256.hexdigest() 238 except OSError as e: 239 logger.debug("Error on digest: %r", e) 240 return None 241 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py --- a/src/scout_apm/core/core_agent_manager.py +++ b/src/scout_apm/core/core_agent_manager.py @@ -59,12 +59,15 @@ def run(self): try: subprocess.check_call( - self.agent_binary() - + self.daemonize_flag() - + self.log_level() - + self.log_file() - + self.config_file() - + self.socket_path() + ( + self.agent_binary() + + self.daemonize_flag() + + self.log_level() + + self.log_file() + + self.config_file() + + self.socket_path() + ), + close_fds=True, ) except Exception: # TODO detect failure of launch properly
{"golden_diff": "diff --git a/src/scout_apm/core/core_agent_manager.py b/src/scout_apm/core/core_agent_manager.py\n--- a/src/scout_apm/core/core_agent_manager.py\n+++ b/src/scout_apm/core/core_agent_manager.py\n@@ -59,12 +59,15 @@\n def run(self):\n try:\n subprocess.check_call(\n- self.agent_binary()\n- + self.daemonize_flag()\n- + self.log_level()\n- + self.log_file()\n- + self.config_file()\n- + self.socket_path()\n+ (\n+ self.agent_binary()\n+ + self.daemonize_flag()\n+ + self.log_level()\n+ + self.log_file()\n+ + self.config_file()\n+ + self.socket_path()\n+ ),\n+ close_fds=True,\n )\n except Exception:\n # TODO detect failure of launch properly\n", "issue": "Core Agent inheriting and holding open sockets from parent\nReported via Slack:\r\n\r\n```\r\n\r\nMike O'Toole 9:12 AM\r\nhey there -- are there any instructions available on installing/starting the core-agent separately from a django app?\r\ncschneid:scout-orange: 9:13 AM\r\n@Mike O'Toole - the binary gets downloaded to /tmp/scout_apm_core/\u2026 and has a help command.\r\nMike O'Toole 9:13 AM\r\nwe're running into an issue where the core-agent process seems to hang on to the ports that are uWSGI is binding when it first starts up. Then when we try to restart our uWSGI process, it tries to bind the ports again, and it can't because the core-agent process has still got them\r\ncschneid:scout-orange: 9:14 AM\r\nThat is odd, it\u2019s a totally different process, so shouldn\u2019t ever have access to port file descriptors.\r\nMike O'Toole 9:15 AM\r\nyeah -- that's what i thought too, but looking at lsof this is what im seeing:\r\ncore-agen 23301 chuck-uwsgi 8u IPv4 7817953 0t0 TCP *:3031 (LISTEN)\r\ncore-agen 23301 chuck-uwsgi 9u IPv4 7817954 0t0 TCP *:8080 (LISTEN)\r\ncschneid:scout-orange: 9:15 AM\r\nok, well that\u2019s not intended\u2026 :slightly_smiling_face:\r\nMike O'Toole 9:15 AM\r\n3031/8080 are the ports that uWSGI is binding\r\ncschneid:scout-orange: 9:16 AM\r\nFor now, you\u2019re looking for core-agent start --socket-path <match the default, or the explicit config> \r\nMike O'Toole 9:16 AM\r\nsweet, thanks\r\ncschneid:scout-orange: 9:17 AM\r\nthe default socket path is:\r\n def derive_socket_path(self):\r\n return \"{}/{}/core-agent.sock\".format(\r\n self.config.value(\"core_agent_dir\"),\r\n self.config.value(\"core_agent_full_name\"),\r\nwhich would be something like: /tmp/scout_apm_core/scout_apm_core-latest-x86_64-apple-darwin/core-agent.sock\r\nI\u2019m really confused by the core agent holding onto those ports. We launch via subprocess.check_call( ... which I suppose would maintain file descriptors?\r\nIf close_fds is true, all file descriptors except 0, 1 and 2 will be closed before the child process is executed. (POSIX only). The default varies by platform: Always true on POSIX. \r\nfrom the docs.\r\nsounds like it should have closed the socket handles?\r\nMike O'Toole 9:22 AM\r\nhmmm weird\r\ncschneid:scout-orange: 9:22 AM\r\nWhat version of python are you on? 
I can try to reproduce\r\nMike O'Toole 9:24 AM\r\npython 2.7.6\r\nahh, here's someone having a related issue with uWSGI\r\n\r\nhttps://stackoverflow.com/questions/48376664/uwsgi-subprocess-create-a-new-process-the-socket-close-wait\r\n\r\nsounds like that config option is what we need\r\ncschneid:scout-orange: 9:25 AM\r\nok, the docs I shared were from 3.5, I wonder if that behavior changed\r\n```\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport json\nimport logging\nimport os\nimport subprocess\nimport tarfile\nimport time\n\nimport requests\n\nfrom scout_apm.core.context import AgentContext\n\nlogger = logging.getLogger(__name__)\n\n\nclass CoreAgentManager(object):\n def __init__(self):\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n self.core_agent_dir = \"{}/{}\".format(\n AgentContext.instance.config.value(\"core_agent_dir\"),\n AgentContext.instance.config.value(\"core_agent_full_name\"),\n )\n self.downloader = CoreAgentDownloader(\n self.core_agent_dir,\n AgentContext.instance.config.value(\"core_agent_full_name\"),\n )\n\n def launch(self):\n if not AgentContext.instance.config.value(\"core_agent_launch\"):\n logger.debug(\n \"Not attempting to launch Core Agent \"\n \"due to 'core_agent_launch' setting.\"\n )\n return False\n\n if not self.verify():\n if not AgentContext.instance.config.value(\"core_agent_download\"):\n logger.debug(\n \"Not attempting to download Core Agent due \"\n \"to 'core_agent_download' setting.\"\n )\n return False\n\n self.download()\n\n if not self.verify():\n logger.debug(\"Failed to verify Core Agent. Not launching Core Agent.\")\n return False\n\n return self.run()\n\n def download(self):\n self.downloader.download()\n\n def run(self):\n try:\n subprocess.check_call(\n self.agent_binary()\n + self.daemonize_flag()\n + self.log_level()\n + self.log_file()\n + self.config_file()\n + self.socket_path()\n )\n except Exception:\n # TODO detect failure of launch properly\n logger.exception(\"Error running Core Agent\")\n return False\n return True\n\n def agent_binary(self):\n return [self.core_agent_bin_path, \"start\"]\n\n def daemonize_flag(self):\n return [\"--daemonize\", \"true\"]\n\n def socket_path(self):\n socket_path = AgentContext.instance.config.value(\"socket_path\")\n return [\"--socket\", socket_path]\n\n def log_level(self):\n level = AgentContext.instance.config.value(\"log_level\")\n return [\"--log-level\", level]\n\n def log_file(self):\n path = AgentContext.instance.config.value(\"log_file\")\n if path is not None:\n return [\"--log-file\", path]\n else:\n return []\n\n def config_file(self):\n path = AgentContext.instance.config.value(\"config_file\")\n if path is not None:\n return [\"--config-file\", path]\n else:\n return []\n\n def verify(self):\n manifest = CoreAgentManifest(self.core_agent_dir + \"/manifest.json\")\n if not manifest.is_valid():\n logger.debug(\n \"Core Agent verification failed: CoreAgentManifest is not valid.\"\n )\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n bin_path = self.core_agent_dir + \"/\" + manifest.bin_name\n if sha256_digest(bin_path) == manifest.sha256:\n self.core_agent_bin_path = bin_path\n self.core_agent_bin_version = manifest.bin_version\n return True\n else:\n logger.debug(\"Core Agent verification failed: SHA mismatch.\")\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return 
False\n\n\nclass CoreAgentDownloader(object):\n def __init__(self, download_destination, core_agent_full_name):\n self.stale_download_secs = 120\n self.destination = download_destination\n self.core_agent_full_name = core_agent_full_name\n self.package_location = self.destination + \"/{}.tgz\".format(\n self.core_agent_full_name\n )\n self.download_lock_path = self.destination + \"/download.lock\"\n self.download_lock_fd = None\n\n def download(self):\n self.create_core_agent_dir()\n self.obtain_download_lock()\n if self.download_lock_fd is not None:\n try:\n self.download_package()\n self.untar()\n except OSError:\n logger.exception(\"Exception raised while downloading Core Agent\")\n finally:\n self.release_download_lock()\n\n def create_core_agent_dir(self):\n try:\n os.makedirs(\n self.destination, AgentContext.instance.config.core_agent_permissions()\n )\n except OSError:\n pass\n\n def obtain_download_lock(self):\n self.clean_stale_download_lock()\n try:\n self.download_lock_fd = os.open(\n self.download_lock_path,\n os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,\n )\n except OSError as e:\n logger.debug(\n \"Could not obtain download lock on %s: %r\", self.download_lock_path, e\n )\n self.download_lock_fd = None\n\n def clean_stale_download_lock(self):\n try:\n delta = time.time() - os.stat(self.download_lock_path).st_ctime\n if delta > self.stale_download_secs:\n logger.debug(\"Clearing stale download lock file.\")\n os.unlink(self.download_lock_path)\n except OSError:\n pass\n\n def release_download_lock(self):\n if self.download_lock_fd is not None:\n os.unlink(self.download_lock_path)\n os.close(self.download_lock_fd)\n\n def download_package(self):\n logger.debug(\"Downloading: %s to %s\", self.full_url(), self.package_location)\n req = requests.get(self.full_url(), stream=True)\n with open(self.package_location, \"wb\") as f:\n for chunk in req.iter_content(1024 * 1000):\n f.write(chunk)\n\n def untar(self):\n t = tarfile.open(self.package_location, \"r\")\n t.extractall(self.destination)\n\n def full_url(self):\n return \"{root_url}/{core_agent_full_name}.tgz\".format(\n root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name\n )\n\n def root_url(self):\n return AgentContext.instance.config.value(\"download_url\")\n\n\nclass CoreAgentManifest(object):\n def __init__(self, path):\n self.manifest_path = path\n self.bin_name = None\n self.bin_version = None\n self.sha256 = None\n self.valid = False\n try:\n self.parse()\n except (ValueError, TypeError, OSError, IOError) as e:\n logger.debug(\"Error parsing Core Agent Manifest: %r\", e)\n\n def parse(self):\n logger.debug(\"Parsing Core Agent manifest path: %s\", self.manifest_path)\n with open(self.manifest_path) as manifest_file:\n self.raw = manifest_file.read()\n self.json = json.loads(self.raw)\n self.version = self.json[\"version\"]\n self.bin_version = self.json[\"core_agent_version\"]\n self.bin_name = self.json[\"core_agent_binary\"]\n self.sha256 = self.json[\"core_agent_binary_sha256\"]\n self.valid = True\n logger.debug(\"Core Agent manifest json: %s\", self.json)\n\n def is_valid(self):\n return self.valid\n\n\ndef sha256_digest(filename, block_size=65536):\n try:\n sha256 = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(block_size), b\"\"):\n sha256.update(block)\n return sha256.hexdigest()\n except OSError as e:\n logger.debug(\"Error on digest: %r\", e)\n return None\n", "path": "src/scout_apm/core/core_agent_manager.py"}], "after_files": [{"content": 
"# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport hashlib\nimport json\nimport logging\nimport os\nimport subprocess\nimport tarfile\nimport time\n\nimport requests\n\nfrom scout_apm.core.context import AgentContext\n\nlogger = logging.getLogger(__name__)\n\n\nclass CoreAgentManager(object):\n def __init__(self):\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n self.core_agent_dir = \"{}/{}\".format(\n AgentContext.instance.config.value(\"core_agent_dir\"),\n AgentContext.instance.config.value(\"core_agent_full_name\"),\n )\n self.downloader = CoreAgentDownloader(\n self.core_agent_dir,\n AgentContext.instance.config.value(\"core_agent_full_name\"),\n )\n\n def launch(self):\n if not AgentContext.instance.config.value(\"core_agent_launch\"):\n logger.debug(\n \"Not attempting to launch Core Agent \"\n \"due to 'core_agent_launch' setting.\"\n )\n return False\n\n if not self.verify():\n if not AgentContext.instance.config.value(\"core_agent_download\"):\n logger.debug(\n \"Not attempting to download Core Agent due \"\n \"to 'core_agent_download' setting.\"\n )\n return False\n\n self.download()\n\n if not self.verify():\n logger.debug(\"Failed to verify Core Agent. Not launching Core Agent.\")\n return False\n\n return self.run()\n\n def download(self):\n self.downloader.download()\n\n def run(self):\n try:\n subprocess.check_call(\n (\n self.agent_binary()\n + self.daemonize_flag()\n + self.log_level()\n + self.log_file()\n + self.config_file()\n + self.socket_path()\n ),\n close_fds=True,\n )\n except Exception:\n # TODO detect failure of launch properly\n logger.exception(\"Error running Core Agent\")\n return False\n return True\n\n def agent_binary(self):\n return [self.core_agent_bin_path, \"start\"]\n\n def daemonize_flag(self):\n return [\"--daemonize\", \"true\"]\n\n def socket_path(self):\n socket_path = AgentContext.instance.config.value(\"socket_path\")\n return [\"--socket\", socket_path]\n\n def log_level(self):\n level = AgentContext.instance.config.value(\"log_level\")\n return [\"--log-level\", level]\n\n def log_file(self):\n path = AgentContext.instance.config.value(\"log_file\")\n if path is not None:\n return [\"--log-file\", path]\n else:\n return []\n\n def config_file(self):\n path = AgentContext.instance.config.value(\"config_file\")\n if path is not None:\n return [\"--config-file\", path]\n else:\n return []\n\n def verify(self):\n manifest = CoreAgentManifest(self.core_agent_dir + \"/manifest.json\")\n if not manifest.is_valid():\n logger.debug(\n \"Core Agent verification failed: CoreAgentManifest is not valid.\"\n )\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n bin_path = self.core_agent_dir + \"/\" + manifest.bin_name\n if sha256_digest(bin_path) == manifest.sha256:\n self.core_agent_bin_path = bin_path\n self.core_agent_bin_version = manifest.bin_version\n return True\n else:\n logger.debug(\"Core Agent verification failed: SHA mismatch.\")\n self.core_agent_bin_path = None\n self.core_agent_bin_version = None\n return False\n\n\nclass CoreAgentDownloader(object):\n def __init__(self, download_destination, core_agent_full_name):\n self.stale_download_secs = 120\n self.destination = download_destination\n self.core_agent_full_name = core_agent_full_name\n self.package_location = self.destination + \"/{}.tgz\".format(\n self.core_agent_full_name\n )\n self.download_lock_path = self.destination + \"/download.lock\"\n 
self.download_lock_fd = None\n\n def download(self):\n self.create_core_agent_dir()\n self.obtain_download_lock()\n if self.download_lock_fd is not None:\n try:\n self.download_package()\n self.untar()\n except OSError:\n logger.exception(\"Exception raised while downloading Core Agent\")\n finally:\n self.release_download_lock()\n\n def create_core_agent_dir(self):\n try:\n os.makedirs(\n self.destination, AgentContext.instance.config.core_agent_permissions()\n )\n except OSError:\n pass\n\n def obtain_download_lock(self):\n self.clean_stale_download_lock()\n try:\n self.download_lock_fd = os.open(\n self.download_lock_path,\n os.O_RDWR | os.O_CREAT | os.O_EXCL | os.O_NONBLOCK,\n )\n except OSError as e:\n logger.debug(\n \"Could not obtain download lock on %s: %r\", self.download_lock_path, e\n )\n self.download_lock_fd = None\n\n def clean_stale_download_lock(self):\n try:\n delta = time.time() - os.stat(self.download_lock_path).st_ctime\n if delta > self.stale_download_secs:\n logger.debug(\"Clearing stale download lock file.\")\n os.unlink(self.download_lock_path)\n except OSError:\n pass\n\n def release_download_lock(self):\n if self.download_lock_fd is not None:\n os.unlink(self.download_lock_path)\n os.close(self.download_lock_fd)\n\n def download_package(self):\n logger.debug(\"Downloading: %s to %s\", self.full_url(), self.package_location)\n req = requests.get(self.full_url(), stream=True)\n with open(self.package_location, \"wb\") as f:\n for chunk in req.iter_content(1024 * 1000):\n f.write(chunk)\n\n def untar(self):\n t = tarfile.open(self.package_location, \"r\")\n t.extractall(self.destination)\n\n def full_url(self):\n return \"{root_url}/{core_agent_full_name}.tgz\".format(\n root_url=self.root_url(), core_agent_full_name=self.core_agent_full_name\n )\n\n def root_url(self):\n return AgentContext.instance.config.value(\"download_url\")\n\n\nclass CoreAgentManifest(object):\n def __init__(self, path):\n self.manifest_path = path\n self.bin_name = None\n self.bin_version = None\n self.sha256 = None\n self.valid = False\n try:\n self.parse()\n except (ValueError, TypeError, OSError, IOError) as e:\n logger.debug(\"Error parsing Core Agent Manifest: %r\", e)\n\n def parse(self):\n logger.debug(\"Parsing Core Agent manifest path: %s\", self.manifest_path)\n with open(self.manifest_path) as manifest_file:\n self.raw = manifest_file.read()\n self.json = json.loads(self.raw)\n self.version = self.json[\"version\"]\n self.bin_version = self.json[\"core_agent_version\"]\n self.bin_name = self.json[\"core_agent_binary\"]\n self.sha256 = self.json[\"core_agent_binary_sha256\"]\n self.valid = True\n logger.debug(\"Core Agent manifest json: %s\", self.json)\n\n def is_valid(self):\n return self.valid\n\n\ndef sha256_digest(filename, block_size=65536):\n try:\n sha256 = hashlib.sha256()\n with open(filename, \"rb\") as f:\n for block in iter(lambda: f.read(block_size), b\"\"):\n sha256.update(block)\n return sha256.hexdigest()\n except OSError as e:\n logger.debug(\"Error on digest: %r\", e)\n return None\n", "path": "src/scout_apm/core/core_agent_manager.py"}]}
3303
192
gh_patches_debug_11710
rasdani/github-patches
git_diff
Textualize__textual-2317
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Scrolling containers should be focusable by default `ScrollHorizontal` and `ScrollVertical` should have `can_focus=True`. Check this doesn't break any of the example apps. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/textual/containers.py` Content: ``` 1 """ 2 Container widgets for quick styling. 3 4 """ 5 6 7 from .widget import Widget 8 9 10 class Container(Widget): 11 """Simple container widget, with vertical layout.""" 12 13 DEFAULT_CSS = """ 14 Container { 15 height: 1fr; 16 layout: vertical; 17 overflow: auto; 18 } 19 """ 20 21 22 class Vertical(Widget): 23 """A container which arranges children vertically.""" 24 25 DEFAULT_CSS = """ 26 Vertical { 27 width: 1fr; 28 layout: vertical; 29 overflow: hidden hidden; 30 } 31 """ 32 33 34 class VerticalScroll(Widget): 35 """A container which arranges children vertically, with an automatic vertical scrollbar.""" 36 37 DEFAULT_CSS = """ 38 VerticalScroll { 39 width: 1fr; 40 layout: vertical; 41 overflow-y: auto; 42 } 43 """ 44 45 46 class Horizontal(Widget): 47 """A container which arranges children horizontally.""" 48 49 DEFAULT_CSS = """ 50 Horizontal { 51 height: 1fr; 52 layout: horizontal; 53 overflow: hidden hidden; 54 } 55 """ 56 57 58 class HorizontalScroll(Widget): 59 """A container which arranges children horizontally, with an automatic horizontal scrollbar.""" 60 61 DEFAULT_CSS = """ 62 HorizontalScroll { 63 height: 1fr; 64 layout: horizontal; 65 overflow-x: auto; 66 } 67 """ 68 69 70 class Center(Widget): 71 """A container which centers children horizontally.""" 72 73 DEFAULT_CSS = """ 74 Center { 75 align-horizontal: center; 76 height: auto; 77 width: 1fr; 78 } 79 """ 80 81 82 class Middle(Widget): 83 """A container which aligns children vertically in the middle.""" 84 85 DEFAULT_CSS = """ 86 Middle { 87 align-vertical: middle; 88 width: auto; 89 height: 1fr; 90 } 91 """ 92 93 94 class Grid(Widget): 95 """A container with grid alignment.""" 96 97 DEFAULT_CSS = """ 98 Grid { 99 height: 1fr; 100 layout: grid; 101 } 102 """ 103 104 105 class Content(Widget, can_focus=True, can_focus_children=False): 106 """A container for content such as text.""" 107 108 DEFAULT_CSS = """ 109 VerticalScroll { 110 height: 1fr; 111 layout: vertical; 112 overflow-y: auto; 113 } 114 """ 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/textual/containers.py b/src/textual/containers.py --- a/src/textual/containers.py +++ b/src/textual/containers.py @@ -31,7 +31,7 @@ """ -class VerticalScroll(Widget): +class VerticalScroll(Widget, can_focus=True): """A container which arranges children vertically, with an automatic vertical scrollbar.""" DEFAULT_CSS = """ @@ -55,7 +55,7 @@ """ -class HorizontalScroll(Widget): +class HorizontalScroll(Widget, can_focus=True): """A container which arranges children horizontally, with an automatic horizontal scrollbar.""" DEFAULT_CSS = """
{"golden_diff": "diff --git a/src/textual/containers.py b/src/textual/containers.py\n--- a/src/textual/containers.py\n+++ b/src/textual/containers.py\n@@ -31,7 +31,7 @@\n \"\"\"\n \n \n-class VerticalScroll(Widget):\n+class VerticalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children vertically, with an automatic vertical scrollbar.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n@@ -55,7 +55,7 @@\n \"\"\"\n \n \n-class HorizontalScroll(Widget):\n+class HorizontalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n \n DEFAULT_CSS = \"\"\"\n", "issue": "Scrolling containers should be focusable by default\n`ScrollHorizontal` and `ScrollVertical` should have `can_focus=True`.\n\nCheck this doesn't break any of the example apps.\n", "before_files": [{"content": "\"\"\"\nContainer widgets for quick styling.\n\n\"\"\"\n\n\nfrom .widget import Widget\n\n\nclass Container(Widget):\n \"\"\"Simple container widget, with vertical layout.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Container {\n height: 1fr;\n layout: vertical;\n overflow: auto;\n }\n \"\"\"\n\n\nclass Vertical(Widget):\n \"\"\"A container which arranges children vertically.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Vertical {\n width: 1fr;\n layout: vertical;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass VerticalScroll(Widget):\n \"\"\"A container which arranges children vertically, with an automatic vertical scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n width: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n\n\nclass Horizontal(Widget):\n \"\"\"A container which arranges children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Horizontal {\n height: 1fr;\n layout: horizontal;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass HorizontalScroll(Widget):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HorizontalScroll {\n height: 1fr;\n layout: horizontal;\n overflow-x: auto;\n }\n \"\"\"\n\n\nclass Center(Widget):\n \"\"\"A container which centers children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Center {\n align-horizontal: center;\n height: auto;\n width: 1fr;\n }\n \"\"\"\n\n\nclass Middle(Widget):\n \"\"\"A container which aligns children vertically in the middle.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Middle {\n align-vertical: middle;\n width: auto;\n height: 1fr;\n }\n \"\"\"\n\n\nclass Grid(Widget):\n \"\"\"A container with grid alignment.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Grid {\n height: 1fr;\n layout: grid;\n }\n \"\"\"\n\n\nclass Content(Widget, can_focus=True, can_focus_children=False):\n \"\"\"A container for content such as text.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n height: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n", "path": "src/textual/containers.py"}], "after_files": [{"content": "\"\"\"\nContainer widgets for quick styling.\n\n\"\"\"\n\n\nfrom .widget import Widget\n\n\nclass Container(Widget):\n \"\"\"Simple container widget, with vertical layout.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Container {\n height: 1fr;\n layout: vertical;\n overflow: auto;\n }\n \"\"\"\n\n\nclass Vertical(Widget):\n \"\"\"A container which arranges children vertically.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Vertical {\n width: 1fr;\n layout: vertical;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass VerticalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children vertically, with an automatic vertical 
scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n width: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n\n\nclass Horizontal(Widget):\n \"\"\"A container which arranges children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Horizontal {\n height: 1fr;\n layout: horizontal;\n overflow: hidden hidden;\n }\n \"\"\"\n\n\nclass HorizontalScroll(Widget, can_focus=True):\n \"\"\"A container which arranges children horizontally, with an automatic horizontal scrollbar.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n HorizontalScroll {\n height: 1fr;\n layout: horizontal;\n overflow-x: auto;\n }\n \"\"\"\n\n\nclass Center(Widget):\n \"\"\"A container which centers children horizontally.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Center {\n align-horizontal: center;\n height: auto;\n width: 1fr;\n }\n \"\"\"\n\n\nclass Middle(Widget):\n \"\"\"A container which aligns children vertically in the middle.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Middle {\n align-vertical: middle;\n width: auto;\n height: 1fr;\n }\n \"\"\"\n\n\nclass Grid(Widget):\n \"\"\"A container with grid alignment.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n Grid {\n height: 1fr;\n layout: grid;\n }\n \"\"\"\n\n\nclass Content(Widget, can_focus=True, can_focus_children=False):\n \"\"\"A container for content such as text.\"\"\"\n\n DEFAULT_CSS = \"\"\"\n VerticalScroll {\n height: 1fr;\n layout: vertical;\n overflow-y: auto;\n }\n \"\"\"\n", "path": "src/textual/containers.py"}]}
1002
144
gh_patches_debug_5083
rasdani/github-patches
git_diff
ivy-llc__ivy-17744
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- gcd --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/tensor/math.py` Content: ``` 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back 5 6 7 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 8 @to_ivy_arrays_and_back 9 def sin(x, name=None): 10 return ivy.sin(x) 11 12 13 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 14 @to_ivy_arrays_and_back 15 def cos(x, name=None): 16 return ivy.cos(x) 17 18 19 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 20 @to_ivy_arrays_and_back 21 def acos(x, name=None): 22 return ivy.acos(x) 23 24 25 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 26 @to_ivy_arrays_and_back 27 def cosh(x, name=None): 28 return ivy.cosh(x) 29 30 31 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 32 @to_ivy_arrays_and_back 33 def tanh(x, name=None): 34 return ivy.tanh(x) 35 36 37 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 38 @to_ivy_arrays_and_back 39 def acosh(x, name=None): 40 return ivy.acosh(x) 41 42 43 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 44 @to_ivy_arrays_and_back 45 def asin(x, name=None): 46 return ivy.asin(x) 47 48 49 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 50 @to_ivy_arrays_and_back 51 def log(x, name=None): 52 return ivy.log(x) 53 54 55 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 56 @to_ivy_arrays_and_back 57 def divide(x, y, name=None): 58 return ivy.divide(x, y) 59 60 61 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 62 @to_ivy_arrays_and_back 63 def abs(x, name=None): 64 return ivy.abs(x) 65 66 67 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 68 @to_ivy_arrays_and_back 69 def multiply(x, y, name=None): 70 return ivy.multiply(x, y) 71 72 73 @with_unsupported_dtypes( 74 {"2.5.0 and below": ("bool", "unsigned", "int8", "float16", "bfloat16")}, "paddle" 75 ) 76 @to_ivy_arrays_and_back 77 def add(x, y, name=None): 78 return ivy.add(x, y) 79 80 81 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 82 @to_ivy_arrays_and_back 83 def subtract(x, y, name=None): 84 return ivy.subtract(x, y) 85 86 87 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 88 @to_ivy_arrays_and_back 89 def sqrt(x, name=None): 90 return ivy.sqrt(x) 91 92 93 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 94 @to_ivy_arrays_and_back 95 def atanh(x, name=None): 96 return ivy.atanh(x) 97 98 99 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 100 @to_ivy_arrays_and_back 101 def atan(x, name=None): 102 return ivy.atan(x) 103 104 105 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 106 @to_ivy_arrays_and_back 107 def round(x, name=None): 108 return ivy.round(x) 109 110 111 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 112 @to_ivy_arrays_and_back 113 def ceil(x, name=None): 114 
return ivy.ceil(x) 115 116 117 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 118 @to_ivy_arrays_and_back 119 def sinh(x, name=None): 120 return ivy.sinh(x) 121 122 123 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 124 @to_ivy_arrays_and_back 125 def pow(x, y, name=None): 126 return ivy.pow(x, y) 127 128 129 @with_unsupported_dtypes({"2.4.2 and below": ("int16", "float16")}, "paddle") 130 @to_ivy_arrays_and_back 131 def conj(x, name=None): 132 return ivy.conj(x) 133 134 135 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 136 @to_ivy_arrays_and_back 137 def floor(x, name=None): 138 return ivy.floor(x) 139 140 141 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 142 @to_ivy_arrays_and_back 143 def remainder(x, y, name=None): 144 return ivy.remainder(x, y) 145 146 147 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 148 @to_ivy_arrays_and_back 149 def log2(x, name=None): 150 return ivy.log2(x) 151 152 153 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 154 @to_ivy_arrays_and_back 155 def log1p(x, name=None): 156 return ivy.log1p(x) 157 158 159 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 160 @to_ivy_arrays_and_back 161 def rad2deg(x, name=None): 162 return ivy.rad2deg(x) 163 164 165 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 166 @to_ivy_arrays_and_back 167 def deg2rad(x, name=None): 168 return ivy.deg2rad(x) 169 170 171 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 172 @to_ivy_arrays_and_back 173 def gcd(x, y, name=None): 174 return ivy.gcd(x, y) 175 176 177 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 178 @to_ivy_arrays_and_back 179 def tan(x, name=None): 180 return ivy.tan(x) 181 182 183 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 184 @to_ivy_arrays_and_back 185 def atan2(x, y, name=None): 186 return ivy.atan2(x, y) 187 188 189 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 190 @to_ivy_arrays_and_back 191 def square(x, name=None): 192 return ivy.square(x) 193 194 195 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 196 @to_ivy_arrays_and_back 197 def sign(x, name=None): 198 return ivy.sign(x) 199 200 201 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 202 @to_ivy_arrays_and_back 203 def neg(x, name=None): 204 return ivy.negative(x) 205 206 207 @with_supported_dtypes({"2.5.0 and below": ("float32", "float64")}, "paddle") 208 @to_ivy_arrays_and_back 209 def exp(x, name=None): 210 return ivy.exp(x) 211 212 213 @with_supported_dtypes( 214 { 215 "2.4.2 and below": ( 216 "float32", 217 "float64", 218 "int32", 219 "int64", 220 "complex64", 221 "complex128", 222 ) 223 }, 224 "paddle", 225 ) 226 @to_ivy_arrays_and_back 227 def cumprod(x, dim=None, dtype=None, name=None): 228 return ivy.cumprod(x, axis=dim, dtype=dtype) 229 230 231 @with_unsupported_dtypes({"2.5.0 and below": ("float16", "bfloat16")}, "paddle") 232 @to_ivy_arrays_and_back 233 def reciprocal(x, name=None): 234 return ivy.reciprocal(x) 235 236 237 @with_unsupported_dtypes({"2.5.0 and below": "bfloat16"}, "paddle") 238 @to_ivy_arrays_and_back 239 def fmin(x, y, name=None): 240 return ivy.fmin(x, y) 241 ``` --- END FILES --- Please first localize the bug based on the issue 
statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -234,7 +234,14 @@ return ivy.reciprocal(x) +@with_supported_dtypes({"2.5.0 and below": ("int32", "int64")}, "paddle") +@to_ivy_arrays_and_back +def gcd(x, y, name=None): + return ivy.gcd(x, y) + + @with_unsupported_dtypes({"2.5.0 and below": "bfloat16"}, "paddle") @to_ivy_arrays_and_back def fmin(x, y, name=None): return ivy.fmin(x, y) +
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -234,7 +234,14 @@\n return ivy.reciprocal(x)\n \n \n+@with_supported_dtypes({\"2.5.0 and below\": (\"int32\", \"int64\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def gcd(x, y, name=None):\n+ return ivy.gcd(x, y)\n+\n+ \n @with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n @to_ivy_arrays_and_back\n def fmin(x, y, name=None):\n return ivy.fmin(x, y)\n+\n", "issue": "gcd\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return 
ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": 
\"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes, with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acosh(x, name=None):\n return ivy.acosh(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef asin(x, name=None):\n return ivy.asin(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log(x, name=None):\n return ivy.log(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef divide(x, y, name=None):\n return ivy.divide(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef abs(x, name=None):\n return ivy.abs(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef multiply(x, y, name=None):\n return ivy.multiply(x, y)\n\n\n@with_unsupported_dtypes(\n {\"2.5.0 and below\": (\"bool\", \"unsigned\", \"int8\", \"float16\", \"bfloat16\")}, \"paddle\"\n)\n@to_ivy_arrays_and_back\ndef add(x, y, name=None):\n return ivy.add(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef subtract(x, y, name=None):\n return ivy.subtract(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sqrt(x, name=None):\n return ivy.sqrt(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atanh(x, name=None):\n return ivy.atanh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan(x, name=None):\n return ivy.atan(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef round(x, name=None):\n return ivy.round(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef ceil(x, name=None):\n return ivy.ceil(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sinh(x, 
name=None):\n return ivy.sinh(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef pow(x, y, name=None):\n return ivy.pow(x, y)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"int16\", \"float16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef conj(x, name=None):\n return ivy.conj(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef floor(x, name=None):\n return ivy.floor(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef remainder(x, y, name=None):\n return ivy.remainder(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log2(x, name=None):\n return ivy.log2(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef log1p(x, name=None):\n return ivy.log1p(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef rad2deg(x, name=None):\n return ivy.rad2deg(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef deg2rad(x, name=None):\n return ivy.deg2rad(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tan(x, name=None):\n return ivy.tan(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef atan2(x, y, name=None):\n return ivy.atan2(x, y)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef square(x, name=None):\n return ivy.square(x)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sign(x, name=None):\n return ivy.sign(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef neg(x, name=None):\n return ivy.negative(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"float32\", \"float64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef exp(x, name=None):\n return ivy.exp(x)\n\n\n@with_supported_dtypes(\n {\n \"2.4.2 and below\": (\n \"float32\",\n \"float64\",\n \"int32\",\n \"int64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef cumprod(x, dim=None, dtype=None, name=None):\n return ivy.cumprod(x, axis=dim, dtype=dtype)\n\n\n@with_unsupported_dtypes({\"2.5.0 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef reciprocal(x, name=None):\n return ivy.reciprocal(x)\n\n\n@with_supported_dtypes({\"2.5.0 and below\": (\"int32\", \"int64\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef gcd(x, y, name=None):\n return ivy.gcd(x, y)\n\n \n@with_unsupported_dtypes({\"2.5.0 and below\": \"bfloat16\"}, \"paddle\")\n@to_ivy_arrays_and_back\ndef fmin(x, y, name=None):\n return ivy.fmin(x, y)\n\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}
3,078
202
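
The record above patches ivy's paddle frontend by adding dtype-gated wrappers (a second `gcd` restricted to integer dtypes, plus `fmin`). Below is a minimal sketch of that wrapper pattern, lifted from the record itself; whether the import paths resolve depends on the installed ivy version, so treat it as illustrative rather than canonical.

```python
# Sketch of the dtype-gated frontend wrapper pattern from the record above.
# Assumption: an ivy install where these decorator paths exist as shown.
import ivy
from ivy.func_wrapper import with_supported_dtypes
from ivy.functional.frontends.paddle.func_wrapper import to_ivy_arrays_and_back


@with_supported_dtypes({"2.5.0 and below": ("int32", "int64")}, "paddle")
@to_ivy_arrays_and_back
def gcd(x, y, name=None):
    # The decorator records which dtypes the paddle frontend supports for this
    # op/version; the body defers to ivy's backend-agnostic implementation.
    return ivy.gcd(x, y)
```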
gh_patches_debug_14494
rasdani/github-patches
git_diff
quantumlib__Cirq-3163
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update code for when Engine metric qubits are formatted differently There is currently code in calibration.py that looks at qubit ids that start with `q` and removes this `q` before getting the grid qubit. If the API no longer returns `q` prefixed code this should be removed, otherwise if we are going to continue to support this we should add it to `grid_qubit_from_proto`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/google/engine/calibration.py` Content: ``` 1 # Copyright 2019 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """Calibration wrapper for calibrations returned from the Quantum Engine.""" 15 16 from collections import abc, defaultdict 17 import datetime 18 19 from typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING 20 21 from cirq import devices, vis 22 from cirq.google.api import v2 23 24 if TYPE_CHECKING: 25 import cirq 26 27 28 class Calibration(abc.Mapping): 29 """A convenience wrapper for calibrations that acts like a dictionary. 30 31 Calibrations act as dictionaries whose keys are the names of the metric, 32 and whose values are the metric values. The metric values themselves are 33 represented as a dictionary. These metric value dictionaries have 34 keys that are tuples of `cirq.GridQubit`s and values that are lists of the 35 metric values for those qubits. If a metric acts globally and is attached 36 to no specified number of qubits, the map will be from the empty tuple 37 to the metrics values. 38 39 Calibrations act just like a python dictionary. For example you can get 40 a list of all of the metric names using 41 42 `calibration.keys()` 43 44 and query a single value by looking up the name by index: 45 46 `calibration['t1']` 47 48 Attributes: 49 timestamp: The time that this calibration was run, in milliseconds since 50 the epoch. 51 """ 52 53 def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None: 54 self.timestamp = calibration.timestamp_ms 55 self._metric_dict = self._compute_metric_dict(calibration.metrics) 56 57 def _compute_metric_dict( 58 self, metrics: v2.metrics_pb2.MetricsSnapshot 59 ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]: 60 results: Dict[str, Dict[Tuple[devices. 61 GridQubit, ...], Any]] = defaultdict(dict) 62 for metric in metrics: 63 name = metric.name 64 # Flatten the values to a list, removing keys containing type names 65 # (e.g. proto version of each value is {<type>: value}). 66 flat_values = [ 67 getattr(v, v.WhichOneof('val')) for v in metric.values 68 ] 69 if metric.targets: 70 targets = [ 71 t[1:] if t.startswith('q') else t for t in metric.targets 72 ] 73 # TODO: Remove when calibrations don't prepend this. 
74 # Github issue: https://github.com/quantumlib/Cirq/issues/2963 75 qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets) 76 results[name][qubits] = flat_values 77 else: 78 assert len(results[name]) == 0, ( 79 'Only one metric of a given name can have no targets. ' 80 'Found multiple for key {}'.format(name)) 81 results[name][()] = flat_values 82 return results 83 84 def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]: 85 """Supports getting calibrations by index. 86 87 Calibration may be accessed by key: 88 89 `calibration['t1']`. 90 91 This returns a map from tuples of `cirq.GridQubit`s to a list of the 92 values of the metric. If there are no targets, the only key will only 93 be an empty tuple. 94 """ 95 if not isinstance(key, str): 96 raise TypeError( 97 'Calibration metrics only have string keys. Key was {}'.format( 98 key)) 99 if key not in self._metric_dict: 100 raise KeyError('Metric named {} not in calibration'.format(key)) 101 return self._metric_dict[key] 102 103 def __iter__(self) -> Iterator: 104 return iter(self._metric_dict) 105 106 def __len__(self) -> int: 107 return len(self._metric_dict) 108 109 def __str__(self) -> str: 110 111 return 'Calibration(keys={})'.format(list(sorted(self.keys()))) 112 113 def timestamp_str(self, 114 tz: Optional[datetime.tzinfo] = None, 115 timespec: str = 'auto') -> str: 116 """Return a string for the calibration timestamp. 117 118 Args: 119 tz: The timezone for the string. If None, the method uses the 120 platform's local date and time. 121 timespec: See datetime.isoformat for valid values. 122 123 Returns: 124 The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff. 125 """ 126 dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz) 127 dt += datetime.timedelta(microseconds=self.timestamp % 1000000) 128 return dt.isoformat(sep=' ', timespec=timespec) 129 130 def heatmap(self, key: str) -> vis.Heatmap: 131 """Return a heatmap for metrics that target single qubits. 132 133 Args: 134 key: The metric key to return a heatmap for. 135 136 Returns: 137 A `cirq.Heatmap` for the metric. 138 139 Raises: 140 AssertionError if the heatmap is not for single qubits or the metric 141 values are not single floats. 142 """ 143 metrics = self[key] 144 assert all(len(k) == 1 for k in metrics.keys()), ( 145 'Heatmaps are only supported if all the targets in a metric' 146 ' are single qubits.') 147 assert all(len(k) == 1 for k in metrics.values()), ( 148 'Heatmaps are only supported if all the values in a metric' 149 ' are single metric values.') 150 value_map = {qubit: value for (qubit,), (value,) in metrics.items()} 151 return vis.Heatmap(value_map) 152 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/google/engine/calibration.py b/cirq/google/engine/calibration.py --- a/cirq/google/engine/calibration.py +++ b/cirq/google/engine/calibration.py @@ -67,12 +67,8 @@ getattr(v, v.WhichOneof('val')) for v in metric.values ] if metric.targets: - targets = [ - t[1:] if t.startswith('q') else t for t in metric.targets - ] - # TODO: Remove when calibrations don't prepend this. - # Github issue: https://github.com/quantumlib/Cirq/issues/2963 - qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets) + qubits = tuple( + v2.grid_qubit_from_proto_id(t) for t in metric.targets) results[name][qubits] = flat_values else: assert len(results[name]) == 0, (
{"golden_diff": "diff --git a/cirq/google/engine/calibration.py b/cirq/google/engine/calibration.py\n--- a/cirq/google/engine/calibration.py\n+++ b/cirq/google/engine/calibration.py\n@@ -67,12 +67,8 @@\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n- targets = [\n- t[1:] if t.startswith('q') else t for t in metric.targets\n- ]\n- # TODO: Remove when calibrations don't prepend this.\n- # Github issue: https://github.com/quantumlib/Cirq/issues/2963\n- qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)\n+ qubits = tuple(\n+ v2.grid_qubit_from_proto_id(t) for t in metric.targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n", "issue": "Update code for when Engine metric qubits are formatted differently\nThere is currently code in calibration.py that looks at qubit ids that start with `q` and removes this `q` before getting the grid qubit. If the API no longer returns `q` prefixed code this should be removed, otherwise if we are going to continue to support this we should add it to `grid_qubit_from_proto`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Calibration wrapper for calibrations returned from the Quantum Engine.\"\"\"\n\nfrom collections import abc, defaultdict\nimport datetime\n\nfrom typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING\n\nfrom cirq import devices, vis\nfrom cirq.google.api import v2\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass Calibration(abc.Mapping):\n \"\"\"A convenience wrapper for calibrations that acts like a dictionary.\n\n Calibrations act as dictionaries whose keys are the names of the metric,\n and whose values are the metric values. The metric values themselves are\n represented as a dictionary. These metric value dictionaries have\n keys that are tuples of `cirq.GridQubit`s and values that are lists of the\n metric values for those qubits. If a metric acts globally and is attached\n to no specified number of qubits, the map will be from the empty tuple\n to the metrics values.\n\n Calibrations act just like a python dictionary. For example you can get\n a list of all of the metric names using\n\n `calibration.keys()`\n\n and query a single value by looking up the name by index:\n\n `calibration['t1']`\n\n Attributes:\n timestamp: The time that this calibration was run, in milliseconds since\n the epoch.\n \"\"\"\n\n def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:\n self.timestamp = calibration.timestamp_ms\n self._metric_dict = self._compute_metric_dict(calibration.metrics)\n\n def _compute_metric_dict(\n self, metrics: v2.metrics_pb2.MetricsSnapshot\n ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:\n results: Dict[str, Dict[Tuple[devices.\n GridQubit, ...], Any]] = defaultdict(dict)\n for metric in metrics:\n name = metric.name\n # Flatten the values to a list, removing keys containing type names\n # (e.g. 
proto version of each value is {<type>: value}).\n flat_values = [\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n targets = [\n t[1:] if t.startswith('q') else t for t in metric.targets\n ]\n # TODO: Remove when calibrations don't prepend this.\n # Github issue: https://github.com/quantumlib/Cirq/issues/2963\n qubits = tuple(v2.grid_qubit_from_proto_id(t) for t in targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n 'Only one metric of a given name can have no targets. '\n 'Found multiple for key {}'.format(name))\n results[name][()] = flat_values\n return results\n\n def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:\n \"\"\"Supports getting calibrations by index.\n\n Calibration may be accessed by key:\n\n `calibration['t1']`.\n\n This returns a map from tuples of `cirq.GridQubit`s to a list of the\n values of the metric. If there are no targets, the only key will only\n be an empty tuple.\n \"\"\"\n if not isinstance(key, str):\n raise TypeError(\n 'Calibration metrics only have string keys. Key was {}'.format(\n key))\n if key not in self._metric_dict:\n raise KeyError('Metric named {} not in calibration'.format(key))\n return self._metric_dict[key]\n\n def __iter__(self) -> Iterator:\n return iter(self._metric_dict)\n\n def __len__(self) -> int:\n return len(self._metric_dict)\n\n def __str__(self) -> str:\n\n return 'Calibration(keys={})'.format(list(sorted(self.keys())))\n\n def timestamp_str(self,\n tz: Optional[datetime.tzinfo] = None,\n timespec: str = 'auto') -> str:\n \"\"\"Return a string for the calibration timestamp.\n\n Args:\n tz: The timezone for the string. If None, the method uses the\n platform's local date and time.\n timespec: See datetime.isoformat for valid values.\n\n Returns:\n The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)\n dt += datetime.timedelta(microseconds=self.timestamp % 1000000)\n return dt.isoformat(sep=' ', timespec=timespec)\n\n def heatmap(self, key: str) -> vis.Heatmap:\n \"\"\"Return a heatmap for metrics that target single qubits.\n\n Args:\n key: The metric key to return a heatmap for.\n\n Returns:\n A `cirq.Heatmap` for the metric.\n\n Raises:\n AssertionError if the heatmap is not for single qubits or the metric\n values are not single floats.\n \"\"\"\n metrics = self[key]\n assert all(len(k) == 1 for k in metrics.keys()), (\n 'Heatmaps are only supported if all the targets in a metric'\n ' are single qubits.')\n assert all(len(k) == 1 for k in metrics.values()), (\n 'Heatmaps are only supported if all the values in a metric'\n ' are single metric values.')\n value_map = {qubit: value for (qubit,), (value,) in metrics.items()}\n return vis.Heatmap(value_map)\n", "path": "cirq/google/engine/calibration.py"}], "after_files": [{"content": "# Copyright 2019 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Calibration wrapper for 
calibrations returned from the Quantum Engine.\"\"\"\n\nfrom collections import abc, defaultdict\nimport datetime\n\nfrom typing import Any, Dict, Iterator, Optional, Tuple, TYPE_CHECKING\n\nfrom cirq import devices, vis\nfrom cirq.google.api import v2\n\nif TYPE_CHECKING:\n import cirq\n\n\nclass Calibration(abc.Mapping):\n \"\"\"A convenience wrapper for calibrations that acts like a dictionary.\n\n Calibrations act as dictionaries whose keys are the names of the metric,\n and whose values are the metric values. The metric values themselves are\n represented as a dictionary. These metric value dictionaries have\n keys that are tuples of `cirq.GridQubit`s and values that are lists of the\n metric values for those qubits. If a metric acts globally and is attached\n to no specified number of qubits, the map will be from the empty tuple\n to the metrics values.\n\n Calibrations act just like a python dictionary. For example you can get\n a list of all of the metric names using\n\n `calibration.keys()`\n\n and query a single value by looking up the name by index:\n\n `calibration['t1']`\n\n Attributes:\n timestamp: The time that this calibration was run, in milliseconds since\n the epoch.\n \"\"\"\n\n def __init__(self, calibration: v2.metrics_pb2.MetricsSnapshot) -> None:\n self.timestamp = calibration.timestamp_ms\n self._metric_dict = self._compute_metric_dict(calibration.metrics)\n\n def _compute_metric_dict(\n self, metrics: v2.metrics_pb2.MetricsSnapshot\n ) -> Dict[str, Dict[Tuple['cirq.GridQubit', ...], Any]]:\n results: Dict[str, Dict[Tuple[devices.\n GridQubit, ...], Any]] = defaultdict(dict)\n for metric in metrics:\n name = metric.name\n # Flatten the values to a list, removing keys containing type names\n # (e.g. proto version of each value is {<type>: value}).\n flat_values = [\n getattr(v, v.WhichOneof('val')) for v in metric.values\n ]\n if metric.targets:\n qubits = tuple(\n v2.grid_qubit_from_proto_id(t) for t in metric.targets)\n results[name][qubits] = flat_values\n else:\n assert len(results[name]) == 0, (\n 'Only one metric of a given name can have no targets. '\n 'Found multiple for key {}'.format(name))\n results[name][()] = flat_values\n return results\n\n def __getitem__(self, key: str) -> Dict[Tuple['cirq.GridQubit', ...], Any]:\n \"\"\"Supports getting calibrations by index.\n\n Calibration may be accessed by key:\n\n `calibration['t1']`.\n\n This returns a map from tuples of `cirq.GridQubit`s to a list of the\n values of the metric. If there are no targets, the only key will only\n be an empty tuple.\n \"\"\"\n if not isinstance(key, str):\n raise TypeError(\n 'Calibration metrics only have string keys. Key was {}'.format(\n key))\n if key not in self._metric_dict:\n raise KeyError('Metric named {} not in calibration'.format(key))\n return self._metric_dict[key]\n\n def __iter__(self) -> Iterator:\n return iter(self._metric_dict)\n\n def __len__(self) -> int:\n return len(self._metric_dict)\n\n def __str__(self) -> str:\n\n return 'Calibration(keys={})'.format(list(sorted(self.keys())))\n\n def timestamp_str(self,\n tz: Optional[datetime.tzinfo] = None,\n timespec: str = 'auto') -> str:\n \"\"\"Return a string for the calibration timestamp.\n\n Args:\n tz: The timezone for the string. 
If None, the method uses the\n platform's local date and time.\n timespec: See datetime.isoformat for valid values.\n\n Returns:\n The string in ISO 8601 format YYYY-MM-DDTHH:MM:SS.ffffff.\n \"\"\"\n dt = datetime.datetime.fromtimestamp(self.timestamp / 1000, tz)\n dt += datetime.timedelta(microseconds=self.timestamp % 1000000)\n return dt.isoformat(sep=' ', timespec=timespec)\n\n def heatmap(self, key: str) -> vis.Heatmap:\n \"\"\"Return a heatmap for metrics that target single qubits.\n\n Args:\n key: The metric key to return a heatmap for.\n\n Returns:\n A `cirq.Heatmap` for the metric.\n\n Raises:\n AssertionError if the heatmap is not for single qubits or the metric\n values are not single floats.\n \"\"\"\n metrics = self[key]\n assert all(len(k) == 1 for k in metrics.keys()), (\n 'Heatmaps are only supported if all the targets in a metric'\n ' are single qubits.')\n assert all(len(k) == 1 for k in metrics.values()), (\n 'Heatmaps are only supported if all the values in a metric'\n ' are single metric values.')\n value_map = {qubit: value for (qubit,), (value,) in metrics.items()}\n return vis.Heatmap(value_map)\n", "path": "cirq/google/engine/calibration.py"}]}
2,045
215
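
The Cirq calibration record above removes a workaround that manually stripped a leading `q` from metric target ids before converting them to grid qubits. The toy parser below only illustrates the two id spellings involved (`1_2` vs `q1_2`); it is not Cirq's `grid_qubit_from_proto_id`, whose exact behaviour should be checked against the installed cirq version.

```python
# Illustration only -- a hand-rolled parser for the two target-id spellings
# discussed in the issue ("row_col", optionally prefixed with 'q'). This is
# not Cirq's implementation.
import re


def parse_target(target: str) -> tuple:
    """Return (row, col) for ids like '1_2' or 'q1_2'."""
    m = re.fullmatch(r"q?(\d+)_(\d+)", target)
    if m is None:
        raise ValueError("unrecognized target id: %s" % target)
    return int(m.group(1)), int(m.group(2))


print(parse_target("1_2"))   # (1, 2)
print(parse_target("q1_2"))  # (1, 2) -- the prefix the old code stripped by hand
```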
gh_patches_debug_14127
rasdani/github-patches
git_diff
ansible__ansible-modules-core-4998
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ini_file module doesn't match existing key when it is indented ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME `ini_file` module ##### ANSIBLE VERSION <!--- Paste verbatim output from “ansible --version” between quotes below --> ``` ansible 2.1.1.0 ``` ##### CONFIGURATION N/A ##### OS / ENVIRONMENT N/A ##### SUMMARY When a key in an INI file exists but is prefixed with whitespace, the `ini_file` module adds a new entry rather than updating the existing entry. ##### STEPS TO REPRODUCE Playbook: ``` - hosts: localhost gather_facts: no tasks: - ini_file: dest: ./foo.ini section: main option: foo value: baz ``` INI file: ``` [main] foo = bar ``` ##### EXPECTED RESULTS ``` [main] foo = baz ``` ##### ACTUAL RESULTS ``` [main] foo = bar foo = baz ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `files/ini_file.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # (c) 2012, Jan-Piet Mens <jpmens () gmail.com> 5 # (c) 2015, Ales Nosek <anosek.nosek () gmail.com> 6 # 7 # This file is part of Ansible 8 # 9 # Ansible is free software: you can redistribute it and/or modify 10 # it under the terms of the GNU General Public License as published by 11 # the Free Software Foundation, either version 3 of the License, or 12 # (at your option) any later version. 13 # 14 # Ansible is distributed in the hope that it will be useful, 15 # but WITHOUT ANY WARRANTY; without even the implied warranty of 16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 17 # GNU General Public License for more details. 18 # 19 # You should have received a copy of the GNU General Public License 20 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 21 # 22 23 DOCUMENTATION = ''' 24 --- 25 module: ini_file 26 short_description: Tweak settings in INI files 27 extends_documentation_fragment: files 28 description: 29 - Manage (add, remove, change) individual settings in an INI-style file without having 30 to manage the file as a whole with, say, M(template) or M(assemble). Adds missing 31 sections if they don't exist. 32 - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file. 33 version_added: "0.9" 34 options: 35 dest: 36 description: 37 - Path to the INI-style file; this file is created if required 38 required: true 39 default: null 40 section: 41 description: 42 - Section name in INI file. This is added if C(state=present) automatically when 43 a single value is being set. 44 required: true 45 default: null 46 option: 47 description: 48 - if set (required for changing a I(value)), this is the name of the option. 49 - May be omitted if adding/removing a whole I(section). 50 required: false 51 default: null 52 value: 53 description: 54 - the string value to be associated with an I(option). May be omitted when removing an I(option). 55 required: false 56 default: null 57 backup: 58 description: 59 - Create a backup file including the timestamp information so you can get 60 the original file back if you somehow clobbered it incorrectly. 
61 required: false 62 default: "no" 63 choices: [ "yes", "no" ] 64 others: 65 description: 66 - all arguments accepted by the M(file) module also work here 67 required: false 68 state: 69 description: 70 - If set to C(absent) the option or section will be removed if present instead of created. 71 required: false 72 default: "present" 73 choices: [ "present", "absent" ] 74 no_extra_spaces: 75 description: 76 - do not insert spaces before and after '=' symbol 77 required: false 78 default: false 79 version_added: "2.1" 80 notes: 81 - While it is possible to add an I(option) without specifying a I(value), this makes 82 no sense. 83 - A section named C(default) cannot be added by the module, but if it exists, individual 84 options within the section can be updated. (This is a limitation of Python's I(ConfigParser).) 85 Either use M(template) to create a base INI file with a C([default]) section, or use 86 M(lineinfile) to add the missing line. 87 requirements: [ ConfigParser ] 88 author: 89 - "Jan-Piet Mens (@jpmens)" 90 - "Ales Nosek (@noseka1)" 91 ''' 92 93 EXAMPLES = ''' 94 # Ensure "fav=lemonade is in section "[drinks]" in specified file 95 - ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes 96 97 - ini_file: dest=/etc/anotherconf 98 section=drinks 99 option=temperature 100 value=cold 101 backup=yes 102 ''' 103 104 import os 105 import re 106 107 # ============================================================== 108 # match_opt 109 110 def match_opt(option, line): 111 option = re.escape(option) 112 return re.match('%s( |\t)*=' % option, line) \ 113 or re.match('# *%s( |\t)*=' % option, line) \ 114 or re.match('; *%s( |\t)*=' % option, line) 115 116 # ============================================================== 117 # match_active_opt 118 119 def match_active_opt(option, line): 120 option = re.escape(option) 121 return re.match('%s( |\t)*=' % option, line) 122 123 # ============================================================== 124 # do_ini 125 126 def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False): 127 128 129 if not os.path.exists(filename): 130 try: 131 open(filename,'w').close() 132 except: 133 module.fail_json(msg="Destination file %s not writable" % filename) 134 ini_file = open(filename, 'r') 135 try: 136 ini_lines = ini_file.readlines() 137 # append a fake section line to simplify the logic 138 ini_lines.append('[') 139 finally: 140 ini_file.close() 141 142 within_section = not section 143 section_start = 0 144 changed = False 145 if no_extra_spaces: 146 assignment_format = '%s=%s\n' 147 else: 148 assignment_format = '%s = %s\n' 149 150 for index, line in enumerate(ini_lines): 151 if line.startswith('[%s]' % section): 152 within_section = True 153 section_start = index 154 elif line.startswith('['): 155 if within_section: 156 if state == 'present': 157 # insert missing option line at the end of the section 158 for i in range(index, 0, -1): 159 # search backwards for previous non-blank or non-comment line 160 if not re.match(r'^[ \t]*([#;].*)?$', ini_lines[i - 1]): 161 ini_lines.insert(i, assignment_format % (option, value)) 162 changed = True 163 break 164 elif state == 'absent' and not option: 165 # remove the entire section 166 del ini_lines[section_start:index] 167 changed = True 168 break 169 else: 170 if within_section and option: 171 if state == 'present': 172 # change the existing option line 173 if match_opt(option, line): 174 newline = assignment_format % (option, 
value) 175 changed = ini_lines[index] != newline 176 ini_lines[index] = newline 177 if changed: 178 # remove all possible option occurences from the rest of the section 179 index = index + 1 180 while index < len(ini_lines): 181 line = ini_lines[index] 182 if line.startswith('['): 183 break 184 if match_active_opt(option, line): 185 del ini_lines[index] 186 else: 187 index = index + 1 188 break 189 else: 190 # comment out the existing option line 191 if match_active_opt(option, line): 192 ini_lines[index] = '#%s' % ini_lines[index] 193 changed = True 194 break 195 196 # remove the fake section line 197 del ini_lines[-1:] 198 199 if not within_section and option and state == 'present': 200 ini_lines.append('[%s]\n' % section) 201 ini_lines.append(assignment_format % (option, value)) 202 changed = True 203 204 205 backup_file = None 206 if changed and not module.check_mode: 207 if backup: 208 backup_file = module.backup_local(filename) 209 ini_file = open(filename, 'w') 210 try: 211 ini_file.writelines(ini_lines) 212 finally: 213 ini_file.close() 214 215 return (changed, backup_file) 216 217 # ============================================================== 218 # main 219 220 def main(): 221 222 module = AnsibleModule( 223 argument_spec = dict( 224 dest = dict(required=True), 225 section = dict(required=True), 226 option = dict(required=False), 227 value = dict(required=False), 228 backup = dict(default='no', type='bool'), 229 state = dict(default='present', choices=['present', 'absent']), 230 no_extra_spaces = dict(required=False, default=False, type='bool') 231 ), 232 add_file_common_args = True, 233 supports_check_mode = True 234 ) 235 236 dest = os.path.expanduser(module.params['dest']) 237 section = module.params['section'] 238 option = module.params['option'] 239 value = module.params['value'] 240 state = module.params['state'] 241 backup = module.params['backup'] 242 no_extra_spaces = module.params['no_extra_spaces'] 243 244 (changed,backup_file) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces) 245 246 file_args = module.load_file_common_arguments(module.params) 247 changed = module.set_fs_attributes_if_different(file_args, changed) 248 249 results = { 'changed': changed, 'msg': "OK", 'dest': dest } 250 if backup_file is not None: 251 results['backup_file'] = backup_file 252 253 # Mission complete 254 module.exit_json(**results) 255 256 # import module snippets 257 from ansible.module_utils.basic import * 258 if __name__ == '__main__': 259 main() 260 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/files/ini_file.py b/files/ini_file.py --- a/files/ini_file.py +++ b/files/ini_file.py @@ -109,7 +109,7 @@ def match_opt(option, line): option = re.escape(option) - return re.match('%s( |\t)*=' % option, line) \ + return re.match(' *%s( |\t)*=' % option, line) \ or re.match('# *%s( |\t)*=' % option, line) \ or re.match('; *%s( |\t)*=' % option, line) @@ -118,7 +118,7 @@ def match_active_opt(option, line): option = re.escape(option) - return re.match('%s( |\t)*=' % option, line) + return re.match(' *%s( |\t)*=' % option, line) # ============================================================== # do_ini
{"golden_diff": "diff --git a/files/ini_file.py b/files/ini_file.py\n--- a/files/ini_file.py\n+++ b/files/ini_file.py\n@@ -109,7 +109,7 @@\n \n def match_opt(option, line):\n option = re.escape(option)\n- return re.match('%s( |\\t)*=' % option, line) \\\n+ return re.match(' *%s( |\\t)*=' % option, line) \\\n or re.match('# *%s( |\\t)*=' % option, line) \\\n or re.match('; *%s( |\\t)*=' % option, line)\n \n@@ -118,7 +118,7 @@\n \n def match_active_opt(option, line):\n option = re.escape(option)\n- return re.match('%s( |\\t)*=' % option, line)\n+ return re.match(' *%s( |\\t)*=' % option, line)\n \n # ==============================================================\n # do_ini\n", "issue": "ini_file module doesn't match existing key when it is indented\n##### ISSUE TYPE\n- Bug Report\n##### COMPONENT NAME\n\n`ini_file` module\n##### ANSIBLE VERSION\n\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\n\n```\nansible 2.1.1.0\n```\n##### CONFIGURATION\n\nN/A\n##### OS / ENVIRONMENT\n\nN/A\n##### SUMMARY\n\nWhen a key in an INI file exists but is prefixed with whitespace, the `ini_file` module adds a new entry rather than updating the existing entry.\n##### STEPS TO REPRODUCE\n\nPlaybook:\n\n```\n- hosts: localhost\n gather_facts: no\n tasks:\n - ini_file:\n dest: ./foo.ini\n section: main\n option: foo\n value: baz\n```\n\nINI file:\n\n```\n[main]\n foo = bar\n```\n##### EXPECTED RESULTS\n\n```\n[main]\n foo = baz\n```\n##### ACTUAL RESULTS\n\n```\n[main]\n foo = bar\nfoo = baz\n```\n\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>\n# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: ini_file\nshort_description: Tweak settings in INI files\nextends_documentation_fragment: files\ndescription:\n - Manage (add, remove, change) individual settings in an INI-style file without having\n to manage the file as a whole with, say, M(template) or M(assemble). Adds missing\n sections if they don't exist.\n - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.\nversion_added: \"0.9\"\noptions:\n dest:\n description:\n - Path to the INI-style file; this file is created if required\n required: true\n default: null\n section:\n description:\n - Section name in INI file. This is added if C(state=present) automatically when\n a single value is being set.\n required: true\n default: null\n option:\n description:\n - if set (required for changing a I(value)), this is the name of the option.\n - May be omitted if adding/removing a whole I(section).\n required: false\n default: null\n value:\n description:\n - the string value to be associated with an I(option). 
May be omitted when removing an I(option).\n required: false\n default: null\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n others:\n description:\n - all arguments accepted by the M(file) module also work here\n required: false\n state:\n description:\n - If set to C(absent) the option or section will be removed if present instead of created.\n required: false\n default: \"present\"\n choices: [ \"present\", \"absent\" ]\n no_extra_spaces:\n description:\n - do not insert spaces before and after '=' symbol\n required: false\n default: false\n version_added: \"2.1\"\nnotes:\n - While it is possible to add an I(option) without specifying a I(value), this makes\n no sense.\n - A section named C(default) cannot be added by the module, but if it exists, individual\n options within the section can be updated. (This is a limitation of Python's I(ConfigParser).)\n Either use M(template) to create a base INI file with a C([default]) section, or use\n M(lineinfile) to add the missing line.\nrequirements: [ ConfigParser ]\nauthor:\n - \"Jan-Piet Mens (@jpmens)\"\n - \"Ales Nosek (@noseka1)\"\n'''\n\nEXAMPLES = '''\n# Ensure \"fav=lemonade is in section \"[drinks]\" in specified file\n- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes\n\n- ini_file: dest=/etc/anotherconf\n section=drinks\n option=temperature\n value=cold\n backup=yes\n'''\n\nimport os\nimport re\n\n# ==============================================================\n# match_opt\n\ndef match_opt(option, line):\n option = re.escape(option)\n return re.match('%s( |\\t)*=' % option, line) \\\n or re.match('# *%s( |\\t)*=' % option, line) \\\n or re.match('; *%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# match_active_opt\n\ndef match_active_opt(option, line):\n option = re.escape(option)\n return re.match('%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# do_ini\n\ndef do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):\n\n\n if not os.path.exists(filename):\n try:\n open(filename,'w').close()\n except:\n module.fail_json(msg=\"Destination file %s not writable\" % filename)\n ini_file = open(filename, 'r')\n try:\n ini_lines = ini_file.readlines()\n # append a fake section line to simplify the logic\n ini_lines.append('[')\n finally:\n ini_file.close()\n\n within_section = not section\n section_start = 0\n changed = False\n if no_extra_spaces:\n assignment_format = '%s=%s\\n'\n else:\n assignment_format = '%s = %s\\n'\n\n for index, line in enumerate(ini_lines):\n if line.startswith('[%s]' % section):\n within_section = True\n section_start = index\n elif line.startswith('['):\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n for i in range(index, 0, -1):\n # search backwards for previous non-blank or non-comment line\n if not re.match(r'^[ \\t]*([#;].*)?$', ini_lines[i - 1]):\n ini_lines.insert(i, assignment_format % (option, value))\n changed = True\n break\n elif state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n changed = True\n break\n else:\n if within_section and option:\n if state == 'present':\n # change the existing option line\n if 
match_opt(option, line):\n newline = assignment_format % (option, value)\n changed = ini_lines[index] != newline\n ini_lines[index] = newline\n if changed:\n # remove all possible option occurences from the rest of the section\n index = index + 1\n while index < len(ini_lines):\n line = ini_lines[index]\n if line.startswith('['):\n break\n if match_active_opt(option, line):\n del ini_lines[index]\n else:\n index = index + 1\n break\n else:\n # comment out the existing option line\n if match_active_opt(option, line):\n ini_lines[index] = '#%s' % ini_lines[index]\n changed = True\n break\n\n # remove the fake section line\n del ini_lines[-1:]\n\n if not within_section and option and state == 'present':\n ini_lines.append('[%s]\\n' % section)\n ini_lines.append(assignment_format % (option, value))\n changed = True\n\n\n backup_file = None\n if changed and not module.check_mode:\n if backup:\n backup_file = module.backup_local(filename)\n ini_file = open(filename, 'w')\n try:\n ini_file.writelines(ini_lines)\n finally:\n ini_file.close()\n\n return (changed, backup_file)\n\n# ==============================================================\n# main\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n dest = dict(required=True),\n section = dict(required=True),\n option = dict(required=False),\n value = dict(required=False),\n backup = dict(default='no', type='bool'),\n state = dict(default='present', choices=['present', 'absent']),\n no_extra_spaces = dict(required=False, default=False, type='bool')\n ),\n add_file_common_args = True,\n supports_check_mode = True\n )\n\n dest = os.path.expanduser(module.params['dest'])\n section = module.params['section']\n option = module.params['option']\n value = module.params['value']\n state = module.params['state']\n backup = module.params['backup']\n no_extra_spaces = module.params['no_extra_spaces']\n\n (changed,backup_file) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)\n\n file_args = module.load_file_common_arguments(module.params)\n changed = module.set_fs_attributes_if_different(file_args, changed)\n\n results = { 'changed': changed, 'msg': \"OK\", 'dest': dest }\n if backup_file is not None:\n results['backup_file'] = backup_file\n\n # Mission complete\n module.exit_json(**results)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "files/ini_file.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>\n# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. 
If not, see <http://www.gnu.org/licenses/>.\n#\n\nDOCUMENTATION = '''\n---\nmodule: ini_file\nshort_description: Tweak settings in INI files\nextends_documentation_fragment: files\ndescription:\n - Manage (add, remove, change) individual settings in an INI-style file without having\n to manage the file as a whole with, say, M(template) or M(assemble). Adds missing\n sections if they don't exist.\n - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.\nversion_added: \"0.9\"\noptions:\n dest:\n description:\n - Path to the INI-style file; this file is created if required\n required: true\n default: null\n section:\n description:\n - Section name in INI file. This is added if C(state=present) automatically when\n a single value is being set.\n required: true\n default: null\n option:\n description:\n - if set (required for changing a I(value)), this is the name of the option.\n - May be omitted if adding/removing a whole I(section).\n required: false\n default: null\n value:\n description:\n - the string value to be associated with an I(option). May be omitted when removing an I(option).\n required: false\n default: null\n backup:\n description:\n - Create a backup file including the timestamp information so you can get\n the original file back if you somehow clobbered it incorrectly.\n required: false\n default: \"no\"\n choices: [ \"yes\", \"no\" ]\n others:\n description:\n - all arguments accepted by the M(file) module also work here\n required: false\n state:\n description:\n - If set to C(absent) the option or section will be removed if present instead of created.\n required: false\n default: \"present\"\n choices: [ \"present\", \"absent\" ]\n no_extra_spaces:\n description:\n - do not insert spaces before and after '=' symbol\n required: false\n default: false\n version_added: \"2.1\"\nnotes:\n - While it is possible to add an I(option) without specifying a I(value), this makes\n no sense.\n - A section named C(default) cannot be added by the module, but if it exists, individual\n options within the section can be updated. 
(This is a limitation of Python's I(ConfigParser).)\n Either use M(template) to create a base INI file with a C([default]) section, or use\n M(lineinfile) to add the missing line.\nrequirements: [ ConfigParser ]\nauthor:\n - \"Jan-Piet Mens (@jpmens)\"\n - \"Ales Nosek (@noseka1)\"\n'''\n\nEXAMPLES = '''\n# Ensure \"fav=lemonade is in section \"[drinks]\" in specified file\n- ini_file: dest=/etc/conf section=drinks option=fav value=lemonade mode=0600 backup=yes\n\n- ini_file: dest=/etc/anotherconf\n section=drinks\n option=temperature\n value=cold\n backup=yes\n'''\n\nimport os\nimport re\n\n# ==============================================================\n# match_opt\n\ndef match_opt(option, line):\n option = re.escape(option)\n return re.match(' *%s( |\\t)*=' % option, line) \\\n or re.match('# *%s( |\\t)*=' % option, line) \\\n or re.match('; *%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# match_active_opt\n\ndef match_active_opt(option, line):\n option = re.escape(option)\n return re.match(' *%s( |\\t)*=' % option, line)\n\n# ==============================================================\n# do_ini\n\ndef do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False, no_extra_spaces=False):\n\n\n if not os.path.exists(filename):\n try:\n open(filename,'w').close()\n except:\n module.fail_json(msg=\"Destination file %s not writable\" % filename)\n ini_file = open(filename, 'r')\n try:\n ini_lines = ini_file.readlines()\n # append a fake section line to simplify the logic\n ini_lines.append('[')\n finally:\n ini_file.close()\n\n within_section = not section\n section_start = 0\n changed = False\n if no_extra_spaces:\n assignment_format = '%s=%s\\n'\n else:\n assignment_format = '%s = %s\\n'\n\n for index, line in enumerate(ini_lines):\n if line.startswith('[%s]' % section):\n within_section = True\n section_start = index\n elif line.startswith('['):\n if within_section:\n if state == 'present':\n # insert missing option line at the end of the section\n for i in range(index, 0, -1):\n # search backwards for previous non-blank or non-comment line\n if not re.match(r'^[ \\t]*([#;].*)?$', ini_lines[i - 1]):\n ini_lines.insert(i, assignment_format % (option, value))\n changed = True\n break\n elif state == 'absent' and not option:\n # remove the entire section\n del ini_lines[section_start:index]\n changed = True\n break\n else:\n if within_section and option:\n if state == 'present':\n # change the existing option line\n if match_opt(option, line):\n newline = assignment_format % (option, value)\n changed = ini_lines[index] != newline\n ini_lines[index] = newline\n if changed:\n # remove all possible option occurences from the rest of the section\n index = index + 1\n while index < len(ini_lines):\n line = ini_lines[index]\n if line.startswith('['):\n break\n if match_active_opt(option, line):\n del ini_lines[index]\n else:\n index = index + 1\n break\n else:\n # comment out the existing option line\n if match_active_opt(option, line):\n ini_lines[index] = '#%s' % ini_lines[index]\n changed = True\n break\n\n # remove the fake section line\n del ini_lines[-1:]\n\n if not within_section and option and state == 'present':\n ini_lines.append('[%s]\\n' % section)\n ini_lines.append(assignment_format % (option, value))\n changed = True\n\n\n backup_file = None\n if changed and not module.check_mode:\n if backup:\n backup_file = module.backup_local(filename)\n ini_file = open(filename, 'w')\n try:\n 
ini_file.writelines(ini_lines)\n finally:\n ini_file.close()\n\n return (changed, backup_file)\n\n# ==============================================================\n# main\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n dest = dict(required=True),\n section = dict(required=True),\n option = dict(required=False),\n value = dict(required=False),\n backup = dict(default='no', type='bool'),\n state = dict(default='present', choices=['present', 'absent']),\n no_extra_spaces = dict(required=False, default=False, type='bool')\n ),\n add_file_common_args = True,\n supports_check_mode = True\n )\n\n dest = os.path.expanduser(module.params['dest'])\n section = module.params['section']\n option = module.params['option']\n value = module.params['value']\n state = module.params['state']\n backup = module.params['backup']\n no_extra_spaces = module.params['no_extra_spaces']\n\n (changed,backup_file) = do_ini(module, dest, section, option, value, state, backup, no_extra_spaces)\n\n file_args = module.load_file_common_arguments(module.params)\n changed = module.set_fs_attributes_if_different(file_args, changed)\n\n results = { 'changed': changed, 'msg': \"OK\", 'dest': dest }\n if backup_file is not None:\n results['backup_file'] = backup_file\n\n # Mission complete\n module.exit_json(**results)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nif __name__ == '__main__':\n main()\n", "path": "files/ini_file.py"}]}
3,171
211
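
The ini_file record above fixes option matching by letting the patterns accept leading spaces. A small standalone check of the two patterns against an indented key follows; the sample line and option name are made up for illustration.

```python
# Why the original pattern misses indented keys: re.match anchors at column 0,
# so without a leading ' *' an indented "foo = bar" line never matches.
import re

line = "  foo = bar\n"          # key indented as in the reported INI file
option = re.escape("foo")

old = re.match("%s( |\t)*=" % option, line)    # original pattern -> None
new = re.match(" *%s( |\t)*=" % option, line)  # patched pattern  -> match object

print(bool(old), bool(new))  # False True
```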
gh_patches_debug_18922
rasdani/github-patches
git_diff
Kinto__kinto-1664
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OpenID payload cache uses the wrong cache key The `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51 No matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import codecs 2 import os 3 from setuptools import setup, find_packages 4 5 # abspath here because setup.py may be __main__, in which case 6 # __file__ is not guaranteed to be absolute 7 here = os.path.abspath(os.path.dirname(__file__)) 8 9 10 def read_file(filename): 11 """Open a related file and return its content.""" 12 with codecs.open(os.path.join(here, filename), encoding='utf-8') as f: 13 content = f.read() 14 return content 15 16 17 README = read_file('README.rst') 18 CHANGELOG = read_file('CHANGELOG.rst') 19 CONTRIBUTORS = read_file('CONTRIBUTORS.rst') 20 21 REQUIREMENTS = [ 22 'bcrypt', 23 'colander >= 1.4.0', 24 'cornice', 25 'cornice_swagger >= 0.5.1', 26 'dockerflow', 27 'jsonschema', 28 'jsonpatch', 29 'logging-color-formatter >= 1.0.1', # Message interpolations. 30 'python-dateutil', 31 'pyramid >= 1.9.1, < 2.0', 32 'pyramid_multiauth >= 0.8', # User on policy selected event. 33 'transaction', 34 # pyramid_tm changed the location of their tween in 2.x and one of 35 # our tests fails on 2.0. 36 'pyramid_tm >= 2.1', 37 'requests', 38 'waitress', 39 'ujson >= 1.35', 40 ] 41 42 POSTGRESQL_REQUIRES = [ 43 'SQLAlchemy', 44 'psycopg2 > 2.5', 45 'zope.sqlalchemy', 46 ] 47 48 REDIS_REQUIRES = [ 49 'kinto_redis' 50 ] 51 52 MEMCACHED_REQUIRES = [ 53 'python-memcached' 54 ] 55 56 SETUP_REQUIRES = [ 57 'pytest-runner' 58 ] 59 60 TEST_REQUIREMENTS = [ 61 'bravado_core', 62 'pytest', 63 'WebTest' 64 ] 65 66 DEPENDENCY_LINKS = [] 67 68 MONITORING_REQUIRES = [ 69 'raven', 70 'statsd', 71 'newrelic', 72 'werkzeug', 73 ] 74 75 ENTRY_POINTS = { 76 'paste.app_factory': [ 77 'main = kinto:main', 78 ], 79 'console_scripts': [ 80 'kinto = kinto.__main__:main' 81 ], 82 } 83 84 85 setup(name='kinto', 86 version='9.1.1', 87 description='Kinto Web Service - Store, Sync, Share, and Self-Host.', 88 long_description='{}\n\n{}\n\n{}'.format(README, CHANGELOG, CONTRIBUTORS), 89 license='Apache License (2.0)', 90 classifiers=[ 91 'Programming Language :: Python', 92 'Programming Language :: Python :: 3', 93 'Programming Language :: Python :: 3.5', 94 'Programming Language :: Python :: 3.6', 95 'Programming Language :: Python :: Implementation :: CPython', 96 'Topic :: Internet :: WWW/HTTP', 97 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application', 98 'License :: OSI Approved :: Apache Software License' 99 ], 100 keywords='web sync json storage services', 101 author='Mozilla Services', 102 author_email='[email protected]', 103 url='https://github.com/Kinto/kinto', 104 packages=find_packages(), 105 package_data={'': ['*.rst', '*.py', '*.yaml']}, 106 include_package_data=True, 107 zip_safe=False, 108 setup_requires=SETUP_REQUIRES, 109 tests_require=TEST_REQUIREMENTS, 110 install_requires=REQUIREMENTS, 111 extras_require={ 112 'redis': REDIS_REQUIRES, 113 'memcached': MEMCACHED_REQUIRES, 114 'postgresql': POSTGRESQL_REQUIRES, 115 'monitoring': MONITORING_REQUIRES, 116 }, 117 test_suite='tests', 118 
dependency_links=DEPENDENCY_LINKS, 119 entry_points=ENTRY_POINTS) 120 ``` Path: `kinto/plugins/openid/__init__.py` Content: ``` 1 import re 2 3 import requests 4 from pyramid import authentication as base_auth 5 from pyramid.interfaces import IAuthenticationPolicy 6 from zope.interface import implementer 7 8 from kinto.core import logger 9 from kinto.core import utils as core_utils 10 from kinto.core.openapi import OpenAPI 11 12 from .utils import fetch_openid_config 13 14 15 @implementer(IAuthenticationPolicy) 16 class OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy): 17 def __init__(self, issuer, client_id, realm='Realm', **kwargs): 18 self.realm = realm 19 self.issuer = issuer 20 self.client_id = client_id 21 self.client_secret = kwargs.get('client_secret', '') 22 self.header_type = kwargs.get('header_type', 'Bearer') 23 self.userid_field = kwargs.get('userid_field', 'sub') 24 self.verification_ttl = int(kwargs.get('verification_ttl_seconds', 86400)) 25 26 # Fetch OpenID config (at instantiation, ie. startup) 27 self.oid_config = fetch_openid_config(issuer) 28 29 self._jwt_keys = None 30 31 def unauthenticated_userid(self, request): 32 """Return the userid or ``None`` if token could not be verified. 33 """ 34 settings = request.registry.settings 35 hmac_secret = settings['userid_hmac_secret'] 36 37 authorization = request.headers.get('Authorization', '') 38 try: 39 authmeth, access_token = authorization.split(' ', 1) 40 except ValueError: 41 return None 42 43 if authmeth.lower() != self.header_type.lower(): 44 return None 45 46 # XXX JWT Access token 47 # https://auth0.com/docs/tokens/access-token#access-token-format 48 49 # Check cache if these tokens were already verified. 50 hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token) 51 cache_key = 'openid:verify:%s'.format(hmac_tokens) 52 payload = request.registry.cache.get(cache_key) 53 if payload is None: 54 # This can take some time. 55 payload = self._verify_token(access_token) 56 if payload is None: 57 return None 58 # Save for next time / refresh ttl. 59 request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl) 60 # Extract meaningful field from userinfo (eg. email or sub) 61 return payload.get(self.userid_field) 62 63 def forget(self, request): 64 """A no-op. Credentials are sent on every request. 65 Return WWW-Authenticate Realm header for Bearer token. 66 """ 67 return [('WWW-Authenticate', '%s realm="%s"' % (self.header_type, self.realm))] 68 69 def _verify_token(self, access_token): 70 uri = self.oid_config['userinfo_endpoint'] 71 # Opaque access token string. Fetch user info from profile. 72 try: 73 resp = requests.get(uri, headers={'Authorization': 'Bearer ' + access_token}) 74 resp.raise_for_status() 75 userprofile = resp.json() 76 return userprofile 77 78 except (requests.exceptions.HTTPError, ValueError, KeyError) as e: 79 logger.debug('Unable to fetch user profile from %s (%s)' % (uri, e)) 80 return None 81 82 83 def includeme(config): 84 # Activate end-points. 85 config.scan('kinto.plugins.openid.views') 86 87 settings = config.get_settings() 88 89 openid_policies = [] 90 for k, v in settings.items(): 91 m = re.match('multiauth\.policy\.(.*)\.use', k) 92 if m: 93 if v.endswith('OpenIDConnectPolicy'): 94 openid_policies.append(m.group(1)) 95 96 if len(openid_policies) == 0: 97 # Do not add the capability if no policy is configured. 
98 return 99 100 providers_infos = [] 101 for name in openid_policies: 102 issuer = settings['multiauth.policy.%s.issuer' % name] 103 openid_config = fetch_openid_config(issuer) 104 105 client_id = settings['multiauth.policy.%s.client_id' % name] 106 header_type = settings.get('multiauth.policy.%s.header_type', 'Bearer') 107 108 providers_infos.append({ 109 'name': name, 110 'issuer': openid_config['issuer'], 111 'auth_path': '/openid/%s/login' % name, 112 'client_id': client_id, 113 'header_type': header_type, 114 'userinfo_endpoint': openid_config['userinfo_endpoint'], 115 }) 116 117 OpenAPI.expose_authentication_method(name, { 118 'type': 'oauth2', 119 'authorizationUrl': openid_config['authorization_endpoint'], 120 }) 121 122 config.add_api_capability( 123 'openid', 124 description='OpenID connect support.', 125 url='http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html', 126 providers=providers_infos) 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py --- a/kinto/plugins/openid/__init__.py +++ b/kinto/plugins/openid/__init__.py @@ -48,7 +48,7 @@ # Check cache if these tokens were already verified. hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token) - cache_key = 'openid:verify:%s'.format(hmac_tokens) + cache_key = 'openid:verify:{}'.format(hmac_tokens) payload = request.registry.cache.get(cache_key) if payload is None: # This can take some time. diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -83,7 +83,7 @@ setup(name='kinto', - version='9.1.1', + version='9.1.2', description='Kinto Web Service - Store, Sync, Share, and Self-Host.', long_description='{}\n\n{}\n\n{}'.format(README, CHANGELOG, CONTRIBUTORS), license='Apache License (2.0)',
{"golden_diff": "diff --git a/kinto/plugins/openid/__init__.py b/kinto/plugins/openid/__init__.py\n--- a/kinto/plugins/openid/__init__.py\n+++ b/kinto/plugins/openid/__init__.py\n@@ -48,7 +48,7 @@\n \n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n- cache_key = 'openid:verify:%s'.format(hmac_tokens)\n+ cache_key = 'openid:verify:{}'.format(hmac_tokens)\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -83,7 +83,7 @@\n \n \n setup(name='kinto',\n- version='9.1.1',\n+ version='9.1.2',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description='{}\\n\\n{}\\n\\n{}'.format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n", "issue": "OpenID payload cache uses the wrong cache key\nThe `cache_key` becomes the same for every access token due to this bug: https://github.com/Kinto/kinto/blob/e1e0d6be0024418fd100210901f9d2ca06344fe1/kinto/plugins/openid/__init__.py#L51\r\nNo matter what the `hmac_tokens` variable is the `cache_key` always becomes `'openid:verify:%s'`.\r\n\r\n\n", "before_files": [{"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\n# abspath here because setup.py may be __main__, in which case\n# __file__ is not guaranteed to be absolute\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'bcrypt',\n 'colander >= 1.4.0',\n 'cornice',\n 'cornice_swagger >= 0.5.1',\n 'dockerflow',\n 'jsonschema',\n 'jsonpatch',\n 'logging-color-formatter >= 1.0.1', # Message interpolations.\n 'python-dateutil',\n 'pyramid >= 1.9.1, < 2.0',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n 'pyramid_tm >= 2.1',\n 'requests',\n 'waitress',\n 'ujson >= 1.35',\n]\n\nPOSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2 > 2.5',\n 'zope.sqlalchemy',\n]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nMEMCACHED_REQUIRES = [\n 'python-memcached'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'bravado_core',\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = []\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='9.1.1',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description='{}\\n\\n{}\\n\\n{}'.format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'License :: OSI Approved :: Apache Software License'\n ],\n keywords='web sync json storage services',\n author='Mozilla 
Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py', '*.yaml']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'memcached': MEMCACHED_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n },\n test_suite='tests',\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}, {"content": "import re\n\nimport requests\nfrom pyramid import authentication as base_auth\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom zope.interface import implementer\n\nfrom kinto.core import logger\nfrom kinto.core import utils as core_utils\nfrom kinto.core.openapi import OpenAPI\n\nfrom .utils import fetch_openid_config\n\n\n@implementer(IAuthenticationPolicy)\nclass OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):\n def __init__(self, issuer, client_id, realm='Realm', **kwargs):\n self.realm = realm\n self.issuer = issuer\n self.client_id = client_id\n self.client_secret = kwargs.get('client_secret', '')\n self.header_type = kwargs.get('header_type', 'Bearer')\n self.userid_field = kwargs.get('userid_field', 'sub')\n self.verification_ttl = int(kwargs.get('verification_ttl_seconds', 86400))\n\n # Fetch OpenID config (at instantiation, ie. startup)\n self.oid_config = fetch_openid_config(issuer)\n\n self._jwt_keys = None\n\n def unauthenticated_userid(self, request):\n \"\"\"Return the userid or ``None`` if token could not be verified.\n \"\"\"\n settings = request.registry.settings\n hmac_secret = settings['userid_hmac_secret']\n\n authorization = request.headers.get('Authorization', '')\n try:\n authmeth, access_token = authorization.split(' ', 1)\n except ValueError:\n return None\n\n if authmeth.lower() != self.header_type.lower():\n return None\n\n # XXX JWT Access token\n # https://auth0.com/docs/tokens/access-token#access-token-format\n\n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n cache_key = 'openid:verify:%s'.format(hmac_tokens)\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\n payload = self._verify_token(access_token)\n if payload is None:\n return None\n # Save for next time / refresh ttl.\n request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)\n # Extract meaningful field from userinfo (eg. email or sub)\n return payload.get(self.userid_field)\n\n def forget(self, request):\n \"\"\"A no-op. Credentials are sent on every request.\n Return WWW-Authenticate Realm header for Bearer token.\n \"\"\"\n return [('WWW-Authenticate', '%s realm=\"%s\"' % (self.header_type, self.realm))]\n\n def _verify_token(self, access_token):\n uri = self.oid_config['userinfo_endpoint']\n # Opaque access token string. 
Fetch user info from profile.\n try:\n resp = requests.get(uri, headers={'Authorization': 'Bearer ' + access_token})\n resp.raise_for_status()\n userprofile = resp.json()\n return userprofile\n\n except (requests.exceptions.HTTPError, ValueError, KeyError) as e:\n logger.debug('Unable to fetch user profile from %s (%s)' % (uri, e))\n return None\n\n\ndef includeme(config):\n # Activate end-points.\n config.scan('kinto.plugins.openid.views')\n\n settings = config.get_settings()\n\n openid_policies = []\n for k, v in settings.items():\n m = re.match('multiauth\\.policy\\.(.*)\\.use', k)\n if m:\n if v.endswith('OpenIDConnectPolicy'):\n openid_policies.append(m.group(1))\n\n if len(openid_policies) == 0:\n # Do not add the capability if no policy is configured.\n return\n\n providers_infos = []\n for name in openid_policies:\n issuer = settings['multiauth.policy.%s.issuer' % name]\n openid_config = fetch_openid_config(issuer)\n\n client_id = settings['multiauth.policy.%s.client_id' % name]\n header_type = settings.get('multiauth.policy.%s.header_type', 'Bearer')\n\n providers_infos.append({\n 'name': name,\n 'issuer': openid_config['issuer'],\n 'auth_path': '/openid/%s/login' % name,\n 'client_id': client_id,\n 'header_type': header_type,\n 'userinfo_endpoint': openid_config['userinfo_endpoint'],\n })\n\n OpenAPI.expose_authentication_method(name, {\n 'type': 'oauth2',\n 'authorizationUrl': openid_config['authorization_endpoint'],\n })\n\n config.add_api_capability(\n 'openid',\n description='OpenID connect support.',\n url='http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html',\n providers=providers_infos)\n", "path": "kinto/plugins/openid/__init__.py"}], "after_files": [{"content": "import codecs\nimport os\nfrom setuptools import setup, find_packages\n\n# abspath here because setup.py may be __main__, in which case\n# __file__ is not guaranteed to be absolute\nhere = os.path.abspath(os.path.dirname(__file__))\n\n\ndef read_file(filename):\n \"\"\"Open a related file and return its content.\"\"\"\n with codecs.open(os.path.join(here, filename), encoding='utf-8') as f:\n content = f.read()\n return content\n\n\nREADME = read_file('README.rst')\nCHANGELOG = read_file('CHANGELOG.rst')\nCONTRIBUTORS = read_file('CONTRIBUTORS.rst')\n\nREQUIREMENTS = [\n 'bcrypt',\n 'colander >= 1.4.0',\n 'cornice',\n 'cornice_swagger >= 0.5.1',\n 'dockerflow',\n 'jsonschema',\n 'jsonpatch',\n 'logging-color-formatter >= 1.0.1', # Message interpolations.\n 'python-dateutil',\n 'pyramid >= 1.9.1, < 2.0',\n 'pyramid_multiauth >= 0.8', # User on policy selected event.\n 'transaction',\n # pyramid_tm changed the location of their tween in 2.x and one of\n # our tests fails on 2.0.\n 'pyramid_tm >= 2.1',\n 'requests',\n 'waitress',\n 'ujson >= 1.35',\n]\n\nPOSTGRESQL_REQUIRES = [\n 'SQLAlchemy',\n 'psycopg2 > 2.5',\n 'zope.sqlalchemy',\n]\n\nREDIS_REQUIRES = [\n 'kinto_redis'\n]\n\nMEMCACHED_REQUIRES = [\n 'python-memcached'\n]\n\nSETUP_REQUIRES = [\n 'pytest-runner'\n]\n\nTEST_REQUIREMENTS = [\n 'bravado_core',\n 'pytest',\n 'WebTest'\n]\n\nDEPENDENCY_LINKS = []\n\nMONITORING_REQUIRES = [\n 'raven',\n 'statsd',\n 'newrelic',\n 'werkzeug',\n]\n\nENTRY_POINTS = {\n 'paste.app_factory': [\n 'main = kinto:main',\n ],\n 'console_scripts': [\n 'kinto = kinto.__main__:main'\n ],\n}\n\n\nsetup(name='kinto',\n version='9.1.2',\n description='Kinto Web Service - Store, Sync, Share, and Self-Host.',\n long_description='{}\\n\\n{}\\n\\n{}'.format(README, CHANGELOG, CONTRIBUTORS),\n license='Apache License (2.0)',\n 
classifiers=[\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Topic :: Internet :: WWW/HTTP',\n 'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',\n 'License :: OSI Approved :: Apache Software License'\n ],\n keywords='web sync json storage services',\n author='Mozilla Services',\n author_email='[email protected]',\n url='https://github.com/Kinto/kinto',\n packages=find_packages(),\n package_data={'': ['*.rst', '*.py', '*.yaml']},\n include_package_data=True,\n zip_safe=False,\n setup_requires=SETUP_REQUIRES,\n tests_require=TEST_REQUIREMENTS,\n install_requires=REQUIREMENTS,\n extras_require={\n 'redis': REDIS_REQUIRES,\n 'memcached': MEMCACHED_REQUIRES,\n 'postgresql': POSTGRESQL_REQUIRES,\n 'monitoring': MONITORING_REQUIRES,\n },\n test_suite='tests',\n dependency_links=DEPENDENCY_LINKS,\n entry_points=ENTRY_POINTS)\n", "path": "setup.py"}, {"content": "import re\n\nimport requests\nfrom pyramid import authentication as base_auth\nfrom pyramid.interfaces import IAuthenticationPolicy\nfrom zope.interface import implementer\n\nfrom kinto.core import logger\nfrom kinto.core import utils as core_utils\nfrom kinto.core.openapi import OpenAPI\n\nfrom .utils import fetch_openid_config\n\n\n@implementer(IAuthenticationPolicy)\nclass OpenIDConnectPolicy(base_auth.CallbackAuthenticationPolicy):\n def __init__(self, issuer, client_id, realm='Realm', **kwargs):\n self.realm = realm\n self.issuer = issuer\n self.client_id = client_id\n self.client_secret = kwargs.get('client_secret', '')\n self.header_type = kwargs.get('header_type', 'Bearer')\n self.userid_field = kwargs.get('userid_field', 'sub')\n self.verification_ttl = int(kwargs.get('verification_ttl_seconds', 86400))\n\n # Fetch OpenID config (at instantiation, ie. startup)\n self.oid_config = fetch_openid_config(issuer)\n\n self._jwt_keys = None\n\n def unauthenticated_userid(self, request):\n \"\"\"Return the userid or ``None`` if token could not be verified.\n \"\"\"\n settings = request.registry.settings\n hmac_secret = settings['userid_hmac_secret']\n\n authorization = request.headers.get('Authorization', '')\n try:\n authmeth, access_token = authorization.split(' ', 1)\n except ValueError:\n return None\n\n if authmeth.lower() != self.header_type.lower():\n return None\n\n # XXX JWT Access token\n # https://auth0.com/docs/tokens/access-token#access-token-format\n\n # Check cache if these tokens were already verified.\n hmac_tokens = core_utils.hmac_digest(hmac_secret, access_token)\n cache_key = 'openid:verify:{}'.format(hmac_tokens)\n payload = request.registry.cache.get(cache_key)\n if payload is None:\n # This can take some time.\n payload = self._verify_token(access_token)\n if payload is None:\n return None\n # Save for next time / refresh ttl.\n request.registry.cache.set(cache_key, payload, ttl=self.verification_ttl)\n # Extract meaningful field from userinfo (eg. email or sub)\n return payload.get(self.userid_field)\n\n def forget(self, request):\n \"\"\"A no-op. Credentials are sent on every request.\n Return WWW-Authenticate Realm header for Bearer token.\n \"\"\"\n return [('WWW-Authenticate', '%s realm=\"%s\"' % (self.header_type, self.realm))]\n\n def _verify_token(self, access_token):\n uri = self.oid_config['userinfo_endpoint']\n # Opaque access token string. 
Fetch user info from profile.\n try:\n resp = requests.get(uri, headers={'Authorization': 'Bearer ' + access_token})\n resp.raise_for_status()\n userprofile = resp.json()\n return userprofile\n\n except (requests.exceptions.HTTPError, ValueError, KeyError) as e:\n logger.debug('Unable to fetch user profile from %s (%s)' % (uri, e))\n return None\n\n\ndef includeme(config):\n # Activate end-points.\n config.scan('kinto.plugins.openid.views')\n\n settings = config.get_settings()\n\n openid_policies = []\n for k, v in settings.items():\n m = re.match('multiauth\\.policy\\.(.*)\\.use', k)\n if m:\n if v.endswith('OpenIDConnectPolicy'):\n openid_policies.append(m.group(1))\n\n if len(openid_policies) == 0:\n # Do not add the capability if no policy is configured.\n return\n\n providers_infos = []\n for name in openid_policies:\n issuer = settings['multiauth.policy.%s.issuer' % name]\n openid_config = fetch_openid_config(issuer)\n\n client_id = settings['multiauth.policy.%s.client_id' % name]\n header_type = settings.get('multiauth.policy.%s.header_type', 'Bearer')\n\n providers_infos.append({\n 'name': name,\n 'issuer': openid_config['issuer'],\n 'auth_path': '/openid/%s/login' % name,\n 'client_id': client_id,\n 'header_type': header_type,\n 'userinfo_endpoint': openid_config['userinfo_endpoint'],\n })\n\n OpenAPI.expose_authentication_method(name, {\n 'type': 'oauth2',\n 'authorizationUrl': openid_config['authorization_endpoint'],\n })\n\n config.add_api_capability(\n 'openid',\n description='OpenID connect support.',\n url='http://kinto.readthedocs.io/en/stable/api/1.x/authentication.html',\n providers=providers_infos)\n", "path": "kinto/plugins/openid/__init__.py"}]}
2,740
253
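Aside on the record above: the whole bug comes down to mixing printf-style and `str.format` placeholders — `str.format` only substitutes `{}` fields, so the `%s` template is returned verbatim and every access token collapses onto one cache key. A minimal, self-contained sketch (plain Python, no Kinto imports; the token strings are made up for illustration):

```python
def cache_key_buggy(hmac_token):
    # str.format() ignores "%s", so the template comes back unchanged
    return 'openid:verify:%s'.format(hmac_token)


def cache_key_fixed(hmac_token):
    # "{}" is an actual str.format placeholder, so the digest is interpolated
    return 'openid:verify:{}'.format(hmac_token)


assert cache_key_buggy('digest-a') == cache_key_buggy('digest-b') == 'openid:verify:%s'
assert cache_key_fixed('digest-a') == 'openid:verify:digest-a'
```

This is the same `%s` → `{}` substitution the golden diff above makes, alongside the version bump in `setup.py`.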
gh_patches_debug_35158
rasdani/github-patches
git_diff
python-discord__bot-527
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Restricting/redirecting output of commands to bot-commands for regular users **Edit:** I've decided to make this a general issue, since the `!free` command turns out to be a bit disruptive as well. In python-discussions, there have just been four consecutive calls to `!free` and it looks very disruptive (see below). My idea would be to create an easy way to redirect the output of certain commands, free and help, to bot commands for regular users, with the bot sending a temporary message to the user in channel redirecting them there. The bot could then delete that message (and, maybe, the invoking message as well) after a certain amount of time has passed. **Four times !free in python-discussion:** ![2019-01-18_17-38-32](https://user-images.githubusercontent.com/33516116/51400480-c8aeb980-1b48-11e9-838e-515904e62776.png) **Old message for context:** Now the `!help` command works outside of bot-commands again, we may want to think about restricting the use/output to bot-commands for regular users. The help-embeds are quite sizeable and, therefore, quite disruptive in on-topic channels. I want to propose to redirect the **output** of help (the help-embed) to bot-commands for non-staff members, prepended by a mention of the user invoking the command. In addition, we could display a small, non-disruptive embed in the original context channel redirecting the user to bot commands. I think this should also be the case for help-calls on specific commands, as the embeds for those can be quite sizeable as well (e.g., `!help site`). Personally, I'd like this restriction to only apply to regular users, so staff can pull up help on a command quickly in channel and use help in the staff channels. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bot/cogs/free.py` Content: ``` 1 import logging 2 from datetime import datetime 3 from operator import itemgetter 4 5 from discord import Colour, Embed, Member, utils 6 from discord.ext.commands import Bot, Cog, Context, command 7 8 from bot.constants import Categories, Channels, Free, STAFF_ROLES 9 from bot.decorators import redirect_output 10 11 log = logging.getLogger(__name__) 12 13 TIMEOUT = Free.activity_timeout 14 RATE = Free.cooldown_rate 15 PER = Free.cooldown_per 16 17 18 class Free(Cog): 19 """Tries to figure out which help channels are free.""" 20 21 PYTHON_HELP_ID = Categories.python_help 22 23 @command(name="free", aliases=('f',)) 24 @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES) 25 async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None: 26 """ 27 Lists free help channels by likeliness of availability. 28 29 seek is used only when this command is invoked in a help channel. 30 You cannot override seek without mentioning a user first. 31 32 When seek is 2, we are avoiding considering the last active message 33 in a channel to be the one that invoked this command. 34 35 When seek is 3 or more, a user has been mentioned on the assumption 36 that they asked if the channel is free or they asked their question 37 in an active channel, and we want the message before that happened. 
38 """ 39 free_channels = [] 40 python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID) 41 42 if user is not None and seek == 2: 43 seek = 3 44 elif not 0 < seek < 10: 45 seek = 3 46 47 # Iterate through all the help channels 48 # to check latest activity 49 for channel in python_help.channels: 50 # Seek further back in the help channel 51 # the command was invoked in 52 if channel.id == ctx.channel.id: 53 messages = await channel.history(limit=seek).flatten() 54 msg = messages[seek - 1] 55 # Otherwise get last message 56 else: 57 msg = await channel.history(limit=1).next() # noqa (False positive) 58 59 inactive = (datetime.utcnow() - msg.created_at).seconds 60 if inactive > TIMEOUT: 61 free_channels.append((inactive, channel)) 62 63 embed = Embed() 64 embed.colour = Colour.blurple() 65 embed.title = "**Looking for a free help channel?**" 66 67 if user is not None: 68 embed.description = f"**Hey {user.mention}!**\n\n" 69 else: 70 embed.description = "" 71 72 # Display all potentially inactive channels 73 # in descending order of inactivity 74 if free_channels: 75 embed.description += "**The following channel{0} look{1} free:**\n\n**".format( 76 's' if len(free_channels) > 1 else '', 77 '' if len(free_channels) > 1 else 's' 78 ) 79 80 # Sort channels in descending order by seconds 81 # Get position in list, inactivity, and channel object 82 # For each channel, add to embed.description 83 sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True) 84 for i, (inactive, channel) in enumerate(sorted_channels, 1): 85 minutes, seconds = divmod(inactive, 60) 86 if minutes > 59: 87 hours, minutes = divmod(minutes, 60) 88 embed.description += f"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\n\n" 89 else: 90 embed.description += f"{i}. {channel.mention} inactive for {minutes}m{seconds}s\n\n" 91 92 embed.description += ("**\nThese channels aren't guaranteed to be free, " 93 "so use your best judgement and check for yourself.") 94 else: 95 embed.description = ("**Doesn't look like any channels are available right now. " 96 "You're welcome to check for yourself to be sure. " 97 "If all channels are truly busy, please be patient " 98 "as one will likely be available soon.**") 99 100 await ctx.send(embed=embed) 101 102 103 def setup(bot: Bot) -> None: 104 """Free cog load.""" 105 bot.add_cog(Free()) 106 log.info("Cog loaded: Free") 107 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bot/cogs/free.py b/bot/cogs/free.py --- a/bot/cogs/free.py +++ b/bot/cogs/free.py @@ -72,30 +72,27 @@ # Display all potentially inactive channels # in descending order of inactivity if free_channels: - embed.description += "**The following channel{0} look{1} free:**\n\n**".format( - 's' if len(free_channels) > 1 else '', - '' if len(free_channels) > 1 else 's' - ) - # Sort channels in descending order by seconds # Get position in list, inactivity, and channel object # For each channel, add to embed.description sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True) - for i, (inactive, channel) in enumerate(sorted_channels, 1): + + for (inactive, channel) in sorted_channels[:3]: minutes, seconds = divmod(inactive, 60) if minutes > 59: hours, minutes = divmod(minutes, 60) - embed.description += f"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\n\n" + embed.description += f"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\n" else: - embed.description += f"{i}. {channel.mention} inactive for {minutes}m{seconds}s\n\n" + embed.description += f"{channel.mention} **{minutes}m {seconds}s** inactive\n" - embed.description += ("**\nThese channels aren't guaranteed to be free, " - "so use your best judgement and check for yourself.") + embed.set_footer(text="Please confirm these channels are free before posting") else: - embed.description = ("**Doesn't look like any channels are available right now. " - "You're welcome to check for yourself to be sure. " - "If all channels are truly busy, please be patient " - "as one will likely be available soon.**") + embed.description = ( + "Doesn't look like any channels are available right now. " + "You're welcome to check for yourself to be sure. " + "If all channels are truly busy, please be patient " + "as one will likely be available soon." + ) await ctx.send(embed=embed)
{"golden_diff": "diff --git a/bot/cogs/free.py b/bot/cogs/free.py\n--- a/bot/cogs/free.py\n+++ b/bot/cogs/free.py\n@@ -72,30 +72,27 @@\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n- embed.description += \"**The following channel{0} look{1} free:**\\n\\n**\".format(\n- 's' if len(free_channels) > 1 else '',\n- '' if len(free_channels) > 1 else 's'\n- )\n-\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n- for i, (inactive, channel) in enumerate(sorted_channels, 1):\n+\n+ for (inactive, channel) in sorted_channels[:3]:\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n- embed.description += f\"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\\n\\n\"\n+ embed.description += f\"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\\n\"\n else:\n- embed.description += f\"{i}. {channel.mention} inactive for {minutes}m{seconds}s\\n\\n\"\n+ embed.description += f\"{channel.mention} **{minutes}m {seconds}s** inactive\\n\"\n \n- embed.description += (\"**\\nThese channels aren't guaranteed to be free, \"\n- \"so use your best judgement and check for yourself.\")\n+ embed.set_footer(text=\"Please confirm these channels are free before posting\")\n else:\n- embed.description = (\"**Doesn't look like any channels are available right now. \"\n- \"You're welcome to check for yourself to be sure. \"\n- \"If all channels are truly busy, please be patient \"\n- \"as one will likely be available soon.**\")\n+ embed.description = (\n+ \"Doesn't look like any channels are available right now. \"\n+ \"You're welcome to check for yourself to be sure. \"\n+ \"If all channels are truly busy, please be patient \"\n+ \"as one will likely be available soon.\"\n+ )\n \n await ctx.send(embed=embed)\n", "issue": "Restricting/redirecting output of commands to bot-commands for regular users\n**Edit:** I've decided to make this a general issue, since the `!free` command turns out to be a bit disruptive as well. In python-discussions, there have just been four consecutive calls to `!free` and it looks very disruptive (see below).\r\n\r\nMy idea would be to create an easy way to redirect the output of certain commands, free and help, to bot commands for regular users, with the bot sending a temporary message to the user in channel redirecting them there. The bot could then delete that message (and, maybe, the invoking message as well) after a certain amount of time has passed.\r\n\r\n**Four times !free in python-discussion:**\r\n![2019-01-18_17-38-32](https://user-images.githubusercontent.com/33516116/51400480-c8aeb980-1b48-11e9-838e-515904e62776.png)\r\n\r\n**Old message for context:**\r\nNow the `!help` command works outside of bot-commands again, we may want to think about restricting the use/output to bot-commands for regular users. The help-embeds are quite sizeable and, therefore, quite disruptive in on-topic channels.\r\n\r\nI want to propose to redirect the **output** of help (the help-embed) to bot-commands for non-staff members, prepended by a mention of the user invoking the command. In addition, we could display a small, non-disruptive embed in the original context channel redirecting the user to bot commands. 
I think this should also be the case for help-calls on specific commands, as the embeds for those can be quite sizeable as well (e.g., `!help site`).\r\n\r\nPersonally, I'd like this restriction to only apply to regular users, so staff can pull up help on a command quickly in channel and use help in the staff channels.\n", "before_files": [{"content": "import logging\nfrom datetime import datetime\nfrom operator import itemgetter\n\nfrom discord import Colour, Embed, Member, utils\nfrom discord.ext.commands import Bot, Cog, Context, command\n\nfrom bot.constants import Categories, Channels, Free, STAFF_ROLES\nfrom bot.decorators import redirect_output\n\nlog = logging.getLogger(__name__)\n\nTIMEOUT = Free.activity_timeout\nRATE = Free.cooldown_rate\nPER = Free.cooldown_per\n\n\nclass Free(Cog):\n \"\"\"Tries to figure out which help channels are free.\"\"\"\n\n PYTHON_HELP_ID = Categories.python_help\n\n @command(name=\"free\", aliases=('f',))\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None:\n \"\"\"\n Lists free help channels by likeliness of availability.\n\n seek is used only when this command is invoked in a help channel.\n You cannot override seek without mentioning a user first.\n\n When seek is 2, we are avoiding considering the last active message\n in a channel to be the one that invoked this command.\n\n When seek is 3 or more, a user has been mentioned on the assumption\n that they asked if the channel is free or they asked their question\n in an active channel, and we want the message before that happened.\n \"\"\"\n free_channels = []\n python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID)\n\n if user is not None and seek == 2:\n seek = 3\n elif not 0 < seek < 10:\n seek = 3\n\n # Iterate through all the help channels\n # to check latest activity\n for channel in python_help.channels:\n # Seek further back in the help channel\n # the command was invoked in\n if channel.id == ctx.channel.id:\n messages = await channel.history(limit=seek).flatten()\n msg = messages[seek - 1]\n # Otherwise get last message\n else:\n msg = await channel.history(limit=1).next() # noqa (False positive)\n\n inactive = (datetime.utcnow() - msg.created_at).seconds\n if inactive > TIMEOUT:\n free_channels.append((inactive, channel))\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = \"**Looking for a free help channel?**\"\n\n if user is not None:\n embed.description = f\"**Hey {user.mention}!**\\n\\n\"\n else:\n embed.description = \"\"\n\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n embed.description += \"**The following channel{0} look{1} free:**\\n\\n**\".format(\n 's' if len(free_channels) > 1 else '',\n '' if len(free_channels) > 1 else 's'\n )\n\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n for i, (inactive, channel) in enumerate(sorted_channels, 1):\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n embed.description += f\"{i}. {channel.mention} inactive for {hours}h{minutes}m{seconds}s\\n\\n\"\n else:\n embed.description += f\"{i}. 
{channel.mention} inactive for {minutes}m{seconds}s\\n\\n\"\n\n embed.description += (\"**\\nThese channels aren't guaranteed to be free, \"\n \"so use your best judgement and check for yourself.\")\n else:\n embed.description = (\"**Doesn't look like any channels are available right now. \"\n \"You're welcome to check for yourself to be sure. \"\n \"If all channels are truly busy, please be patient \"\n \"as one will likely be available soon.**\")\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Free cog load.\"\"\"\n bot.add_cog(Free())\n log.info(\"Cog loaded: Free\")\n", "path": "bot/cogs/free.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime\nfrom operator import itemgetter\n\nfrom discord import Colour, Embed, Member, utils\nfrom discord.ext.commands import Bot, Cog, Context, command\n\nfrom bot.constants import Categories, Channels, Free, STAFF_ROLES\nfrom bot.decorators import redirect_output\n\nlog = logging.getLogger(__name__)\n\nTIMEOUT = Free.activity_timeout\nRATE = Free.cooldown_rate\nPER = Free.cooldown_per\n\n\nclass Free(Cog):\n \"\"\"Tries to figure out which help channels are free.\"\"\"\n\n PYTHON_HELP_ID = Categories.python_help\n\n @command(name=\"free\", aliases=('f',))\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def free(self, ctx: Context, user: Member = None, seek: int = 2) -> None:\n \"\"\"\n Lists free help channels by likeliness of availability.\n\n seek is used only when this command is invoked in a help channel.\n You cannot override seek without mentioning a user first.\n\n When seek is 2, we are avoiding considering the last active message\n in a channel to be the one that invoked this command.\n\n When seek is 3 or more, a user has been mentioned on the assumption\n that they asked if the channel is free or they asked their question\n in an active channel, and we want the message before that happened.\n \"\"\"\n free_channels = []\n python_help = utils.get(ctx.guild.categories, id=self.PYTHON_HELP_ID)\n\n if user is not None and seek == 2:\n seek = 3\n elif not 0 < seek < 10:\n seek = 3\n\n # Iterate through all the help channels\n # to check latest activity\n for channel in python_help.channels:\n # Seek further back in the help channel\n # the command was invoked in\n if channel.id == ctx.channel.id:\n messages = await channel.history(limit=seek).flatten()\n msg = messages[seek - 1]\n # Otherwise get last message\n else:\n msg = await channel.history(limit=1).next() # noqa (False positive)\n\n inactive = (datetime.utcnow() - msg.created_at).seconds\n if inactive > TIMEOUT:\n free_channels.append((inactive, channel))\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = \"**Looking for a free help channel?**\"\n\n if user is not None:\n embed.description = f\"**Hey {user.mention}!**\\n\\n\"\n else:\n embed.description = \"\"\n\n # Display all potentially inactive channels\n # in descending order of inactivity\n if free_channels:\n # Sort channels in descending order by seconds\n # Get position in list, inactivity, and channel object\n # For each channel, add to embed.description\n sorted_channels = sorted(free_channels, key=itemgetter(0), reverse=True)\n\n for (inactive, channel) in sorted_channels[:3]:\n minutes, seconds = divmod(inactive, 60)\n if minutes > 59:\n hours, minutes = divmod(minutes, 60)\n embed.description += f\"{channel.mention} **{hours}h {minutes}m {seconds}s** inactive\\n\"\n else:\n embed.description += 
f\"{channel.mention} **{minutes}m {seconds}s** inactive\\n\"\n\n embed.set_footer(text=\"Please confirm these channels are free before posting\")\n else:\n embed.description = (\n \"Doesn't look like any channels are available right now. \"\n \"You're welcome to check for yourself to be sure. \"\n \"If all channels are truly busy, please be patient \"\n \"as one will likely be available soon.\"\n )\n\n await ctx.send(embed=embed)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Free cog load.\"\"\"\n bot.add_cog(Free())\n log.info(\"Cog loaded: Free\")\n", "path": "bot/cogs/free.py"}]}
1,845
551
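Aside on the record above: the accepted patch makes the `!free` embed less disruptive by keeping only the three longest-idle channels and compressing each entry onto a single line. The listing step reduces to a sort, a slice, and a `divmod`-based time format; below is a standalone sketch of just that step (plain `(idle_seconds, name)` tuples stand in for the Discord channel objects, and the sample values are invented):

```python
from operator import itemgetter


def format_free_channels(free_channels):
    """free_channels: iterable of (idle_seconds, channel_name) tuples."""
    lines = []
    # most-idle first, at most three entries, mirroring the golden diff above
    for inactive, name in sorted(free_channels, key=itemgetter(0), reverse=True)[:3]:
        minutes, seconds = divmod(inactive, 60)
        if minutes > 59:
            hours, minutes = divmod(minutes, 60)
            lines.append(f"{name} **{hours}h {minutes}m {seconds}s** inactive")
        else:
            lines.append(f"{name} **{minutes}m {seconds}s** inactive")
    return "\n".join(lines)


print(format_free_channels([(4000, "#help-0"), (700, "#help-1"), (95, "#help-2"), (61, "#help-3")]))
# #help-0 **1h 6m 40s** inactive
# #help-1 **11m 40s** inactive
# #help-2 **1m 35s** inactive
```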
gh_patches_debug_2246
rasdani/github-patches
git_diff
StackStorm__st2-3408
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- st2kv does not resolve in Jinja cast expression In the rule: ` {{ "2" | int }} - 2` Dah `{{ trigger.count | int }} -> 2` OK `{{ st2kv.system.count | int }} -> 0` Wrong. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `st2common/st2common/services/keyvalues.py` Content: ``` 1 # Licensed to the StackStorm, Inc ('StackStorm') under one or more 2 # contributor license agreements. See the NOTICE file distributed with 3 # this work for additional information regarding copyright ownership. 4 # The ASF licenses this file to You under the Apache License, Version 2.0 5 # (the "License"); you may not use this file except in compliance with 6 # the License. You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from st2common import log as logging 17 18 from st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE 19 from st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE 20 from st2common.constants.keyvalue import ALLOWED_SCOPES 21 from st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR 22 from st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException 23 from st2common.models.system.keyvalue import UserKeyReference 24 from st2common.persistence.keyvalue import KeyValuePair 25 26 __all__ = [ 27 'get_kvp_for_name', 28 'get_values_for_names', 29 30 'KeyValueLookup', 31 'UserKeyValueLookup' 32 ] 33 34 LOG = logging.getLogger(__name__) 35 36 37 def get_kvp_for_name(name): 38 try: 39 kvp_db = KeyValuePair.get_by_name(name) 40 except ValueError: 41 kvp_db = None 42 43 return kvp_db 44 45 46 def get_values_for_names(names, default_value=None): 47 """ 48 Retrieve values for the provided key names (multi get). 49 50 If a KeyValuePair objects for a particular name doesn't exist, the dictionary will contain 51 default_value for that name. 
52 53 :rtype: ``dict`` 54 """ 55 result = {} 56 kvp_dbs = KeyValuePair.get_by_names(names=names) 57 58 name_to_kvp_db_map = {} 59 for kvp_db in kvp_dbs: 60 name_to_kvp_db_map[kvp_db.name] = kvp_db.value 61 62 for name in names: 63 result[name] = name_to_kvp_db_map.get(name, default_value) 64 65 return result 66 67 68 class KeyValueLookup(object): 69 70 def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE): 71 if not scope: 72 scope = FULL_SYSTEM_SCOPE 73 74 if scope == SYSTEM_SCOPE: 75 scope = FULL_SYSTEM_SCOPE 76 77 self._prefix = prefix 78 self._key_prefix = key_prefix or '' 79 self._value_cache = cache or {} 80 self._scope = scope 81 82 def __str__(self): 83 return self._value_cache[self._key_prefix] 84 85 def __getitem__(self, key): 86 return self._get(key) 87 88 def __getattr__(self, name): 89 return self._get(name) 90 91 def _get(self, name): 92 # get the value for this key and save in value_cache 93 if self._key_prefix: 94 key = '%s.%s' % (self._key_prefix, name) 95 else: 96 key = name 97 98 if self._prefix: 99 kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key]) 100 else: 101 kvp_key = key 102 103 value = self._get_kv(kvp_key) 104 self._value_cache[key] = value 105 # return a KeyValueLookup as response since the lookup may not be complete e.g. if 106 # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja, 107 # will expect to do a dictionary style lookup for key_base and key_value as subsequent 108 # calls. Saving the value in cache avoids extra DB calls. 109 return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache, 110 scope=self._scope) 111 112 def _get_kv(self, key): 113 scope = self._scope 114 LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key) 115 kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key) 116 if kvp: 117 LOG.debug('Got value %s from datastore.', kvp.value) 118 return kvp.value if kvp else '' 119 120 121 class UserKeyValueLookup(object): 122 123 def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE): 124 if not scope: 125 scope = FULL_USER_SCOPE 126 127 if scope == USER_SCOPE: 128 scope = FULL_USER_SCOPE 129 130 self._prefix = prefix 131 self._key_prefix = key_prefix or '' 132 self._value_cache = cache or {} 133 self._user = user 134 self._scope = scope 135 136 def __str__(self): 137 return self._value_cache[self._key_prefix] 138 139 def __getitem__(self, key): 140 return self._get(key) 141 142 def __getattr__(self, name): 143 return self._get(name) 144 145 def _get(self, name): 146 # get the value for this key and save in value_cache 147 if self._key_prefix: 148 key = '%s.%s' % (self._key_prefix, name) 149 else: 150 key = UserKeyReference(name=name, user=self._user).ref 151 152 if self._prefix: 153 kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key]) 154 else: 155 kvp_key = key 156 157 value = self._get_kv(kvp_key) 158 self._value_cache[key] = value 159 # return a KeyValueLookup as response since the lookup may not be complete e.g. if 160 # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja, 161 # will expect to do a dictionary style lookup for key_base and key_value as subsequent 162 # calls. Saving the value in cache avoids extra DB calls. 
163 return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key, 164 cache=self._value_cache, scope=self._scope) 165 166 def _get_kv(self, key): 167 scope = self._scope 168 kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key) 169 return kvp.value if kvp else '' 170 171 172 def get_key_reference(scope, name, user=None): 173 """ 174 Given a key name and user this method returns a new name (string ref) 175 to address the key value pair in the context of that user. 176 177 :param user: User to whom key belongs. 178 :type name: ``str`` 179 180 :param name: Original name of the key. 181 :type name: ``str`` 182 183 :rtype: ``str`` 184 """ 185 if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE): 186 return name 187 elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE): 188 if not user: 189 raise InvalidUserException('A valid user must be specified for user key ref.') 190 return UserKeyReference(name=name, user=user).ref 191 else: 192 raise InvalidScopeException('Scope "%s" is not valid. Allowed scopes are %s.' % 193 (scope, ALLOWED_SCOPES)) 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/st2common/st2common/services/keyvalues.py b/st2common/st2common/services/keyvalues.py --- a/st2common/st2common/services/keyvalues.py +++ b/st2common/st2common/services/keyvalues.py @@ -82,6 +82,12 @@ def __str__(self): return self._value_cache[self._key_prefix] + def __int__(self): + return int(float(self)) + + def __float__(self): + return float(str(self)) + def __getitem__(self, key): return self._get(key)
{"golden_diff": "diff --git a/st2common/st2common/services/keyvalues.py b/st2common/st2common/services/keyvalues.py\n--- a/st2common/st2common/services/keyvalues.py\n+++ b/st2common/st2common/services/keyvalues.py\n@@ -82,6 +82,12 @@\n def __str__(self):\n return self._value_cache[self._key_prefix]\n \n+ def __int__(self):\n+ return int(float(self))\n+\n+ def __float__(self):\n+ return float(str(self))\n+\n def __getitem__(self, key):\n return self._get(key)\n", "issue": "st2kv does not resolve in Jinja cast expression\nIn the rule: \r\n` {{ \"2\" | int }} - 2` Dah\r\n`{{ trigger.count | int }} -> 2` OK\r\n`{{ st2kv.system.count | int }} -> 0` Wrong.\r\n\n", "before_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom st2common import log as logging\n\nfrom st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE\nfrom st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE\nfrom st2common.constants.keyvalue import ALLOWED_SCOPES\nfrom st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR\nfrom st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException\nfrom st2common.models.system.keyvalue import UserKeyReference\nfrom st2common.persistence.keyvalue import KeyValuePair\n\n__all__ = [\n 'get_kvp_for_name',\n 'get_values_for_names',\n\n 'KeyValueLookup',\n 'UserKeyValueLookup'\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_kvp_for_name(name):\n try:\n kvp_db = KeyValuePair.get_by_name(name)\n except ValueError:\n kvp_db = None\n\n return kvp_db\n\n\ndef get_values_for_names(names, default_value=None):\n \"\"\"\n Retrieve values for the provided key names (multi get).\n\n If a KeyValuePair objects for a particular name doesn't exist, the dictionary will contain\n default_value for that name.\n\n :rtype: ``dict``\n \"\"\"\n result = {}\n kvp_dbs = KeyValuePair.get_by_names(names=names)\n\n name_to_kvp_db_map = {}\n for kvp_db in kvp_dbs:\n name_to_kvp_db_map[kvp_db.name] = kvp_db.value\n\n for name in names:\n result[name] = name_to_kvp_db_map.get(name, default_value)\n\n return result\n\n\nclass KeyValueLookup(object):\n\n def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):\n if not scope:\n scope = FULL_SYSTEM_SCOPE\n\n if scope == SYSTEM_SCOPE:\n scope = FULL_SYSTEM_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = name\n\n if 
self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. Saving the value in cache avoids extra DB calls.\n return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,\n scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n if kvp:\n LOG.debug('Got value %s from datastore.', kvp.value)\n return kvp.value if kvp else ''\n\n\nclass UserKeyValueLookup(object):\n\n def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):\n if not scope:\n scope = FULL_USER_SCOPE\n\n if scope == USER_SCOPE:\n scope = FULL_USER_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._user = user\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = UserKeyReference(name=name, user=self._user).ref\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. Saving the value in cache avoids extra DB calls.\n return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,\n cache=self._value_cache, scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n return kvp.value if kvp else ''\n\n\ndef get_key_reference(scope, name, user=None):\n \"\"\"\n Given a key name and user this method returns a new name (string ref)\n to address the key value pair in the context of that user.\n\n :param user: User to whom key belongs.\n :type name: ``str``\n\n :param name: Original name of the key.\n :type name: ``str``\n\n :rtype: ``str``\n \"\"\"\n if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE):\n return name\n elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE):\n if not user:\n raise InvalidUserException('A valid user must be specified for user key ref.')\n return UserKeyReference(name=name, user=user).ref\n else:\n raise InvalidScopeException('Scope \"%s\" is not valid. Allowed scopes are %s.' %\n (scope, ALLOWED_SCOPES))\n", "path": "st2common/st2common/services/keyvalues.py"}], "after_files": [{"content": "# Licensed to the StackStorm, Inc ('StackStorm') under one or more\n# contributor license agreements. 
See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom st2common import log as logging\n\nfrom st2common.constants.keyvalue import SYSTEM_SCOPE, FULL_SYSTEM_SCOPE\nfrom st2common.constants.keyvalue import USER_SCOPE, FULL_USER_SCOPE\nfrom st2common.constants.keyvalue import ALLOWED_SCOPES\nfrom st2common.constants.keyvalue import DATASTORE_KEY_SEPARATOR\nfrom st2common.exceptions.keyvalue import InvalidScopeException, InvalidUserException\nfrom st2common.models.system.keyvalue import UserKeyReference\nfrom st2common.persistence.keyvalue import KeyValuePair\n\n__all__ = [\n 'get_kvp_for_name',\n 'get_values_for_names',\n\n 'KeyValueLookup',\n 'UserKeyValueLookup'\n]\n\nLOG = logging.getLogger(__name__)\n\n\ndef get_kvp_for_name(name):\n try:\n kvp_db = KeyValuePair.get_by_name(name)\n except ValueError:\n kvp_db = None\n\n return kvp_db\n\n\ndef get_values_for_names(names, default_value=None):\n \"\"\"\n Retrieve values for the provided key names (multi get).\n\n If a KeyValuePair objects for a particular name doesn't exist, the dictionary will contain\n default_value for that name.\n\n :rtype: ``dict``\n \"\"\"\n result = {}\n kvp_dbs = KeyValuePair.get_by_names(names=names)\n\n name_to_kvp_db_map = {}\n for kvp_db in kvp_dbs:\n name_to_kvp_db_map[kvp_db.name] = kvp_db.value\n\n for name in names:\n result[name] = name_to_kvp_db_map.get(name, default_value)\n\n return result\n\n\nclass KeyValueLookup(object):\n\n def __init__(self, prefix=None, key_prefix=None, cache=None, scope=FULL_SYSTEM_SCOPE):\n if not scope:\n scope = FULL_SYSTEM_SCOPE\n\n if scope == SYSTEM_SCOPE:\n scope = FULL_SYSTEM_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __int__(self):\n return int(float(self))\n\n def __float__(self):\n return float(str(self))\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = name\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. 
Saving the value in cache avoids extra DB calls.\n return KeyValueLookup(prefix=self._prefix, key_prefix=key, cache=self._value_cache,\n scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n LOG.debug('Lookup system kv: scope: %s and key: %s', scope, key)\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n if kvp:\n LOG.debug('Got value %s from datastore.', kvp.value)\n return kvp.value if kvp else ''\n\n\nclass UserKeyValueLookup(object):\n\n def __init__(self, user, prefix=None, key_prefix=None, cache=None, scope=FULL_USER_SCOPE):\n if not scope:\n scope = FULL_USER_SCOPE\n\n if scope == USER_SCOPE:\n scope = FULL_USER_SCOPE\n\n self._prefix = prefix\n self._key_prefix = key_prefix or ''\n self._value_cache = cache or {}\n self._user = user\n self._scope = scope\n\n def __str__(self):\n return self._value_cache[self._key_prefix]\n\n def __getitem__(self, key):\n return self._get(key)\n\n def __getattr__(self, name):\n return self._get(name)\n\n def _get(self, name):\n # get the value for this key and save in value_cache\n if self._key_prefix:\n key = '%s.%s' % (self._key_prefix, name)\n else:\n key = UserKeyReference(name=name, user=self._user).ref\n\n if self._prefix:\n kvp_key = DATASTORE_KEY_SEPARATOR.join([self._prefix, key])\n else:\n kvp_key = key\n\n value = self._get_kv(kvp_key)\n self._value_cache[key] = value\n # return a KeyValueLookup as response since the lookup may not be complete e.g. if\n # the lookup is for 'key_base.key_value' it is likely that the calling code, e.g. Jinja,\n # will expect to do a dictionary style lookup for key_base and key_value as subsequent\n # calls. Saving the value in cache avoids extra DB calls.\n return UserKeyValueLookup(prefix=self._prefix, user=self._user, key_prefix=key,\n cache=self._value_cache, scope=self._scope)\n\n def _get_kv(self, key):\n scope = self._scope\n kvp = KeyValuePair.get_by_scope_and_name(scope=scope, name=key)\n return kvp.value if kvp else ''\n\n\ndef get_key_reference(scope, name, user=None):\n \"\"\"\n Given a key name and user this method returns a new name (string ref)\n to address the key value pair in the context of that user.\n\n :param user: User to whom key belongs.\n :type name: ``str``\n\n :param name: Original name of the key.\n :type name: ``str``\n\n :rtype: ``str``\n \"\"\"\n if (scope == SYSTEM_SCOPE or scope == FULL_SYSTEM_SCOPE):\n return name\n elif (scope == USER_SCOPE or scope == FULL_USER_SCOPE):\n if not user:\n raise InvalidUserException('A valid user must be specified for user key ref.')\n return UserKeyReference(name=name, user=user).ref\n else:\n raise InvalidScopeException('Scope \"%s\" is not valid. Allowed scopes are %s.' %\n (scope, ALLOWED_SCOPES))\n", "path": "st2common/st2common/services/keyvalues.py"}]}
num_tokens: 2,363
num_tokens_diff: 132
problem_id: gh_patches_debug_7723
source: rasdani/github-patches
task_type: git_diff
in_source_id: numpy__numpy-5879
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- _broadcast_shape is broken when 32 < nargs xref https://github.com/numpy/numpy/pull/5371 ``` python >>> np.__version__ '1.10.0.dev0+30e3d41' >>> from numpy.lib.stride_tricks import _broadcast_shape >>> lst = [np.ones((5, 7, 11))] * 32 + [np.ones((2, 3))] * 32 >>> _broadcast_shape(*lst) (2, 3) >>> _broadcast_shape(*lst[::-1]) # process the list in reverse order (5, 7, 11) ``` In [this line](https://github.com/numpy/numpy/blob/05b5335ecf25e59477956b4f85b9a8edbdf71bcc/numpy/lib/stride_tricks.py#L123) `b` is ultimately converted to an array of shape `1 x 1`; so it will broadcast with the rest of args regardless of their shape. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `numpy/lib/stride_tricks.py` Content: ``` 1 """ 2 Utilities that manipulate strides to achieve desirable effects. 3 4 An explanation of strides can be found in the "ndarray.rst" file in the 5 NumPy reference guide. 6 7 """ 8 from __future__ import division, absolute_import, print_function 9 10 import numpy as np 11 12 __all__ = ['broadcast_to', 'broadcast_arrays'] 13 14 15 class DummyArray(object): 16 """Dummy object that just exists to hang __array_interface__ dictionaries 17 and possibly keep alive a reference to a base array. 18 """ 19 20 def __init__(self, interface, base=None): 21 self.__array_interface__ = interface 22 self.base = base 23 24 25 def _maybe_view_as_subclass(original_array, new_array): 26 if type(original_array) is not type(new_array): 27 # if input was an ndarray subclass and subclasses were OK, 28 # then view the result as that subclass. 29 new_array = new_array.view(type=type(original_array)) 30 # Since we have done something akin to a view from original_array, we 31 # should let the subclass finalize (if it has it implemented, i.e., is 32 # not None). 33 if new_array.__array_finalize__: 34 new_array.__array_finalize__(original_array) 35 return new_array 36 37 38 def as_strided(x, shape=None, strides=None, subok=False): 39 """ Make an ndarray from the given array with the given shape and strides. 
40 """ 41 # first convert input to array, possibly keeping subclass 42 x = np.array(x, copy=False, subok=subok) 43 interface = dict(x.__array_interface__) 44 if shape is not None: 45 interface['shape'] = tuple(shape) 46 if strides is not None: 47 interface['strides'] = tuple(strides) 48 array = np.asarray(DummyArray(interface, base=x)) 49 50 if array.dtype.fields is None and x.dtype.fields is not None: 51 # This should only happen if x.dtype is [('', 'Vx')] 52 array.dtype = x.dtype 53 54 return _maybe_view_as_subclass(x, array) 55 56 57 def _broadcast_to(array, shape, subok, readonly): 58 shape = tuple(shape) if np.iterable(shape) else (shape,) 59 array = np.array(array, copy=False, subok=subok) 60 if not shape and array.shape: 61 raise ValueError('cannot broadcast a non-scalar to a scalar array') 62 if any(size < 0 for size in shape): 63 raise ValueError('all elements of broadcast shape must be non-' 64 'negative') 65 broadcast = np.nditer( 66 (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'], 67 op_flags=['readonly'], itershape=shape, order='C').itviews[0] 68 result = _maybe_view_as_subclass(array, broadcast) 69 if not readonly and array.flags.writeable: 70 result.flags.writeable = True 71 return result 72 73 74 def broadcast_to(array, shape, subok=False): 75 """Broadcast an array to a new shape. 76 77 Parameters 78 ---------- 79 array : array_like 80 The array to broadcast. 81 shape : tuple 82 The shape of the desired array. 83 subok : bool, optional 84 If True, then sub-classes will be passed-through, otherwise 85 the returned array will be forced to be a base-class array (default). 86 87 Returns 88 ------- 89 broadcast : array 90 A readonly view on the original array with the given shape. It is 91 typically not contiguous. Furthermore, more than one element of a 92 broadcasted array may refer to a single memory location. 93 94 Raises 95 ------ 96 ValueError 97 If the array is not compatible with the new shape according to NumPy's 98 broadcasting rules. 99 100 Notes 101 ----- 102 .. versionadded:: 1.10.0 103 104 Examples 105 -------- 106 >>> x = np.array([1, 2, 3]) 107 >>> np.broadcast_to(x, (3, 3)) 108 array([[1, 2, 3], 109 [1, 2, 3], 110 [1, 2, 3]]) 111 """ 112 return _broadcast_to(array, shape, subok=subok, readonly=True) 113 114 115 def _broadcast_shape(*args): 116 """Returns the shape of the ararys that would result from broadcasting the 117 supplied arrays against each other. 118 """ 119 if not args: 120 raise ValueError('must provide at least one argument') 121 if len(args) == 1: 122 # a single argument does not work with np.broadcast 123 return np.asarray(args[0]).shape 124 # use the old-iterator because np.nditer does not handle size 0 arrays 125 # consistently 126 b = np.broadcast(*args[:32]) 127 # unfortunately, it cannot handle 32 or more arguments directly 128 for pos in range(32, len(args), 31): 129 b = np.broadcast(b, *args[pos:(pos + 31)]) 130 return b.shape 131 132 133 def broadcast_arrays(*args, **kwargs): 134 """ 135 Broadcast any number of arrays against each other. 136 137 Parameters 138 ---------- 139 `*args` : array_likes 140 The arrays to broadcast. 141 142 subok : bool, optional 143 If True, then sub-classes will be passed-through, otherwise 144 the returned arrays will be forced to be a base-class array (default). 145 146 Returns 147 ------- 148 broadcasted : list of arrays 149 These arrays are views on the original arrays. They are typically 150 not contiguous. 
Furthermore, more than one element of a 151 broadcasted array may refer to a single memory location. If you 152 need to write to the arrays, make copies first. 153 154 Examples 155 -------- 156 >>> x = np.array([[1,2,3]]) 157 >>> y = np.array([[1],[2],[3]]) 158 >>> np.broadcast_arrays(x, y) 159 [array([[1, 2, 3], 160 [1, 2, 3], 161 [1, 2, 3]]), array([[1, 1, 1], 162 [2, 2, 2], 163 [3, 3, 3]])] 164 165 Here is a useful idiom for getting contiguous copies instead of 166 non-contiguous views. 167 168 >>> [np.array(a) for a in np.broadcast_arrays(x, y)] 169 [array([[1, 2, 3], 170 [1, 2, 3], 171 [1, 2, 3]]), array([[1, 1, 1], 172 [2, 2, 2], 173 [3, 3, 3]])] 174 175 """ 176 # nditer is not used here to avoid the limit of 32 arrays. 177 # Otherwise, something like the following one-liner would suffice: 178 # return np.nditer(args, flags=['multi_index', 'zerosize_ok'], 179 # order='C').itviews 180 181 subok = kwargs.pop('subok', False) 182 if kwargs: 183 raise TypeError('broadcast_arrays() got an unexpected keyword ' 184 'argument {}'.format(kwargs.pop())) 185 args = [np.array(_m, copy=False, subok=subok) for _m in args] 186 187 shape = _broadcast_shape(*args) 188 189 if all(array.shape == shape for array in args): 190 # Common case where nothing needs to be broadcasted. 191 return args 192 193 # TODO: consider making the results of broadcast_arrays readonly to match 194 # broadcast_to. This will require a deprecation cycle. 195 return [_broadcast_to(array, shape, subok=subok, readonly=False) 196 for array in args] 197 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py --- a/numpy/lib/stride_tricks.py +++ b/numpy/lib/stride_tricks.py @@ -126,6 +126,10 @@ b = np.broadcast(*args[:32]) # unfortunately, it cannot handle 32 or more arguments directly for pos in range(32, len(args), 31): + # ironically, np.broadcast does not properly handle np.broadcast + # objects (it treats them as scalars) + # use broadcasting to avoid allocating the full array + b = broadcast_to(0, b.shape) b = np.broadcast(b, *args[pos:(pos + 31)]) return b.shape
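The comment this patch adds is the whole story: `np.broadcast` accepts an `np.broadcast` object but treats it as a 0-d scalar, so chaining loses the shape accumulated so far, whereas a zero-strided `broadcast_to(0, b.shape)` view carries that shape forward. A minimal sketch of the behaviour, reusing the shapes from the issue:

```python
import numpy as np

b = np.broadcast(np.ones((5, 7, 11)), np.ones((5, 7, 11)))

# the broadcast object is treated as a scalar, so its (5, 7, 11) shape is lost
np.broadcast(b, np.ones((2, 3))).shape                         # -> (2, 3)

# a zero-strided stand-in keeps the shape, so the mismatch is detected
np.broadcast(np.broadcast_to(0, b.shape), np.ones((2, 3)))     # raises ValueError
```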
{"golden_diff": "diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py\n--- a/numpy/lib/stride_tricks.py\n+++ b/numpy/lib/stride_tricks.py\n@@ -126,6 +126,10 @@\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n+ # ironically, np.broadcast does not properly handle np.broadcast\n+ # objects (it treats them as scalars)\n+ # use broadcasting to avoid allocating the full array\n+ b = broadcast_to(0, b.shape)\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n", "issue": "_broadcast_shape is broken when 32 < nargs\nxref https://github.com/numpy/numpy/pull/5371\n\n``` python\n>>> np.__version__\n'1.10.0.dev0+30e3d41'\n>>> from numpy.lib.stride_tricks import _broadcast_shape\n>>> lst = [np.ones((5, 7, 11))] * 32 + [np.ones((2, 3))] * 32\n>>> _broadcast_shape(*lst)\n(2, 3)\n>>> _broadcast_shape(*lst[::-1]) # process the list in reverse order\n(5, 7, 11)\n```\n\nIn [this line](https://github.com/numpy/numpy/blob/05b5335ecf25e59477956b4f85b9a8edbdf71bcc/numpy/lib/stride_tricks.py#L123) `b` is ultimately converted to an array of shape `1 x 1`; so it will broadcast with the rest of args regardless of their shape.\n\n", "before_files": [{"content": "\"\"\"\nUtilities that manipulate strides to achieve desirable effects.\n\nAn explanation of strides can be found in the \"ndarray.rst\" file in the\nNumPy reference guide.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n__all__ = ['broadcast_to', 'broadcast_arrays']\n\n\nclass DummyArray(object):\n \"\"\"Dummy object that just exists to hang __array_interface__ dictionaries\n and possibly keep alive a reference to a base array.\n \"\"\"\n\n def __init__(self, interface, base=None):\n self.__array_interface__ = interface\n self.base = base\n\n\ndef _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n\ndef as_strided(x, shape=None, strides=None, subok=False):\n \"\"\" Make an ndarray from the given array with the given shape and strides.\n \"\"\"\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=False, subok=subok)\n interface = dict(x.__array_interface__)\n if shape is not None:\n interface['shape'] = tuple(shape)\n if strides is not None:\n interface['strides'] = tuple(strides)\n array = np.asarray(DummyArray(interface, base=x))\n\n if array.dtype.fields is None and x.dtype.fields is not None:\n # This should only happen if x.dtype is [('', 'Vx')]\n array.dtype = x.dtype\n\n return _maybe_view_as_subclass(x, array)\n\n\ndef _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shape):\n raise ValueError('all elements of broadcast shape must be non-'\n 'negative')\n broadcast = np.nditer(\n (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],\n 
op_flags=['readonly'], itershape=shape, order='C').itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n if not readonly and array.flags.writeable:\n result.flags.writeable = True\n return result\n\n\ndef broadcast_to(array, shape, subok=False):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned array will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n ValueError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n\n Notes\n -----\n .. versionadded:: 1.10.0\n\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> np.broadcast_to(x, (3, 3))\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]])\n \"\"\"\n return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\ndef _broadcast_shape(*args):\n \"\"\"Returns the shape of the ararys that would result from broadcasting the\n supplied arrays against each other.\n \"\"\"\n if not args:\n raise ValueError('must provide at least one argument')\n if len(args) == 1:\n # a single argument does not work with np.broadcast\n return np.asarray(args[0]).shape\n # use the old-iterator because np.nditer does not handle size 0 arrays\n # consistently\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n\n\ndef broadcast_arrays(*args, **kwargs):\n \"\"\"\n Broadcast any number of arrays against each other.\n\n Parameters\n ----------\n `*args` : array_likes\n The arrays to broadcast.\n\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned arrays will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcasted : list of arrays\n These arrays are views on the original arrays. They are typically\n not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location. 
If you\n need to write to the arrays, make copies first.\n\n Examples\n --------\n >>> x = np.array([[1,2,3]])\n >>> y = np.array([[1],[2],[3]])\n >>> np.broadcast_arrays(x, y)\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n Here is a useful idiom for getting contiguous copies instead of\n non-contiguous views.\n\n >>> [np.array(a) for a in np.broadcast_arrays(x, y)]\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n \"\"\"\n # nditer is not used here to avoid the limit of 32 arrays.\n # Otherwise, something like the following one-liner would suffice:\n # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],\n # order='C').itviews\n\n subok = kwargs.pop('subok', False)\n if kwargs:\n raise TypeError('broadcast_arrays() got an unexpected keyword '\n 'argument {}'.format(kwargs.pop()))\n args = [np.array(_m, copy=False, subok=subok) for _m in args]\n\n shape = _broadcast_shape(*args)\n\n if all(array.shape == shape for array in args):\n # Common case where nothing needs to be broadcasted.\n return args\n\n # TODO: consider making the results of broadcast_arrays readonly to match\n # broadcast_to. This will require a deprecation cycle.\n return [_broadcast_to(array, shape, subok=subok, readonly=False)\n for array in args]\n", "path": "numpy/lib/stride_tricks.py"}], "after_files": [{"content": "\"\"\"\nUtilities that manipulate strides to achieve desirable effects.\n\nAn explanation of strides can be found in the \"ndarray.rst\" file in the\nNumPy reference guide.\n\n\"\"\"\nfrom __future__ import division, absolute_import, print_function\n\nimport numpy as np\n\n__all__ = ['broadcast_to', 'broadcast_arrays']\n\n\nclass DummyArray(object):\n \"\"\"Dummy object that just exists to hang __array_interface__ dictionaries\n and possibly keep alive a reference to a base array.\n \"\"\"\n\n def __init__(self, interface, base=None):\n self.__array_interface__ = interface\n self.base = base\n\n\ndef _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n\ndef as_strided(x, shape=None, strides=None, subok=False):\n \"\"\" Make an ndarray from the given array with the given shape and strides.\n \"\"\"\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=False, subok=subok)\n interface = dict(x.__array_interface__)\n if shape is not None:\n interface['shape'] = tuple(shape)\n if strides is not None:\n interface['strides'] = tuple(strides)\n array = np.asarray(DummyArray(interface, base=x))\n\n if array.dtype.fields is None and x.dtype.fields is not None:\n # This should only happen if x.dtype is [('', 'Vx')]\n array.dtype = x.dtype\n\n return _maybe_view_as_subclass(x, array)\n\n\ndef _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=False, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shape):\n raise ValueError('all elements of 
broadcast shape must be non-'\n 'negative')\n broadcast = np.nditer(\n (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],\n op_flags=['readonly'], itershape=shape, order='C').itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n if not readonly and array.flags.writeable:\n result.flags.writeable = True\n return result\n\n\ndef broadcast_to(array, shape, subok=False):\n \"\"\"Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : array_like\n The array to broadcast.\n shape : tuple\n The shape of the desired array.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned array will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n ValueError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n\n Notes\n -----\n .. versionadded:: 1.10.0\n\n Examples\n --------\n >>> x = np.array([1, 2, 3])\n >>> np.broadcast_to(x, (3, 3))\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]])\n \"\"\"\n return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\ndef _broadcast_shape(*args):\n \"\"\"Returns the shape of the ararys that would result from broadcasting the\n supplied arrays against each other.\n \"\"\"\n if not args:\n raise ValueError('must provide at least one argument')\n if len(args) == 1:\n # a single argument does not work with np.broadcast\n return np.asarray(args[0]).shape\n # use the old-iterator because np.nditer does not handle size 0 arrays\n # consistently\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n # ironically, np.broadcast does not properly handle np.broadcast\n # objects (it treats them as scalars)\n # use broadcasting to avoid allocating the full array\n b = broadcast_to(0, b.shape)\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n\n\ndef broadcast_arrays(*args, **kwargs):\n \"\"\"\n Broadcast any number of arrays against each other.\n\n Parameters\n ----------\n `*args` : array_likes\n The arrays to broadcast.\n\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned arrays will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcasted : list of arrays\n These arrays are views on the original arrays. They are typically\n not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location. 
If you\n need to write to the arrays, make copies first.\n\n Examples\n --------\n >>> x = np.array([[1,2,3]])\n >>> y = np.array([[1],[2],[3]])\n >>> np.broadcast_arrays(x, y)\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n Here is a useful idiom for getting contiguous copies instead of\n non-contiguous views.\n\n >>> [np.array(a) for a in np.broadcast_arrays(x, y)]\n [array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]]), array([[1, 1, 1],\n [2, 2, 2],\n [3, 3, 3]])]\n\n \"\"\"\n # nditer is not used here to avoid the limit of 32 arrays.\n # Otherwise, something like the following one-liner would suffice:\n # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],\n # order='C').itviews\n\n subok = kwargs.pop('subok', False)\n if kwargs:\n raise TypeError('broadcast_arrays() got an unexpected keyword '\n 'argument {}'.format(kwargs.pop()))\n args = [np.array(_m, copy=False, subok=subok) for _m in args]\n\n shape = _broadcast_shape(*args)\n\n if all(array.shape == shape for array in args):\n # Common case where nothing needs to be broadcasted.\n return args\n\n # TODO: consider making the results of broadcast_arrays readonly to match\n # broadcast_to. This will require a deprecation cycle.\n return [_broadcast_to(array, shape, subok=subok, readonly=False)\n for array in args]\n", "path": "numpy/lib/stride_tricks.py"}]}
num_tokens: 2,621
num_tokens_diff: 173
problem_id: gh_patches_debug_11955
source: rasdani/github-patches
task_type: git_diff
in_source_id: MongoEngine__mongoengine-1430
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop Python 2.6 support For the most relevant discussion about the topic, see #1294. Plan: 1. In the upcoming minor release, I'm going to include `warnings.warn(msg, DeprecationWarning)`. with the message saying "Python v2.6 support is deprecated and is going to be dropped entirely in the upcoming v0.11.0 release. Update your Python version if you want to have access to the latest features and bug fixes in MongoEngine." 2. In v0.11.0 (most likely shipped with #1428), I'll update the way we do dict comprehensions and other relics of the past, thus making it truly incompatible with v2.6. Cc @lafrech @gukoff --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mongoengine/python_support.py` Content: ``` 1 """Helper functions and types to aid with Python 2.5 - 3 support.""" 2 3 import sys 4 import pymongo 5 6 7 if pymongo.version_tuple[0] < 3: 8 IS_PYMONGO_3 = False 9 else: 10 IS_PYMONGO_3 = True 11 12 PY3 = sys.version_info[0] == 3 13 14 if PY3: 15 import codecs 16 from io import BytesIO as StringIO 17 18 # return s converted to binary. b('test') should be equivalent to b'test' 19 def b(s): 20 return codecs.latin_1_encode(s)[0] 21 22 bin_type = bytes 23 txt_type = str 24 else: 25 try: 26 from cStringIO import StringIO 27 except ImportError: 28 from StringIO import StringIO 29 30 # Conversion to binary only necessary in Python 3 31 def b(s): 32 return s 33 34 bin_type = str 35 txt_type = unicode 36 37 str_types = (bin_type, txt_type) 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mongoengine/python_support.py b/mongoengine/python_support.py --- a/mongoengine/python_support.py +++ b/mongoengine/python_support.py @@ -1,9 +1,22 @@ -"""Helper functions and types to aid with Python 2.5 - 3 support.""" +"""Helper functions and types to aid with Python 2.6 - 3 support.""" import sys +import warnings + import pymongo +# Show a deprecation warning for people using Python v2.6 +# TODO remove in mongoengine v0.11.0 +if sys.version_info[0] == 2 and sys.version_info[1] == 6: + warnings.warn( + 'Python v2.6 support is deprecated and is going to be dropped ' + 'entirely in the upcoming v0.11.0 release. Update your Python ' + 'version if you want to have access to the latest features and ' + 'bug fixes in MongoEngine.', + DeprecationWarning + ) + if pymongo.version_tuple[0] < 3: IS_PYMONGO_3 = False else:
{"golden_diff": "diff --git a/mongoengine/python_support.py b/mongoengine/python_support.py\n--- a/mongoengine/python_support.py\n+++ b/mongoengine/python_support.py\n@@ -1,9 +1,22 @@\n-\"\"\"Helper functions and types to aid with Python 2.5 - 3 support.\"\"\"\n+\"\"\"Helper functions and types to aid with Python 2.6 - 3 support.\"\"\"\n \n import sys\n+import warnings\n+\n import pymongo\n \n \n+# Show a deprecation warning for people using Python v2.6\n+# TODO remove in mongoengine v0.11.0\n+if sys.version_info[0] == 2 and sys.version_info[1] == 6:\n+ warnings.warn(\n+ 'Python v2.6 support is deprecated and is going to be dropped '\n+ 'entirely in the upcoming v0.11.0 release. Update your Python '\n+ 'version if you want to have access to the latest features and '\n+ 'bug fixes in MongoEngine.',\n+ DeprecationWarning\n+ )\n+\n if pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\n else:\n", "issue": "Drop Python 2.6 support\nFor the most relevant discussion about the topic, see #1294.\r\n\r\nPlan:\r\n1. In the upcoming minor release, I'm going to include `warnings.warn(msg, DeprecationWarning)`. with the message saying \"Python v2.6 support is deprecated and is going to be dropped entirely in the upcoming v0.11.0 release. Update your Python version if you want to have access to the latest features and bug fixes in MongoEngine.\"\r\n2. In v0.11.0 (most likely shipped with #1428), I'll update the way we do dict comprehensions and other relics of the past, thus making it truly incompatible with v2.6.\r\n\r\nCc @lafrech @gukoff \n", "before_files": [{"content": "\"\"\"Helper functions and types to aid with Python 2.5 - 3 support.\"\"\"\n\nimport sys\nimport pymongo\n\n\nif pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\nelse:\n IS_PYMONGO_3 = True\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n import codecs\n from io import BytesIO as StringIO\n\n # return s converted to binary. b('test') should be equivalent to b'test'\n def b(s):\n return codecs.latin_1_encode(s)[0]\n\n bin_type = bytes\n txt_type = str\nelse:\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n\n # Conversion to binary only necessary in Python 3\n def b(s):\n return s\n\n bin_type = str\n txt_type = unicode\n\nstr_types = (bin_type, txt_type)\n", "path": "mongoengine/python_support.py"}], "after_files": [{"content": "\"\"\"Helper functions and types to aid with Python 2.6 - 3 support.\"\"\"\n\nimport sys\nimport warnings\n\nimport pymongo\n\n\n# Show a deprecation warning for people using Python v2.6\n# TODO remove in mongoengine v0.11.0\nif sys.version_info[0] == 2 and sys.version_info[1] == 6:\n warnings.warn(\n 'Python v2.6 support is deprecated and is going to be dropped '\n 'entirely in the upcoming v0.11.0 release. Update your Python '\n 'version if you want to have access to the latest features and '\n 'bug fixes in MongoEngine.',\n DeprecationWarning\n )\n\nif pymongo.version_tuple[0] < 3:\n IS_PYMONGO_3 = False\nelse:\n IS_PYMONGO_3 = True\n\nPY3 = sys.version_info[0] == 3\n\nif PY3:\n import codecs\n from io import BytesIO as StringIO\n\n # return s converted to binary. 
b('test') should be equivalent to b'test'\n def b(s):\n return codecs.latin_1_encode(s)[0]\n\n bin_type = bytes\n txt_type = str\nelse:\n try:\n from cStringIO import StringIO\n except ImportError:\n from StringIO import StringIO\n\n # Conversion to binary only necessary in Python 3\n def b(s):\n return s\n\n bin_type = str\n txt_type = unicode\n\nstr_types = (bin_type, txt_type)\n", "path": "mongoengine/python_support.py"}]}
num_tokens: 702
num_tokens_diff: 253
problem_id: gh_patches_debug_11989
source: rasdani/github-patches
task_type: git_diff
in_source_id: sagemath__sage-36173
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unoptimal memory complexity of `sage.matrix.berlekamp` The code here is unoptimal: https://github.com/sagemath/sage/blob/6695becb762aebab78ef47d0fb12eae52be5d79d/src/sage/matrix/berlekamp_massey.py#L90-L98 For example, the following code uses a lot of memory: ```python sage: from sage.matrix.berlekamp_massey import berlekamp_massey sage: p = next_prime(2**64) sage: ls = [GF(p).random_element() for _ in range(20000)] sage: berlekamp_massey(ls); ``` To be more specific, the dictionaries are not necessarily and only `f[j - 2]` and `f[j - 1]` are used every time, same for `s`. So they can be stored as temporary variables. ### Additional Information I am fixing it. ### Checklist - [X] I have searched the existing issues for a bug report that matches the one I want to file, without success. - [X] I have read the documentation and troubleshoot guide --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sage/matrix/berlekamp_massey.py` Content: ``` 1 """ 2 Minimal Polynomials of Linear Recurrence Sequences 3 4 AUTHORS: 5 6 - William Stein 7 """ 8 # **************************************************************************** 9 # Copyright (C) 2005 William Stein <[email protected]> 10 # 11 # Distributed under the terms of the GNU General Public License (GPL) 12 # 13 # This code is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 16 # General Public License for more details. 17 # 18 # The full text of the GPL is available at: 19 # 20 # https://www.gnu.org/licenses/ 21 # **************************************************************************** 22 23 import sage.rings.rational_field 24 25 26 def berlekamp_massey(a): 27 r""" 28 Use the Berlekamp-Massey algorithm to find the minimal polynomial 29 of a linear recurrence sequence `a`. 30 31 The minimal polynomial of a linear recurrence `\{a_r\}` is 32 by definition the unique monic polynomial `g`, such that if 33 `\{a_r\}` satisfies a linear recurrence 34 `a_{j+k} + b_{j-1} a_{j-1+k} + \cdots + b_0 a_k=0` 35 (for all `k\geq 0`), then `g` divides the 36 polynomial `x^j + \sum_{i=0}^{j-1} b_i x^i`. 37 38 INPUT: 39 40 - ``a`` -- a list of even length of elements of a field (or domain) 41 42 OUTPUT: 43 44 the minimal polynomial of the sequence, as a polynomial over the 45 field in which the entries of `a` live 46 47 .. WARNING:: 48 49 The result is only guaranteed to be correct on the full 50 sequence if there exists a linear recurrence of length less 51 than half the length of `a`. 52 53 EXAMPLES:: 54 55 sage: from sage.matrix.berlekamp_massey import berlekamp_massey 56 sage: berlekamp_massey([1,2,1,2,1,2]) 57 x^2 - 1 58 sage: berlekamp_massey([GF(7)(1), 19, 1, 19]) 59 x^2 + 6 60 sage: berlekamp_massey([2,2,1,2,1,191,393,132]) 61 x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673 62 sage: berlekamp_massey(prime_range(2, 38)) # needs sage.libs.pari 63 x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9 64 65 TESTS:: 66 67 sage: berlekamp_massey("banana") 68 Traceback (most recent call last): 69 ... 70 TypeError: argument must be a list or tuple 71 sage: berlekamp_massey([1,2,5]) 72 Traceback (most recent call last): 73 ... 
74 ValueError: argument must have an even number of terms 75 """ 76 if not isinstance(a, (list, tuple)): 77 raise TypeError("argument must be a list or tuple") 78 if len(a) % 2: 79 raise ValueError("argument must have an even number of terms") 80 81 M = len(a) // 2 82 83 try: 84 K = a[0].parent().fraction_field() 85 except AttributeError: 86 K = sage.rings.rational_field.RationalField() 87 R = K['x'] 88 x = R.gen() 89 90 f = {-1: R(a), 0: x**(2 * M)} 91 s = {-1: 1, 0: 0} 92 j = 0 93 while f[j].degree() >= M: 94 j += 1 95 qj, f[j] = f[j - 2].quo_rem(f[j - 1]) 96 s[j] = s[j - 2] - qj * s[j - 1] 97 t = s[j].reverse() 98 return ~(t[t.degree()]) * t # make monic (~ is inverse in python) 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sage/matrix/berlekamp_massey.py b/src/sage/matrix/berlekamp_massey.py --- a/src/sage/matrix/berlekamp_massey.py +++ b/src/sage/matrix/berlekamp_massey.py @@ -84,15 +84,11 @@ K = a[0].parent().fraction_field() except AttributeError: K = sage.rings.rational_field.RationalField() - R = K['x'] - x = R.gen() - - f = {-1: R(a), 0: x**(2 * M)} - s = {-1: 1, 0: 0} - j = 0 - while f[j].degree() >= M: - j += 1 - qj, f[j] = f[j - 2].quo_rem(f[j - 1]) - s[j] = s[j - 2] - qj * s[j - 1] - t = s[j].reverse() - return ~(t[t.degree()]) * t # make monic (~ is inverse in python) + + R, x = K['x'].objgen() + f0, f1 = R(a), x**(2 * M) + s0, s1 = 1, 0 + while f1.degree() >= M: + f0, (q, f1) = f1, f0.quo_rem(f1) + s0, s1 = s1, s0 - q * s1 + return s1.reverse().monic()
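The rewritten loop is a truncated extended Euclidean algorithm that keeps only the two most recent remainders and cofactors, which is where the memory saving comes from. A rolling-pair sketch of the same pattern, with plain integers standing in for the polynomials and the degree bound `M`:

```python
# integers stand in for the polynomials f0, f1; `bound` plays the role of M
def rolling_euclid(f0, f1, bound):
    s0, s1 = 1, 0
    while f1 >= bound:              # analogue of `f1.degree() >= M`
        q, r = divmod(f0, f1)       # analogue of `f0.quo_rem(f1)`
        f0, f1 = f1, r
        s0, s1 = s1, s0 - q * s1
    return f1, s1                   # only two of each value are ever alive
```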
{"golden_diff": "diff --git a/src/sage/matrix/berlekamp_massey.py b/src/sage/matrix/berlekamp_massey.py\n--- a/src/sage/matrix/berlekamp_massey.py\n+++ b/src/sage/matrix/berlekamp_massey.py\n@@ -84,15 +84,11 @@\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n- R = K['x']\n- x = R.gen()\n-\n- f = {-1: R(a), 0: x**(2 * M)}\n- s = {-1: 1, 0: 0}\n- j = 0\n- while f[j].degree() >= M:\n- j += 1\n- qj, f[j] = f[j - 2].quo_rem(f[j - 1])\n- s[j] = s[j - 2] - qj * s[j - 1]\n- t = s[j].reverse()\n- return ~(t[t.degree()]) * t # make monic (~ is inverse in python)\n+\n+ R, x = K['x'].objgen()\n+ f0, f1 = R(a), x**(2 * M)\n+ s0, s1 = 1, 0\n+ while f1.degree() >= M:\n+ f0, (q, f1) = f1, f0.quo_rem(f1)\n+ s0, s1 = s1, s0 - q * s1\n+ return s1.reverse().monic()\n", "issue": "Unoptimal memory complexity of `sage.matrix.berlekamp`\nThe code here is unoptimal:\r\n\r\nhttps://github.com/sagemath/sage/blob/6695becb762aebab78ef47d0fb12eae52be5d79d/src/sage/matrix/berlekamp_massey.py#L90-L98\r\n\r\nFor example, the following code uses a lot of memory:\r\n\r\n```python\r\nsage: from sage.matrix.berlekamp_massey import berlekamp_massey\r\nsage: p = next_prime(2**64)\r\nsage: ls = [GF(p).random_element() for _ in range(20000)]\r\nsage: berlekamp_massey(ls);\r\n```\r\n\r\nTo be more specific, the dictionaries are not necessarily and only `f[j - 2]` and `f[j - 1]` are used every time, same for `s`. So they can be stored as temporary variables.\r\n\r\n### Additional Information\r\n\r\nI am fixing it.\r\n\r\n### Checklist\r\n\r\n- [X] I have searched the existing issues for a bug report that matches the one I want to file, without success.\r\n- [X] I have read the documentation and troubleshoot guide\n", "before_files": [{"content": "\"\"\"\nMinimal Polynomials of Linear Recurrence Sequences\n\nAUTHORS:\n\n- William Stein\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2005 William Stein <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport sage.rings.rational_field\n\n\ndef berlekamp_massey(a):\n r\"\"\"\n Use the Berlekamp-Massey algorithm to find the minimal polynomial\n of a linear recurrence sequence `a`.\n\n The minimal polynomial of a linear recurrence `\\{a_r\\}` is\n by definition the unique monic polynomial `g`, such that if\n `\\{a_r\\}` satisfies a linear recurrence\n `a_{j+k} + b_{j-1} a_{j-1+k} + \\cdots + b_0 a_k=0`\n (for all `k\\geq 0`), then `g` divides the\n polynomial `x^j + \\sum_{i=0}^{j-1} b_i x^i`.\n\n INPUT:\n\n - ``a`` -- a list of even length of elements of a field (or domain)\n\n OUTPUT:\n\n the minimal polynomial of the sequence, as a polynomial over the\n field in which the entries of `a` live\n\n .. 
WARNING::\n\n The result is only guaranteed to be correct on the full\n sequence if there exists a linear recurrence of length less\n than half the length of `a`.\n\n EXAMPLES::\n\n sage: from sage.matrix.berlekamp_massey import berlekamp_massey\n sage: berlekamp_massey([1,2,1,2,1,2])\n x^2 - 1\n sage: berlekamp_massey([GF(7)(1), 19, 1, 19])\n x^2 + 6\n sage: berlekamp_massey([2,2,1,2,1,191,393,132])\n x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673\n sage: berlekamp_massey(prime_range(2, 38)) # needs sage.libs.pari\n x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9\n\n TESTS::\n\n sage: berlekamp_massey(\"banana\")\n Traceback (most recent call last):\n ...\n TypeError: argument must be a list or tuple\n sage: berlekamp_massey([1,2,5])\n Traceback (most recent call last):\n ...\n ValueError: argument must have an even number of terms\n \"\"\"\n if not isinstance(a, (list, tuple)):\n raise TypeError(\"argument must be a list or tuple\")\n if len(a) % 2:\n raise ValueError(\"argument must have an even number of terms\")\n\n M = len(a) // 2\n\n try:\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n R = K['x']\n x = R.gen()\n\n f = {-1: R(a), 0: x**(2 * M)}\n s = {-1: 1, 0: 0}\n j = 0\n while f[j].degree() >= M:\n j += 1\n qj, f[j] = f[j - 2].quo_rem(f[j - 1])\n s[j] = s[j - 2] - qj * s[j - 1]\n t = s[j].reverse()\n return ~(t[t.degree()]) * t # make monic (~ is inverse in python)\n", "path": "src/sage/matrix/berlekamp_massey.py"}], "after_files": [{"content": "\"\"\"\nMinimal Polynomials of Linear Recurrence Sequences\n\nAUTHORS:\n\n- William Stein\n\"\"\"\n# ****************************************************************************\n# Copyright (C) 2005 William Stein <[email protected]>\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# General Public License for more details.\n#\n# The full text of the GPL is available at:\n#\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport sage.rings.rational_field\n\n\ndef berlekamp_massey(a):\n r\"\"\"\n Use the Berlekamp-Massey algorithm to find the minimal polynomial\n of a linear recurrence sequence `a`.\n\n The minimal polynomial of a linear recurrence `\\{a_r\\}` is\n by definition the unique monic polynomial `g`, such that if\n `\\{a_r\\}` satisfies a linear recurrence\n `a_{j+k} + b_{j-1} a_{j-1+k} + \\cdots + b_0 a_k=0`\n (for all `k\\geq 0`), then `g` divides the\n polynomial `x^j + \\sum_{i=0}^{j-1} b_i x^i`.\n\n INPUT:\n\n - ``a`` -- a list of even length of elements of a field (or domain)\n\n OUTPUT:\n\n the minimal polynomial of the sequence, as a polynomial over the\n field in which the entries of `a` live\n\n .. 
WARNING::\n\n The result is only guaranteed to be correct on the full\n sequence if there exists a linear recurrence of length less\n than half the length of `a`.\n\n EXAMPLES::\n\n sage: from sage.matrix.berlekamp_massey import berlekamp_massey\n sage: berlekamp_massey([1,2,1,2,1,2])\n x^2 - 1\n sage: berlekamp_massey([GF(7)(1),19,1,19])\n x^2 + 6\n sage: berlekamp_massey([2,2,1,2,1,191,393,132])\n x^4 - 36727/11711*x^3 + 34213/5019*x^2 + 7024942/35133*x - 335813/1673\n sage: berlekamp_massey(prime_range(2,38))\n x^6 - 14/9*x^5 - 7/9*x^4 + 157/54*x^3 - 25/27*x^2 - 73/18*x + 37/9\n\n TESTS::\n\n sage: berlekamp_massey(\"banana\")\n Traceback (most recent call last):\n ...\n TypeError: argument must be a list or tuple\n sage: berlekamp_massey([1,2,5])\n Traceback (most recent call last):\n ...\n ValueError: argument must have an even number of terms\n \"\"\"\n if not isinstance(a, (list, tuple)):\n raise TypeError(\"argument must be a list or tuple\")\n if len(a) % 2:\n raise ValueError(\"argument must have an even number of terms\")\n\n M = len(a) // 2\n\n try:\n K = a[0].parent().fraction_field()\n except AttributeError:\n K = sage.rings.rational_field.RationalField()\n\n R, x = K['x'].objgen()\n f0, f1 = R(a), x**(2 * M)\n s0, s1 = 1, 0\n while f1.degree() >= M:\n f0, (q, f1) = f1, f0.quo_rem(f1)\n s0, s1 = s1, s0 - q * s1\n return s1.reverse().monic()\n", "path": "src/sage/matrix/berlekamp_massey.py"}]}
num_tokens: 1,705
num_tokens_diff: 362
problem_id: gh_patches_debug_2024
source: rasdani/github-patches
task_type: git_diff
in_source_id: facebookresearch__fairseq-214
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Size Mismatch in AdaptiveSoftmax when targets are not specified Following up on #212 , I'm updating `sequence_generator.py` to generate text from a pre-trained language model (initially trained with adaptive softmax). When computing log probabilities, and the targets are set to none, I receive a size mismatch exception in the line below, possibly because the dictionary size is smaller than the adaptive softmax cut-off: https://github.com/pytorch/fairseq/blob/388c520be21752cacb9fe3b1712038f32e0e9a5f/fairseq/modules/adaptive_softmax.py#L126 I imagine this could be solved by some sort of truncation to the output of tail[i].input --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `fairseq/modules/adaptive_softmax.py` Content: ``` 1 # Copyright (c) 2017-present, Facebook, Inc. 2 # All rights reserved. 3 # 4 # This source code is licensed under the license found in the LICENSE file in 5 # the root directory of this source tree. An additional grant of patent rights 6 # can be found in the PATENTS file in the same directory. 7 8 9 import torch.nn.functional as F 10 from torch import nn 11 12 13 class AdaptiveSoftmax(nn.Module): 14 """ 15 This is an implementation of the efficient softmax approximation for 16 graphical processing units (GPU), described in the paper "Efficient softmax 17 approximation for GPUs" (http://arxiv.org/abs/1609.04309). 18 """ 19 20 def __init__(self, vocab_size, input_dim, cutoff, dropout): 21 super().__init__() 22 23 if vocab_size > cutoff[-1]: 24 cutoff = cutoff + [vocab_size] 25 26 output_dim = cutoff[0] + len(cutoff) - 1 27 28 self.vocab_size = vocab_size 29 self.cutoff = cutoff 30 self.dropout = dropout 31 32 self.lsm = nn.LogSoftmax(dim=1) 33 self.head = nn.Linear(input_dim, output_dim, bias=False) 34 self.tail = nn.ModuleList() 35 36 for i in range(len(cutoff) - 1): 37 self.tail.append( 38 nn.Sequential( 39 nn.Linear(input_dim, input_dim // 4 ** i, bias=False), 40 nn.Dropout(dropout), 41 nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False) 42 ) 43 ) 44 45 def init_weights(m): 46 if hasattr(m, 'weight'): 47 nn.init.xavier_uniform_(m.weight) 48 49 self.apply(init_weights) 50 51 def adapt_target(self, target): 52 """ 53 In order to be efficient, the AdaptiveSoftMax does not compute the 54 scores for all the word of the vocabulary for all the examples. It is 55 thus necessary to call the method adapt_target of the AdaptiveSoftMax 56 layer inside each forward pass. 
57 """ 58 59 target = target.view(-1) 60 new_target = [target.clone()] 61 target_idxs = [] 62 63 for i in range(len(self.cutoff) - 1): 64 mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1])) 65 new_target[0][mask] = self.cutoff[0] + i - 1 66 67 if mask.any(): 68 target_idxs.append(mask.nonzero().squeeze(1)) 69 new_target.append(target[mask].add(-self.cutoff[i])) 70 else: 71 target_idxs.append(None) 72 new_target.append(None) 73 74 return new_target, target_idxs 75 76 def forward(self, input, target): 77 """ 78 Args: 79 input: (b x t x d) 80 target: (b x t) 81 Returns: 82 2 lists: output for each cutoff section and new targets by cut off 83 """ 84 85 input = input.contiguous().view(-1, input.size(-1)) 86 input = F.dropout(input, p=self.dropout, training=self.training) 87 88 new_target, target_idxs = self.adapt_target(target) 89 output = [self.head(input)] 90 91 for i in range(len(target_idxs)): 92 if target_idxs[i] is not None: 93 output.append(self.tail[i](input.index_select(0, target_idxs[i]))) 94 else: 95 output.append(None) 96 97 return output, new_target 98 99 def get_log_prob(self, input, target): 100 """ 101 Computes the log probabilities for all the words of the vocabulary, 102 given a 2D tensor of hidden vectors. 103 """ 104 105 bsz, length, dim = input.size() 106 input = input.contiguous().view(-1, dim) 107 108 if target is not None: 109 _, target_idxs = self.adapt_target(target) 110 else: 111 target_idxs = None 112 113 head_y = self.head(input) 114 log_probs = head_y.new_zeros(input.size(0), self.vocab_size) 115 116 head_sz = self.cutoff[0] + len(self.tail) 117 log_probs[:, :head_sz] = self.lsm(head_y) 118 tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone() 119 120 for i in range(len(self.tail)): 121 start = self.cutoff[i] 122 end = self.cutoff[i + 1] 123 124 if target_idxs is None: 125 tail_out = log_probs[:, start:end] 126 tail_out.copy_(self.tail[i](input)) 127 log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None]) 128 elif target_idxs[i] is not None: 129 idxs = target_idxs[i] 130 tail_out = log_probs[idxs, start:end] 131 tail_out.copy_(self.tail[i](input[idxs])) 132 log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None]) 133 134 log_probs = log_probs.view(bsz, length, -1) 135 return log_probs 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/fairseq/modules/adaptive_softmax.py b/fairseq/modules/adaptive_softmax.py --- a/fairseq/modules/adaptive_softmax.py +++ b/fairseq/modules/adaptive_softmax.py @@ -22,6 +22,9 @@ if vocab_size > cutoff[-1]: cutoff = cutoff + [vocab_size] + else: + assert vocab_size == cutoff[ + -1], 'cannot specify cutoff smaller than vocab size' output_dim = cutoff[0] + len(cutoff) - 1
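The added assert matters because `get_log_prob` allocates `log_probs` with `vocab_size` columns while each tail projects to `cutoff[i + 1] - cutoff[i]` columns; when the vocabulary is smaller than the last cutoff, the `copy_` on the line the issue points to cannot line up. A small sketch with hypothetical sizes:

```python
import torch

vocab_size, cutoff = 30_000, [10_000, 40_000]     # hypothetical: vocab < cutoff[-1]

log_probs = torch.zeros(8, vocab_size)
tail_out = torch.zeros(8, cutoff[1] - cutoff[0])            # tail emits 30_000 columns
log_probs[:, cutoff[0]:cutoff[1]].copy_(tail_out)           # slice has 20_000 -> RuntimeError
```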
{"golden_diff": "diff --git a/fairseq/modules/adaptive_softmax.py b/fairseq/modules/adaptive_softmax.py\n--- a/fairseq/modules/adaptive_softmax.py\n+++ b/fairseq/modules/adaptive_softmax.py\n@@ -22,6 +22,9 @@\n \n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n+ else:\n+ assert vocab_size == cutoff[\n+ -1], 'cannot specify cutoff smaller than vocab size'\n \n output_dim = cutoff[0] + len(cutoff) - 1\n", "issue": "Size Mismatch in AdaptiveSoftmax when targets are not specified\nFollowing up on #212 , I'm updating `sequence_generator.py` to generate text from a pre-trained language model (initially trained with adaptive softmax). When computing log probabilities, and the targets are set to none, I receive a size mismatch exception in the line below, possibly because the dictionary size is smaller than the adaptive softmax cut-off: \r\n\r\nhttps://github.com/pytorch/fairseq/blob/388c520be21752cacb9fe3b1712038f32e0e9a5f/fairseq/modules/adaptive_softmax.py#L126\r\n\r\nI imagine this could be solved by some sort of truncation to the output of tail[i].input\n", "before_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass AdaptiveSoftmax(nn.Module):\n \"\"\"\n This is an implementation of the efficient softmax approximation for\n graphical processing units (GPU), described in the paper \"Efficient softmax\n approximation for GPUs\" (http://arxiv.org/abs/1609.04309).\n \"\"\"\n\n def __init__(self, vocab_size, input_dim, cutoff, dropout):\n super().__init__()\n\n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n\n output_dim = cutoff[0] + len(cutoff) - 1\n\n self.vocab_size = vocab_size\n self.cutoff = cutoff\n self.dropout = dropout\n\n self.lsm = nn.LogSoftmax(dim=1)\n self.head = nn.Linear(input_dim, output_dim, bias=False)\n self.tail = nn.ModuleList()\n\n for i in range(len(cutoff) - 1):\n self.tail.append(\n nn.Sequential(\n nn.Linear(input_dim, input_dim // 4 ** i, bias=False),\n nn.Dropout(dropout),\n nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)\n )\n )\n\n def init_weights(m):\n if hasattr(m, 'weight'):\n nn.init.xavier_uniform_(m.weight)\n\n self.apply(init_weights)\n\n def adapt_target(self, target):\n \"\"\"\n In order to be efficient, the AdaptiveSoftMax does not compute the\n scores for all the word of the vocabulary for all the examples. 
It is\n thus necessary to call the method adapt_target of the AdaptiveSoftMax\n layer inside each forward pass.\n \"\"\"\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - 1\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: (b x t x d)\n target: (b x t)\n Returns:\n 2 lists: output for each cutoff section and new targets by cut off\n \"\"\"\n\n input = input.contiguous().view(-1, input.size(-1))\n input = F.dropout(input, p=self.dropout, training=self.training)\n\n new_target, target_idxs = self.adapt_target(target)\n output = [self.head(input)]\n\n for i in range(len(target_idxs)):\n if target_idxs[i] is not None:\n output.append(self.tail[i](input.index_select(0, target_idxs[i])))\n else:\n output.append(None)\n\n return output, new_target\n\n def get_log_prob(self, input, target):\n \"\"\"\n Computes the log probabilities for all the words of the vocabulary,\n given a 2D tensor of hidden vectors.\n \"\"\"\n\n bsz, length, dim = input.size()\n input = input.contiguous().view(-1, dim)\n\n if target is not None:\n _, target_idxs = self.adapt_target(target)\n else:\n target_idxs = None\n\n head_y = self.head(input)\n log_probs = head_y.new_zeros(input.size(0), self.vocab_size)\n\n head_sz = self.cutoff[0] + len(self.tail)\n log_probs[:, :head_sz] = self.lsm(head_y)\n tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()\n\n for i in range(len(self.tail)):\n start = self.cutoff[i]\n end = self.cutoff[i + 1]\n\n if target_idxs is None:\n tail_out = log_probs[:, start:end]\n tail_out.copy_(self.tail[i](input))\n log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])\n elif target_idxs[i] is not None:\n idxs = target_idxs[i]\n tail_out = log_probs[idxs, start:end]\n tail_out.copy_(self.tail[i](input[idxs]))\n log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])\n\n log_probs = log_probs.view(bsz, length, -1)\n return log_probs\n", "path": "fairseq/modules/adaptive_softmax.py"}], "after_files": [{"content": "# Copyright (c) 2017-present, Facebook, Inc.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the LICENSE file in\n# the root directory of this source tree. 
An additional grant of patent rights\n# can be found in the PATENTS file in the same directory.\n\n\nimport torch.nn.functional as F\nfrom torch import nn\n\n\nclass AdaptiveSoftmax(nn.Module):\n \"\"\"\n This is an implementation of the efficient softmax approximation for\n graphical processing units (GPU), described in the paper \"Efficient softmax\n approximation for GPUs\" (http://arxiv.org/abs/1609.04309).\n \"\"\"\n\n def __init__(self, vocab_size, input_dim, cutoff, dropout):\n super().__init__()\n\n if vocab_size > cutoff[-1]:\n cutoff = cutoff + [vocab_size]\n else:\n assert vocab_size == cutoff[\n -1], 'cannot specify cutoff smaller than vocab size'\n\n output_dim = cutoff[0] + len(cutoff) - 1\n\n self.vocab_size = vocab_size\n self.cutoff = cutoff\n self.dropout = dropout\n\n self.lsm = nn.LogSoftmax(dim=1)\n self.head = nn.Linear(input_dim, output_dim, bias=False)\n self.tail = nn.ModuleList()\n\n for i in range(len(cutoff) - 1):\n self.tail.append(\n nn.Sequential(\n nn.Linear(input_dim, input_dim // 4 ** i, bias=False),\n nn.Dropout(dropout),\n nn.Linear(input_dim // 4 ** i, cutoff[i + 1] - cutoff[i], bias=False)\n )\n )\n\n def init_weights(m):\n if hasattr(m, 'weight'):\n nn.init.xavier_uniform_(m.weight)\n\n self.apply(init_weights)\n\n def adapt_target(self, target):\n \"\"\"\n In order to be efficient, the AdaptiveSoftMax does not compute the\n scores for all the word of the vocabulary for all the examples. It is\n thus necessary to call the method adapt_target of the AdaptiveSoftMax\n layer inside each forward pass.\n \"\"\"\n\n target = target.view(-1)\n new_target = [target.clone()]\n target_idxs = []\n\n for i in range(len(self.cutoff) - 1):\n mask = target.ge(self.cutoff[i]).mul(target.lt(self.cutoff[i + 1]))\n new_target[0][mask] = self.cutoff[0] + i - 1\n\n if mask.any():\n target_idxs.append(mask.nonzero().squeeze(1))\n new_target.append(target[mask].add(-self.cutoff[i]))\n else:\n target_idxs.append(None)\n new_target.append(None)\n\n return new_target, target_idxs\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: (b x t x d)\n target: (b x t)\n Returns:\n 2 lists: output for each cutoff section and new targets by cut off\n \"\"\"\n\n input = input.contiguous().view(-1, input.size(-1))\n input = F.dropout(input, p=self.dropout, training=self.training)\n\n new_target, target_idxs = self.adapt_target(target)\n output = [self.head(input)]\n\n for i in range(len(target_idxs)):\n if target_idxs[i] is not None:\n output.append(self.tail[i](input.index_select(0, target_idxs[i])))\n else:\n output.append(None)\n\n return output, new_target\n\n def get_log_prob(self, input, target):\n \"\"\"\n Computes the log probabilities for all the words of the vocabulary,\n given a 2D tensor of hidden vectors.\n \"\"\"\n\n bsz, length, dim = input.size()\n input = input.contiguous().view(-1, dim)\n\n if target is not None:\n _, target_idxs = self.adapt_target(target)\n else:\n target_idxs = None\n\n head_y = self.head(input)\n log_probs = head_y.new_zeros(input.size(0), self.vocab_size)\n\n head_sz = self.cutoff[0] + len(self.tail)\n log_probs[:, :head_sz] = self.lsm(head_y)\n tail_priors = log_probs[:, self.cutoff[0] - 1: head_sz - 1].clone()\n\n for i in range(len(self.tail)):\n start = self.cutoff[i]\n end = self.cutoff[i + 1]\n\n if target_idxs is None:\n tail_out = log_probs[:, start:end]\n tail_out.copy_(self.tail[i](input))\n log_probs[:, start:end] = self.lsm(tail_out).add_(tail_priors[:, i, None])\n elif target_idxs[i] is not None:\n idxs = target_idxs[i]\n 
tail_out = log_probs[idxs, start:end]\n tail_out.copy_(self.tail[i](input[idxs]))\n log_probs[idxs, start:end] = self.lsm(tail_out).add_(tail_priors[idxs, i, None])\n\n log_probs = log_probs.view(bsz, length, -1)\n return log_probs\n", "path": "fairseq/modules/adaptive_softmax.py"}]}
1,818
119
gh_patches_debug_34863
rasdani/github-patches
git_diff
microsoft__lisa-836
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ubuntu 20.04 - platform.dist() is deprecated since Python 3.5 and removed in Python 3.8 Affected distro - ubuntu 20.04 (use python 3.8) Affected case - WALA-VERIFY-VERBOSE-ENABLED-LOGS Use distro.linux_distribution(full_distribution_name=False) instead --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py` Content: ``` 1 #!/usr/bin/env python 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the Apache License. 4 from azuremodules import * 5 6 import argparse 7 import os 8 import platform 9 import time 10 11 parser = argparse.ArgumentParser() 12 13 file_path = os.path.dirname(os.path.realpath(__file__)) 14 constants_path = os.path.join(file_path, "constants.sh") 15 params = GetParams(constants_path) 16 passwd = params["PASSWORD"] 17 18 distro = platform.dist() 19 20 21 def RunTest(): 22 UpdateState("TestRunning") 23 if(distro[0] == "CoreOS"): 24 versionOutPut = Run("waagent --version") 25 else: 26 output = Run("pgrep -fa python3.*waagent") 27 if ("python3" in output) : 28 versionOutPut = Run("/usr/bin/python3 /usr/sbin/waagent --version") 29 else : 30 versionOutPut = Run("/usr/sbin/waagent --version") 31 32 RunLog.info("Checking log waagent.log...") 33 if("2.0." in versionOutPut): 34 output = Run("grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\n'") 35 RunLog.info("agent version is 2.0") 36 else: 37 output = Run("grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\n'") 38 RunLog.info("agent version > 2.0") 39 40 if not (output == "0") : 41 RunLog.info('The log file contains the verbose logs') 42 ResultLog.info('PASS') 43 UpdateState("TestCompleted") 44 else : 45 RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs') 46 ResultLog.error('FAIL') 47 UpdateState("TestCompleted") 48 49 50 def Restartwaagent(): 51 if (distro[0] == "CoreOS"): 52 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf") 53 elif (DetectDistro()[0] == 'clear-linux-os'): 54 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \ 55 /usr/share/defaults/waagent/waagent.conf") 56 else: 57 Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf") 58 RunLog.info("Restart waagent service...") 59 result = Run("echo '"+passwd+"' | sudo -S find / -name systemctl |wc -l | tr -d '\n'") 60 if (distro[0] == "Ubuntu") or (distro[0] == "debian"): 61 Run("echo '"+passwd+"' | sudo -S service walinuxagent restart") 62 else: 63 if (result == "0") : 64 os.system("echo '"+passwd+"' | sudo -S service waagent restart") 65 else: 66 os.system("echo '"+passwd+"' | sudo -S systemctl restart waagent") 67 time.sleep(60) 68 69 Restartwaagent() 70 RunTest() 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py --- a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py +++ b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py @@ -7,6 +7,7 @@ import os import platform import time +import sys parser = argparse.ArgumentParser() @@ -14,13 +15,16 @@ constants_path = os.path.join(file_path, "constants.sh") params = GetParams(constants_path) passwd = params["PASSWORD"] - -distro = platform.dist() +if sys.version_info[0] >= 3: + import distro + distro = distro.linux_distribution(full_distribution_name=False) +else: + distro = platform.dist() def RunTest(): UpdateState("TestRunning") - if(distro[0] == "CoreOS"): + if(distro[0].upper() == "COREOS"): versionOutPut = Run("waagent --version") else: output = Run("pgrep -fa python3.*waagent") @@ -48,7 +52,7 @@ def Restartwaagent(): - if (distro[0] == "CoreOS"): + if (distro[0].upper() == "COREOS"): Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf") elif (DetectDistro()[0] == 'clear-linux-os'): Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \ @@ -57,7 +61,7 @@ Run("echo '"+passwd+"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf") RunLog.info("Restart waagent service...") result = Run("echo '"+passwd+"' | sudo -S find / -name systemctl |wc -l | tr -d '\n'") - if (distro[0] == "Ubuntu") or (distro[0] == "debian"): + if (distro[0].upper() == "UBUNTU") or (distro[0].upper() == "DEBIAN"): Run("echo '"+passwd+"' | sudo -S service walinuxagent restart") else: if (result == "0") :
{"golden_diff": "diff --git a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n--- a/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n+++ b/Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py\n@@ -7,6 +7,7 @@\n import os\n import platform\n import time\n+import sys\n \n parser = argparse.ArgumentParser()\n \n@@ -14,13 +15,16 @@\n constants_path = os.path.join(file_path, \"constants.sh\")\n params = GetParams(constants_path)\n passwd = params[\"PASSWORD\"]\n-\n-distro = platform.dist()\n+if sys.version_info[0] >= 3:\n+ import distro\n+ distro = distro.linux_distribution(full_distribution_name=False)\n+else:\n+ distro = platform.dist()\n \n \n def RunTest():\n UpdateState(\"TestRunning\")\n- if(distro[0] == \"CoreOS\"):\n+ if(distro[0].upper() == \"COREOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n@@ -48,7 +52,7 @@\n \n \n def Restartwaagent():\n- if (distro[0] == \"CoreOS\"):\n+ if (distro[0].upper() == \"COREOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n@@ -57,7 +61,7 @@\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n- if (distro[0] == \"Ubuntu\") or (distro[0] == \"debian\"):\n+ if (distro[0].upper() == \"UBUNTU\") or (distro[0].upper() == \"DEBIAN\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n", "issue": "Ubuntu 20.04 - platform.dist() is deprecated since Python 3.5 and removed in Python 3.8\nAffected distro - ubuntu 20.04 (use python 3.8)\r\nAffected case - WALA-VERIFY-VERBOSE-ENABLED-LOGS\r\nUse distro.linux_distribution(full_distribution_name=False) instead\n", "before_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the Apache License.\nfrom azuremodules import *\n\nimport argparse\nimport os\nimport platform\nimport time\n\nparser = argparse.ArgumentParser()\n\nfile_path = os.path.dirname(os.path.realpath(__file__))\nconstants_path = os.path.join(file_path, \"constants.sh\")\nparams = GetParams(constants_path)\npasswd = params[\"PASSWORD\"]\n\ndistro = platform.dist()\n\n\ndef RunTest():\n UpdateState(\"TestRunning\")\n if(distro[0] == \"CoreOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n if (\"python3\" in output) :\n versionOutPut = Run(\"/usr/bin/python3 /usr/sbin/waagent --version\")\n else :\n versionOutPut = Run(\"/usr/sbin/waagent --version\")\n\n RunLog.info(\"Checking log waagent.log...\")\n if(\"2.0.\" in versionOutPut):\n output = Run(\"grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version is 2.0\")\n else:\n output = Run(\"grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version > 2.0\")\n\n if not (output == \"0\") :\n RunLog.info('The log file contains the verbose logs')\n ResultLog.info('PASS')\n UpdateState(\"TestCompleted\")\n else :\n RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs')\n ResultLog.error('FAIL')\n UpdateState(\"TestCompleted\")\n\n\ndef Restartwaagent():\n if (distro[0] == \"CoreOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n /usr/share/defaults/waagent/waagent.conf\")\n else:\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n if (distro[0] == \"Ubuntu\") or (distro[0] == \"debian\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n os.system(\"echo '\"+passwd+\"' | sudo -S service waagent restart\")\n else:\n os.system(\"echo '\"+passwd+\"' | sudo -S systemctl restart waagent\")\n time.sleep(60)\n\nRestartwaagent()\nRunTest()\n", "path": "Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the Apache License.\nfrom azuremodules import *\n\nimport argparse\nimport os\nimport platform\nimport time\nimport sys\n\nparser = argparse.ArgumentParser()\n\nfile_path = os.path.dirname(os.path.realpath(__file__))\nconstants_path = os.path.join(file_path, \"constants.sh\")\nparams = GetParams(constants_path)\npasswd = params[\"PASSWORD\"]\nif sys.version_info[0] >= 3:\n import distro\n distro = distro.linux_distribution(full_distribution_name=False)\nelse:\n distro = platform.dist()\n\n\ndef RunTest():\n UpdateState(\"TestRunning\")\n if(distro[0].upper() == \"COREOS\"):\n versionOutPut = Run(\"waagent --version\")\n else:\n output = Run(\"pgrep -fa python3.*waagent\")\n if (\"python3\" in output) :\n versionOutPut = Run(\"/usr/bin/python3 /usr/sbin/waagent --version\")\n else :\n versionOutPut = Run(\"/usr/sbin/waagent --version\")\n\n RunLog.info(\"Checking log waagent.log...\")\n if(\"2.0.\" in versionOutPut):\n output = Run(\"grep -i 'iptables -I INPUT -p udp --dport' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version is 2.0\")\n else:\n output = Run(\"grep -i 'VERBOSE' /var/log/waagent* | wc -l | tr -d '\\n'\")\n RunLog.info(\"agent version > 2.0\")\n\n if not (output == \"0\") :\n RunLog.info('The log file contains the verbose logs')\n ResultLog.info('PASS')\n UpdateState(\"TestCompleted\")\n else :\n RunLog.error('Verify waagent.log fail, the log file does not contain the verbose logs')\n ResultLog.error('FAIL')\n UpdateState(\"TestCompleted\")\n\n\ndef Restartwaagent():\n if (distro[0].upper() == \"COREOS\"):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /usr/share/oem/waagent.conf\")\n elif (DetectDistro()[0] == 'clear-linux-os'):\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g \\\n /usr/share/defaults/waagent/waagent.conf\")\n else:\n Run(\"echo '\"+passwd+\"' | sudo -S sed -i s/Logs.Verbose=n/Logs.Verbose=y/g /etc/waagent.conf\")\n RunLog.info(\"Restart waagent service...\")\n result = Run(\"echo '\"+passwd+\"' | sudo -S find / -name systemctl |wc -l | tr -d '\\n'\")\n if (distro[0].upper() == \"UBUNTU\") or (distro[0].upper() == \"DEBIAN\"):\n Run(\"echo '\"+passwd+\"' | sudo -S service walinuxagent restart\")\n else:\n if (result == \"0\") :\n os.system(\"echo '\"+passwd+\"' | sudo -S service waagent restart\")\n else:\n os.system(\"echo '\"+passwd+\"' | sudo -S systemctl restart waagent\")\n time.sleep(60)\n\nRestartwaagent()\nRunTest()\n", "path": "Testscripts/Linux/WALA-VERIFY-VERBOSE-ENABLED-LOGS.py"}]}
1,169
564
gh_patches_debug_40646
rasdani/github-patches
git_diff
CONP-PCNO__conp-dataset-675
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Auto-archive script not running on crawled dataset? All OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`. The following information systematically shows: ``` SKIPPED: archive not needed for projects/<dataset_name> ``` but the dataset size is definitely below 20GB. List of affected datasets: - CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz - Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz - Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz - Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz - MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz: - Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz - Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz - Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz: - Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz - VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz Auto-archive script not running on crawled dataset? All OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`. The following information systematically shows: ``` SKIPPED: archive not needed for projects/<dataset_name> ``` but the dataset size is definitely below 20GB. List of affected datasets: - CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz - Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz - Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz - Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz - MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz: - Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz - Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz - Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz: - Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz - VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `scripts/auto_archive.py` Content: ``` 1 from __future__ import annotations 2 3 import argparse 4 import json 5 import logging 6 import os 7 from datetime import datetime 8 from datetime import timedelta 9 10 import git 11 import humanfriendly 12 from datalad.plugin import export_archive 13 from github import Github 14 15 from scripts.datalad_utils import get_dataset 16 from scripts.datalad_utils import install_dataset 17 from scripts.log import get_logger 18 from tests.functions import get_proper_submodules 19 20 21 logger = get_logger( 22 "CONP-Archive", filename="conp-archive.log", file_level=logging.DEBUG 23 ) 24 25 26 class ArchiveFailed(Exception): 27 pass 28 29 30 def parse_args(): 31 example_text = """Example: 32 PYTHONPATH=$PWD python scripts/auto_archive.py <out_dir> 33 """ 34 35 parser = argparse.ArgumentParser( 36 description="Archiver for the CONP-datasets.", 37 epilog=example_text, 38 formatter_class=argparse.RawDescriptionHelpFormatter, 39 ) 40 41 parser.add_argument( 42 "--out_dir", "-o", type=str, help="Path to store the archived datasets." 43 ) 44 parser.add_argument( 45 "--max-size", 46 type=float, 47 default=20.0, 48 help="Maximum size of dataset to archive in GB.", 49 ) 50 group = parser.add_mutually_exclusive_group() 51 group.add_argument( 52 "--all", 53 action="store_true", 54 help="Archive all the datasets rather than those modified since the last time.", 55 ) 56 group.add_argument( 57 "--dataset", 58 "-d", 59 type=str, 60 nargs="+", 61 help="Restrict the archive to the specified dataset paths.", 62 ) 63 64 return parser.parse_args() 65 66 67 def get_all_datasets(): 68 return {os.path.basename(submodule.path) for submodule in git.Repo().submodules} 69 70 71 def get_modified_datasets( 72 *, 73 since: datetime | None = None, 74 until: datetime | None = None, 75 ) -> set[str]: 76 """Retrieve the modified datasets. 77 78 Requires to set GITHUB_ACCESS_TOKEN as an environment variable. 79 80 Parameters 81 ---------- 82 since : Optional[datetime], optional 83 Start date from which commits are retrieved, by default date of the previous crawl, if never crawled set to 84 one week ago. 85 until : Optional[datetime], optional 86 Latest date at which commit are retrieved, by default `now` 87 88 Returns 89 ------- 90 set[str] 91 Path of the dataset folders. 92 """ 93 now = datetime.now().astimezone() 94 95 if since is None: 96 if os.path.exists(".conp-archive"): 97 with open(".conp-archive") as fin: 98 since = datetime.fromisoformat(fin.read()) 99 else: 100 since = now - timedelta(weeks=1) 101 102 if until is None: 103 until = now 104 105 try: 106 gh_access_token = os.environ.get("GITHUB_ACCESS_TOKEN", None) 107 if gh_access_token is None: 108 raise OSError("GITHUB_ACCESS_TOKEN is not defined.") 109 110 except OSError as e: 111 # The program is not stopped since GitHub allows 60 query per hours with 112 # authentication. However the program will most likely fail. 
113 logger.critical(e) 114 115 logger.info(f"Retrieving modified datasets since {since}") 116 repo = Github(gh_access_token).get_repo("CONP-PCNO/conp-dataset") 117 commits = repo.get_commits(since=since, until=until) 118 119 with open(".conp-archive", "w") as fout: 120 fout.write(now.isoformat()) 121 122 modified_datasets: set[str] = { 123 os.path.basename(file_.filename) 124 for commit in commits 125 for file_ in commit.files 126 if file_.filename.startswith("projects/") 127 } 128 129 return modified_datasets 130 131 132 def archive_dataset(dataset_path: str, out_dir: str, version: str) -> None: 133 os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True) 134 out_filename = f"{out_dir}_version-{version}.tar.gz" 135 logger.info(f"Archiving dataset: {dataset_path} to {out_filename}") 136 137 cwd = os.getcwd() 138 try: 139 datalad_archiver = export_archive.ExportArchive() 140 dataset_repo = git.Repo(dataset_path) 141 142 with open(os.path.join(dataset_path, ".git.log"), "w") as fout: 143 fout.write(dataset_repo.git.log(pretty="format:%H %s")) 144 145 # Export is performed from the dataset root. 146 # This is to avoid failure when a submodule is not downloaded; e.g. for parent 147 # dataset in dataset derivative. 148 os.chdir(os.path.join(cwd, dataset_path)) 149 datalad_archiver(".", filename=out_filename) 150 151 except Exception as e: 152 raise ArchiveFailed( 153 f"FAILURE: could not archive dataset: {dataset_path} to {out_filename}\n{e}" 154 ) 155 finally: 156 os.chdir(cwd) 157 158 159 if __name__ == "__main__": 160 args = parse_args() 161 162 # Only archive the datasets available locally. 163 datasets = get_all_datasets() 164 if args.dataset: 165 target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset} 166 logger.warning( 167 f"The following dataset were not found locally: {target_datasets - datasets}" 168 ) 169 datasets &= target_datasets 170 171 elif not args.all: 172 modified_datasets = get_modified_datasets() 173 logger.warning( 174 f"The following dataset were not found locally: {modified_datasets - datasets}" 175 ) 176 datasets &= modified_datasets 177 178 for dataset_name in datasets: 179 dataset = "projects/" + dataset_name 180 181 try: 182 logger.info(f"Installing dataset: {dataset}") 183 install_dataset(dataset) 184 185 is_public = False 186 version = "" 187 dataset_size = 0.0 188 189 with open(os.path.join(dataset, "DATS.json")) as fin: 190 metadata = json.load(fin) 191 192 is_public = metadata.get("privacy") == "open" 193 version = metadata.get("version") 194 195 for distribution in metadata.get("distributions", list()): 196 dataset_size += humanfriendly.parse_size( 197 f"{distribution['size']} {distribution['unit']['value']}", 198 ) 199 dataset_size //= 1024 ** 3 # Convert to GB 200 201 # Only archive public dataset less than 20GB 202 if dataset_size <= args.max_size and is_public: 203 logger.info(f"Downloading dataset: {dataset}") 204 get_dataset(dataset) 205 for submodule in get_proper_submodules(dataset): 206 get_dataset(submodule) 207 208 archive_dataset( 209 dataset, 210 out_dir=os.path.join(args.out_dir, dataset_name), 211 version=version, 212 ) 213 logger.info(f"SUCCESS: archive created for {dataset}") 214 215 else: 216 logger.info(f"SKIPPED: archive not needed for {dataset}") 217 218 except Exception as e: 219 # TODO implement notification system. 220 # This will alert when a dataset fails the archiving process. 
221 logger.exception( 222 f"FAILURE: could not archive dataset: {dataset} to {args.out_dir}.tar.gz\n{e}" 223 ) 224 225 logger.info("Done archiving the datasets.") 226 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/auto_archive.py b/scripts/auto_archive.py --- a/scripts/auto_archive.py +++ b/scripts/auto_archive.py @@ -64,8 +64,11 @@ return parser.parse_args() -def get_all_datasets(): - return {os.path.basename(submodule.path) for submodule in git.Repo().submodules} +def get_datasets_path(): + return { + os.path.basename(submodule.path): submodule.path + for submodule in git.Repo().submodules + } def get_modified_datasets( @@ -129,9 +132,11 @@ return modified_datasets -def archive_dataset(dataset_path: str, out_dir: str, version: str) -> None: +def archive_dataset( + dataset_path: str, out_dir: str, archive_name: str, version: str +) -> None: os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True) - out_filename = f"{out_dir}_version-{version}.tar.gz" + out_filename = f"{archive_name}_version-{version}.tar.gz" logger.info(f"Archiving dataset: {dataset_path} to {out_filename}") cwd = os.getcwd() @@ -160,7 +165,8 @@ args = parse_args() # Only archive the datasets available locally. - datasets = get_all_datasets() + datasets_path = get_datasets_path() + datasets = datasets_path.keys() if args.dataset: target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset} logger.warning( @@ -176,7 +182,7 @@ datasets &= modified_datasets for dataset_name in datasets: - dataset = "projects/" + dataset_name + dataset = datasets_path[dataset_name] try: logger.info(f"Installing dataset: {dataset}") @@ -189,7 +195,13 @@ with open(os.path.join(dataset, "DATS.json")) as fin: metadata = json.load(fin) - is_public = metadata.get("privacy") == "open" + is_public = ( + metadata.get("distributions", [{}])[0] + .get("access", {}) + .get("authorizations", [{}])[0] + .get("value") + == "public" + ) version = metadata.get("version") for distribution in metadata.get("distributions", list()): @@ -205,9 +217,13 @@ for submodule in get_proper_submodules(dataset): get_dataset(submodule) + archive_name = "__".join( + os.path.relpath(dataset, "projects").split("/") + ) archive_dataset( dataset, out_dir=os.path.join(args.out_dir, dataset_name), + archive_name=archive_name, version=version, ) logger.info(f"SUCCESS: archive created for {dataset}")
{"golden_diff": "diff --git a/scripts/auto_archive.py b/scripts/auto_archive.py\n--- a/scripts/auto_archive.py\n+++ b/scripts/auto_archive.py\n@@ -64,8 +64,11 @@\n return parser.parse_args()\n \n \n-def get_all_datasets():\n- return {os.path.basename(submodule.path) for submodule in git.Repo().submodules}\n+def get_datasets_path():\n+ return {\n+ os.path.basename(submodule.path): submodule.path\n+ for submodule in git.Repo().submodules\n+ }\n \n \n def get_modified_datasets(\n@@ -129,9 +132,11 @@\n return modified_datasets\n \n \n-def archive_dataset(dataset_path: str, out_dir: str, version: str) -> None:\n+def archive_dataset(\n+ dataset_path: str, out_dir: str, archive_name: str, version: str\n+) -> None:\n os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True)\n- out_filename = f\"{out_dir}_version-{version}.tar.gz\"\n+ out_filename = f\"{archive_name}_version-{version}.tar.gz\"\n logger.info(f\"Archiving dataset: {dataset_path} to {out_filename}\")\n \n cwd = os.getcwd()\n@@ -160,7 +165,8 @@\n args = parse_args()\n \n # Only archive the datasets available locally.\n- datasets = get_all_datasets()\n+ datasets_path = get_datasets_path()\n+ datasets = datasets_path.keys()\n if args.dataset:\n target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset}\n logger.warning(\n@@ -176,7 +182,7 @@\n datasets &= modified_datasets\n \n for dataset_name in datasets:\n- dataset = \"projects/\" + dataset_name\n+ dataset = datasets_path[dataset_name]\n \n try:\n logger.info(f\"Installing dataset: {dataset}\")\n@@ -189,7 +195,13 @@\n with open(os.path.join(dataset, \"DATS.json\")) as fin:\n metadata = json.load(fin)\n \n- is_public = metadata.get(\"privacy\") == \"open\"\n+ is_public = (\n+ metadata.get(\"distributions\", [{}])[0]\n+ .get(\"access\", {})\n+ .get(\"authorizations\", [{}])[0]\n+ .get(\"value\")\n+ == \"public\"\n+ )\n version = metadata.get(\"version\")\n \n for distribution in metadata.get(\"distributions\", list()):\n@@ -205,9 +217,13 @@\n for submodule in get_proper_submodules(dataset):\n get_dataset(submodule)\n \n+ archive_name = \"__\".join(\n+ os.path.relpath(dataset, \"projects\").split(\"/\")\n+ )\n archive_dataset(\n dataset,\n out_dir=os.path.join(args.out_dir, dataset_name),\n+ archive_name=archive_name,\n version=version,\n )\n logger.info(f\"SUCCESS: archive created for {dataset}\")\n", "issue": "Auto-archive script not running on crawled dataset?\nAll OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`.\r\n\r\nThe following information systematically shows:\r\n```\r\nSKIPPED: archive not needed for projects/<dataset_name>\r\n```\r\n but the dataset size is definitely below 20GB.\r\n\r\nList of affected datasets:\r\n\r\n- CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz\r\n- Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz\r\n- Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz\r\n- Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz\r\n- MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz:\r\n- Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz\r\n- Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz\r\n- 
Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz:\r\n- Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz\r\n- VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz\r\n\r\n\r\n\nAuto-archive script not running on crawled dataset?\nAll OSF or Zenodo datasets are not being archived when running the script `auto_archive.py` with option `--all`.\r\n\r\nThe following information systematically shows:\r\n```\r\nSKIPPED: archive not needed for projects/<dataset_name>\r\n```\r\n but the dataset size is definitely below 20GB.\r\n\r\nList of affected datasets:\r\n\r\n- CFMM_7T__MP2RAGE_T1_mapping_version-2020-10-02T12:55:03.086599.tar.gz\r\n- Comparing_Perturbation_Modes_for_Evaluating_Instabilities_in_Neuroimaging__Processed_NKI_RS_Subset__08_2019__version-None.tar.gz\r\n- Intracellular_Recordings_of_Murine_Neocortical_Neurons_version-2020-04-18T19:34:10.458118.tar.gz\r\n- Learning_Naturalistic_Structure__Processed_fMRI_dataset_version-0.1.0.tar.gz\r\n- MRI_and_unbiased_averages_of_wild_muskrats__Ondatra_zibethicus__and_red_squirrels__Tamiasciurus_hudsonicus__version-1.0.tar.gz:\r\n- Multi-model_functionalization_of_disease-associated_PTEN_missense_mutations_version-1.0.tar.gz\r\n- Quantifying_Neural_Cognitive_Relationships_Across_the_Brain_version-2020-11-11T02:30:59.429493.tar.gz\r\n- Synthetic_Animated_Mouse__SAM___University_of_British_Columbia__Datasets_and_3D_models_version-2021-04-09T14:12:32.548954.tar.gz:\r\n- Systematic_phenomics_analysis_of_autism-associated_genes_version-3.2.tar.gz\r\n- VFA_T1_mapping___RTHawk__open__vs_Siemens__commercial__version-v1.0.tar.gz\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport json\nimport logging\nimport os\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport git\nimport humanfriendly\nfrom datalad.plugin import export_archive\nfrom github import Github\n\nfrom scripts.datalad_utils import get_dataset\nfrom scripts.datalad_utils import install_dataset\nfrom scripts.log import get_logger\nfrom tests.functions import get_proper_submodules\n\n\nlogger = get_logger(\n \"CONP-Archive\", filename=\"conp-archive.log\", file_level=logging.DEBUG\n)\n\n\nclass ArchiveFailed(Exception):\n pass\n\n\ndef parse_args():\n example_text = \"\"\"Example:\n PYTHONPATH=$PWD python scripts/auto_archive.py <out_dir>\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Archiver for the CONP-datasets.\",\n epilog=example_text,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n \"--out_dir\", \"-o\", type=str, help=\"Path to store the archived datasets.\"\n )\n parser.add_argument(\n \"--max-size\",\n type=float,\n default=20.0,\n help=\"Maximum size of dataset to archive in GB.\",\n )\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"--all\",\n action=\"store_true\",\n help=\"Archive all the datasets rather than those modified since the last time.\",\n )\n group.add_argument(\n \"--dataset\",\n \"-d\",\n type=str,\n nargs=\"+\",\n help=\"Restrict the archive to the specified dataset paths.\",\n )\n\n return parser.parse_args()\n\n\ndef get_all_datasets():\n return {os.path.basename(submodule.path) for submodule in git.Repo().submodules}\n\n\ndef get_modified_datasets(\n *,\n since: datetime | None = None,\n until: datetime | None = None,\n) -> set[str]:\n \"\"\"Retrieve the modified datasets.\n\n Requires to 
set GITHUB_ACCESS_TOKEN as an environment variable.\n\n Parameters\n ----------\n since : Optional[datetime], optional\n Start date from which commits are retrieved, by default date of the previous crawl, if never crawled set to\n one week ago.\n until : Optional[datetime], optional\n Latest date at which commit are retrieved, by default `now`\n\n Returns\n -------\n set[str]\n Path of the dataset folders.\n \"\"\"\n now = datetime.now().astimezone()\n\n if since is None:\n if os.path.exists(\".conp-archive\"):\n with open(\".conp-archive\") as fin:\n since = datetime.fromisoformat(fin.read())\n else:\n since = now - timedelta(weeks=1)\n\n if until is None:\n until = now\n\n try:\n gh_access_token = os.environ.get(\"GITHUB_ACCESS_TOKEN\", None)\n if gh_access_token is None:\n raise OSError(\"GITHUB_ACCESS_TOKEN is not defined.\")\n\n except OSError as e:\n # The program is not stopped since GitHub allows 60 query per hours with\n # authentication. However the program will most likely fail.\n logger.critical(e)\n\n logger.info(f\"Retrieving modified datasets since {since}\")\n repo = Github(gh_access_token).get_repo(\"CONP-PCNO/conp-dataset\")\n commits = repo.get_commits(since=since, until=until)\n\n with open(\".conp-archive\", \"w\") as fout:\n fout.write(now.isoformat())\n\n modified_datasets: set[str] = {\n os.path.basename(file_.filename)\n for commit in commits\n for file_ in commit.files\n if file_.filename.startswith(\"projects/\")\n }\n\n return modified_datasets\n\n\ndef archive_dataset(dataset_path: str, out_dir: str, version: str) -> None:\n os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True)\n out_filename = f\"{out_dir}_version-{version}.tar.gz\"\n logger.info(f\"Archiving dataset: {dataset_path} to {out_filename}\")\n\n cwd = os.getcwd()\n try:\n datalad_archiver = export_archive.ExportArchive()\n dataset_repo = git.Repo(dataset_path)\n\n with open(os.path.join(dataset_path, \".git.log\"), \"w\") as fout:\n fout.write(dataset_repo.git.log(pretty=\"format:%H %s\"))\n\n # Export is performed from the dataset root.\n # This is to avoid failure when a submodule is not downloaded; e.g. 
for parent\n # dataset in dataset derivative.\n os.chdir(os.path.join(cwd, dataset_path))\n datalad_archiver(\".\", filename=out_filename)\n\n except Exception as e:\n raise ArchiveFailed(\n f\"FAILURE: could not archive dataset: {dataset_path} to {out_filename}\\n{e}\"\n )\n finally:\n os.chdir(cwd)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Only archive the datasets available locally.\n datasets = get_all_datasets()\n if args.dataset:\n target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset}\n logger.warning(\n f\"The following dataset were not found locally: {target_datasets - datasets}\"\n )\n datasets &= target_datasets\n\n elif not args.all:\n modified_datasets = get_modified_datasets()\n logger.warning(\n f\"The following dataset were not found locally: {modified_datasets - datasets}\"\n )\n datasets &= modified_datasets\n\n for dataset_name in datasets:\n dataset = \"projects/\" + dataset_name\n\n try:\n logger.info(f\"Installing dataset: {dataset}\")\n install_dataset(dataset)\n\n is_public = False\n version = \"\"\n dataset_size = 0.0\n\n with open(os.path.join(dataset, \"DATS.json\")) as fin:\n metadata = json.load(fin)\n\n is_public = metadata.get(\"privacy\") == \"open\"\n version = metadata.get(\"version\")\n\n for distribution in metadata.get(\"distributions\", list()):\n dataset_size += humanfriendly.parse_size(\n f\"{distribution['size']} {distribution['unit']['value']}\",\n )\n dataset_size //= 1024 ** 3 # Convert to GB\n\n # Only archive public dataset less than 20GB\n if dataset_size <= args.max_size and is_public:\n logger.info(f\"Downloading dataset: {dataset}\")\n get_dataset(dataset)\n for submodule in get_proper_submodules(dataset):\n get_dataset(submodule)\n\n archive_dataset(\n dataset,\n out_dir=os.path.join(args.out_dir, dataset_name),\n version=version,\n )\n logger.info(f\"SUCCESS: archive created for {dataset}\")\n\n else:\n logger.info(f\"SKIPPED: archive not needed for {dataset}\")\n\n except Exception as e:\n # TODO implement notification system.\n # This will alert when a dataset fails the archiving process.\n logger.exception(\n f\"FAILURE: could not archive dataset: {dataset} to {args.out_dir}.tar.gz\\n{e}\"\n )\n\n logger.info(\"Done archiving the datasets.\")\n", "path": "scripts/auto_archive.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport json\nimport logging\nimport os\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport git\nimport humanfriendly\nfrom datalad.plugin import export_archive\nfrom github import Github\n\nfrom scripts.datalad_utils import get_dataset\nfrom scripts.datalad_utils import install_dataset\nfrom scripts.log import get_logger\nfrom tests.functions import get_proper_submodules\n\n\nlogger = get_logger(\n \"CONP-Archive\", filename=\"conp-archive.log\", file_level=logging.DEBUG\n)\n\n\nclass ArchiveFailed(Exception):\n pass\n\n\ndef parse_args():\n example_text = \"\"\"Example:\n PYTHONPATH=$PWD python scripts/auto_archive.py <out_dir>\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Archiver for the CONP-datasets.\",\n epilog=example_text,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n \"--out_dir\", \"-o\", type=str, help=\"Path to store the archived datasets.\"\n )\n parser.add_argument(\n \"--max-size\",\n type=float,\n default=20.0,\n help=\"Maximum size of dataset to archive in GB.\",\n )\n group = parser.add_mutually_exclusive_group()\n 
group.add_argument(\n \"--all\",\n action=\"store_true\",\n help=\"Archive all the datasets rather than those modified since the last time.\",\n )\n group.add_argument(\n \"--dataset\",\n \"-d\",\n type=str,\n nargs=\"+\",\n help=\"Restrict the archive to the specified dataset paths.\",\n )\n\n return parser.parse_args()\n\n\ndef get_datasets_path():\n return {\n os.path.basename(submodule.path): submodule.path\n for submodule in git.Repo().submodules\n }\n\n\ndef get_modified_datasets(\n *,\n since: datetime | None = None,\n until: datetime | None = None,\n) -> set[str]:\n \"\"\"Retrieve the modified datasets.\n\n Requires to set GITHUB_ACCESS_TOKEN as an environment variable.\n\n Parameters\n ----------\n since : Optional[datetime], optional\n Start date from which commits are retrieved, by default date of the previous crawl, if never crawled set to\n one week ago.\n until : Optional[datetime], optional\n Latest date at which commit are retrieved, by default `now`\n\n Returns\n -------\n set[str]\n Path of the dataset folders.\n \"\"\"\n now = datetime.now().astimezone()\n\n if since is None:\n if os.path.exists(\".conp-archive\"):\n with open(\".conp-archive\") as fin:\n since = datetime.fromisoformat(fin.read())\n else:\n since = now - timedelta(weeks=1)\n\n if until is None:\n until = now\n\n try:\n gh_access_token = os.environ.get(\"GITHUB_ACCESS_TOKEN\", None)\n if gh_access_token is None:\n raise OSError(\"GITHUB_ACCESS_TOKEN is not defined.\")\n\n except OSError as e:\n # The program is not stopped since GitHub allows 60 query per hours with\n # authentication. However the program will most likely fail.\n logger.critical(e)\n\n logger.info(f\"Retrieving modified datasets since {since}\")\n repo = Github(gh_access_token).get_repo(\"CONP-PCNO/conp-dataset\")\n commits = repo.get_commits(since=since, until=until)\n\n with open(\".conp-archive\", \"w\") as fout:\n fout.write(now.isoformat())\n\n modified_datasets: set[str] = {\n os.path.basename(file_.filename)\n for commit in commits\n for file_ in commit.files\n if file_.filename.startswith(\"projects/\")\n }\n\n return modified_datasets\n\n\ndef archive_dataset(\n dataset_path: str, out_dir: str, archive_name: str, version: str\n) -> None:\n os.makedirs(os.path.dirname(out_dir), mode=0o755, exist_ok=True)\n out_filename = f\"{archive_name}_version-{version}.tar.gz\"\n logger.info(f\"Archiving dataset: {dataset_path} to {out_filename}\")\n\n cwd = os.getcwd()\n try:\n datalad_archiver = export_archive.ExportArchive()\n dataset_repo = git.Repo(dataset_path)\n\n with open(os.path.join(dataset_path, \".git.log\"), \"w\") as fout:\n fout.write(dataset_repo.git.log(pretty=\"format:%H %s\"))\n\n # Export is performed from the dataset root.\n # This is to avoid failure when a submodule is not downloaded; e.g. 
for parent\n # dataset in dataset derivative.\n os.chdir(os.path.join(cwd, dataset_path))\n datalad_archiver(\".\", filename=out_filename)\n\n except Exception as e:\n raise ArchiveFailed(\n f\"FAILURE: could not archive dataset: {dataset_path} to {out_filename}\\n{e}\"\n )\n finally:\n os.chdir(cwd)\n\n\nif __name__ == \"__main__\":\n args = parse_args()\n\n # Only archive the datasets available locally.\n datasets_path = get_datasets_path()\n datasets = datasets_path.keys()\n if args.dataset:\n target_datasets = {os.path.basename(os.path.normpath(d)) for d in args.dataset}\n logger.warning(\n f\"The following dataset were not found locally: {target_datasets - datasets}\"\n )\n datasets &= target_datasets\n\n elif not args.all:\n modified_datasets = get_modified_datasets()\n logger.warning(\n f\"The following dataset were not found locally: {modified_datasets - datasets}\"\n )\n datasets &= modified_datasets\n\n for dataset_name in datasets:\n dataset = datasets_path[dataset_name]\n\n try:\n logger.info(f\"Installing dataset: {dataset}\")\n install_dataset(dataset)\n\n is_public = False\n version = \"\"\n dataset_size = 0.0\n\n with open(os.path.join(dataset, \"DATS.json\")) as fin:\n metadata = json.load(fin)\n\n is_public = (\n metadata.get(\"distributions\", [{}])[0]\n .get(\"access\", {})\n .get(\"authorizations\", [{}])[0]\n .get(\"value\")\n == \"public\"\n )\n version = metadata.get(\"version\")\n\n for distribution in metadata.get(\"distributions\", list()):\n dataset_size += humanfriendly.parse_size(\n f\"{distribution['size']} {distribution['unit']['value']}\",\n )\n dataset_size //= 1024 ** 3 # Convert to GB\n\n # Only archive public dataset less than 20GB\n if dataset_size <= args.max_size and is_public:\n logger.info(f\"Downloading dataset: {dataset}\")\n get_dataset(dataset)\n for submodule in get_proper_submodules(dataset):\n get_dataset(submodule)\n\n archive_name = \"__\".join(\n os.path.relpath(dataset, \"projects\").split(\"/\")\n )\n archive_dataset(\n dataset,\n out_dir=os.path.join(args.out_dir, dataset_name),\n archive_name=archive_name,\n version=version,\n )\n logger.info(f\"SUCCESS: archive created for {dataset}\")\n\n else:\n logger.info(f\"SKIPPED: archive not needed for {dataset}\")\n\n except Exception as e:\n # TODO implement notification system.\n # This will alert when a dataset fails the archiving process.\n logger.exception(\n f\"FAILURE: could not archive dataset: {dataset} to {args.out_dir}.tar.gz\\n{e}\"\n )\n\n logger.info(\"Done archiving the datasets.\")\n", "path": "scripts/auto_archive.py"}]}
3,260
651
gh_patches_debug_9987
rasdani/github-patches
git_diff
cltk__cltk-906
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix broken NER doctest Open to anyone. This simple NER doctest: https://github.com/cltk/cltk/blob/9b9cdb42dcc1c707ab3db3ef8214837bb7c262b5/cltk/tag/ner.py#L78 is all of a sudden failing (example: https://travis-ci.org/cltk/cltk/jobs/525125856#L1935 ). The test expects 4 padded spaces on the left. I have no idea why this would break all of a sudden. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cltk/tag/ner.py` Content: ``` 1 """Named entity recognition (NER).""" 2 3 from cltk.corpus.utils.importer import CorpusImporter 4 from nltk.tokenize.punkt import PunktLanguageVars 5 from cltk.tokenize.word import WordTokenizer 6 import os 7 import importlib.machinery 8 9 __author__ = ['Natasha Voake <[email protected]>'] 10 __license__ = 'MIT License. See LICENSE.' 11 12 NER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt', 13 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'} 14 15 16 class NamedEntityReplacer(object): 17 18 def __init__(self): 19 20 self.entities = self._load_necessary_data() 21 22 23 def _load_necessary_data(self): 24 rel_path = os.path.join('~', 'cltk_data', 25 'french', 26 'text', 'french_data_cltk', 27 'named_entities_fr.py') 28 path = os.path.expanduser(rel_path) 29 # logger.info('Loading entries. This may take a minute.') 30 loader = importlib.machinery.SourceFileLoader('entities', path) 31 module = loader.load_module() 32 entities = module.entities 33 return entities 34 35 """tags named entities in a string and outputs a list of tuples in the following format: 36 (name, "entity", kind_of_entity)""" 37 38 def tag_ner_fr(self, input_text, output_type=list): 39 40 entities = self.entities 41 42 for entity in entities: 43 (name, kind) = entity 44 45 word_tokenizer = WordTokenizer('french') 46 tokenized_text = word_tokenizer.tokenize(input_text) 47 ner_tuple_list = [] 48 49 match = False 50 for word in tokenized_text: 51 for name, kind in entities: 52 if word == name: 53 named_things = ([(name, 'entity', kind)]) 54 ner_tuple_list.append(named_things) 55 match = True 56 break 57 else: 58 ner_tuple_list.append((word,)) 59 return ner_tuple_list 60 61 62 def _check_latest_data(lang): 63 """Check for presence of proper names dir, clone if not.""" 64 65 assert lang in NER_DICT.keys(), \ 66 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys())) 67 68 ner_file_path = os.path.expanduser(NER_DICT[lang]) 69 70 if not os.path.isfile(ner_file_path): 71 corpus_importer = CorpusImporter(lang) 72 corpus_importer.import_corpus('{}_models_cltk'.format(lang)) 73 74 75 def tag_ner(lang, input_text, output_type=list): 76 """Run NER for chosen language. 77 Choosing output_type=list, returns a list of tuples: 78 79 >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list) 80 [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')] 81 """ 82 83 _check_latest_data(lang) 84 85 assert lang in NER_DICT.keys(), \ 86 'Invalid language. 
Choose from: {}'.format(', '.join(NER_DICT.keys())) 87 types = [str, list] 88 assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types)) 89 assert output_type in types, 'Output must be a {}.'.format(', '.join(types)) 90 91 if type(input_text) == str: 92 punkt = PunktLanguageVars() 93 tokens = punkt.word_tokenize(input_text) 94 new_tokens = [] 95 for word in tokens: 96 if word.endswith('.'): 97 new_tokens.append(word[:-1]) 98 new_tokens.append('.') 99 else: 100 new_tokens.append(word) 101 input_text = new_tokens 102 103 ner_file_path = os.path.expanduser(NER_DICT[lang]) 104 with open(ner_file_path) as file_open: 105 ner_str = file_open.read() 106 ner_list = ner_str.split('\n') 107 108 ner_tuple_list = [] 109 for count, word_token in enumerate(input_text): 110 match = False 111 for ner_word in ner_list: 112 # the replacer slows things down, but is necessary 113 if word_token == ner_word: 114 ner_tuple = (word_token, 'Entity') 115 ner_tuple_list.append(ner_tuple) 116 match = True 117 break 118 if not match: 119 ner_tuple_list.append((word_token,)) 120 121 if output_type is str: 122 string = '' 123 for tup in ner_tuple_list: 124 start_space = ' ' 125 final_space = '' 126 # this is some mediocre string reconstitution 127 # maybe not worth the effort 128 if tup[0] in [',', '.', ';', ':', '?', '!']: 129 start_space = '' 130 if len(tup) == 2: 131 string += start_space + tup[0] + '/' + tup[1] + final_space 132 else: 133 string += start_space + tup[0] + final_space 134 return string 135 136 return ner_tuple_list 137 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cltk/tag/ner.py b/cltk/tag/ner.py --- a/cltk/tag/ner.py +++ b/cltk/tag/ner.py @@ -74,10 +74,6 @@ def tag_ner(lang, input_text, output_type=list): """Run NER for chosen language. - Choosing output_type=list, returns a list of tuples: - - >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list) - [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')] """ _check_latest_data(lang) @@ -134,4 +130,3 @@ return string return ner_tuple_list -
{"golden_diff": "diff --git a/cltk/tag/ner.py b/cltk/tag/ner.py\n--- a/cltk/tag/ner.py\n+++ b/cltk/tag/ner.py\n@@ -74,10 +74,6 @@\n \n def tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n- Choosing output_type=list, returns a list of tuples:\n- \n- >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)\n- [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]\n \"\"\"\n \n _check_latest_data(lang)\n@@ -134,4 +130,3 @@\n return string\n \n return ner_tuple_list\n-\n", "issue": "Fix broken NER doctest\nOpen to anyone.\r\n\r\nThis simple NER doctest: https://github.com/cltk/cltk/blob/9b9cdb42dcc1c707ab3db3ef8214837bb7c262b5/cltk/tag/ner.py#L78\r\n\r\nis all of a sudden failing (example: https://travis-ci.org/cltk/cltk/jobs/525125856#L1935 ).\r\n\r\nThe test expects 4 padded spaces on the left.\r\n\r\nI have no idea why this would break all of a sudden.\r\n\r\n\n", "before_files": [{"content": "\"\"\"Named entity recognition (NER).\"\"\"\n\nfrom cltk.corpus.utils.importer import CorpusImporter\nfrom nltk.tokenize.punkt import PunktLanguageVars\nfrom cltk.tokenize.word import WordTokenizer\nimport os\nimport importlib.machinery\n\n__author__ = ['Natasha Voake <[email protected]>']\n__license__ = 'MIT License. See LICENSE.'\n\nNER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt',\n 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'}\n\n\nclass NamedEntityReplacer(object):\n\n def __init__(self):\n\n self.entities = self._load_necessary_data()\n\n\n def _load_necessary_data(self):\n rel_path = os.path.join('~', 'cltk_data',\n 'french',\n 'text', 'french_data_cltk',\n 'named_entities_fr.py')\n path = os.path.expanduser(rel_path)\n # logger.info('Loading entries. This may take a minute.')\n loader = importlib.machinery.SourceFileLoader('entities', path)\n module = loader.load_module()\n entities = module.entities\n return entities\n\n \"\"\"tags named entities in a string and outputs a list of tuples in the following format:\n (name, \"entity\", kind_of_entity)\"\"\"\n\n def tag_ner_fr(self, input_text, output_type=list):\n\n entities = self.entities\n\n for entity in entities:\n (name, kind) = entity\n\n word_tokenizer = WordTokenizer('french')\n tokenized_text = word_tokenizer.tokenize(input_text)\n ner_tuple_list = []\n\n match = False\n for word in tokenized_text:\n for name, kind in entities:\n if word == name:\n named_things = ([(name, 'entity', kind)])\n ner_tuple_list.append(named_things)\n match = True\n break\n else:\n ner_tuple_list.append((word,))\n return ner_tuple_list\n\n\ndef _check_latest_data(lang):\n \"\"\"Check for presence of proper names dir, clone if not.\"\"\"\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n\n if not os.path.isfile(ner_file_path):\n corpus_importer = CorpusImporter(lang)\n corpus_importer.import_corpus('{}_models_cltk'.format(lang))\n\n\ndef tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n Choosing output_type=list, returns a list of tuples:\n \n >>> tag_ner('latin', input_text='ut Venus, ut Sirius, ut Spica', output_type=list)\n [('ut',), ('Venus',), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity')]\n \"\"\"\n\n _check_latest_data(lang)\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. 
Choose from: {}'.format(', '.join(NER_DICT.keys()))\n types = [str, list]\n assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types))\n assert output_type in types, 'Output must be a {}.'.format(', '.join(types))\n\n if type(input_text) == str:\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(input_text)\n new_tokens = []\n for word in tokens:\n if word.endswith('.'):\n new_tokens.append(word[:-1])\n new_tokens.append('.')\n else:\n new_tokens.append(word)\n input_text = new_tokens\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n with open(ner_file_path) as file_open:\n ner_str = file_open.read()\n ner_list = ner_str.split('\\n')\n\n ner_tuple_list = []\n for count, word_token in enumerate(input_text):\n match = False\n for ner_word in ner_list:\n # the replacer slows things down, but is necessary\n if word_token == ner_word:\n ner_tuple = (word_token, 'Entity')\n ner_tuple_list.append(ner_tuple)\n match = True\n break\n if not match:\n ner_tuple_list.append((word_token,))\n\n if output_type is str:\n string = ''\n for tup in ner_tuple_list:\n start_space = ' '\n final_space = ''\n # this is some mediocre string reconstitution\n # maybe not worth the effort\n if tup[0] in [',', '.', ';', ':', '?', '!']:\n start_space = ''\n if len(tup) == 2:\n string += start_space + tup[0] + '/' + tup[1] + final_space\n else:\n string += start_space + tup[0] + final_space\n return string\n\n return ner_tuple_list\n\n", "path": "cltk/tag/ner.py"}], "after_files": [{"content": "\"\"\"Named entity recognition (NER).\"\"\"\n\nfrom cltk.corpus.utils.importer import CorpusImporter\nfrom nltk.tokenize.punkt import PunktLanguageVars\nfrom cltk.tokenize.word import WordTokenizer\nimport os\nimport importlib.machinery\n\n__author__ = ['Natasha Voake <[email protected]>']\n__license__ = 'MIT License. See LICENSE.'\n\nNER_DICT = {'greek': '~/cltk_data/greek/model/greek_models_cltk/ner/proper_names.txt',\n 'latin': '~/cltk_data/latin/model/latin_models_cltk/ner/proper_names.txt'}\n\n\nclass NamedEntityReplacer(object):\n\n def __init__(self):\n\n self.entities = self._load_necessary_data()\n\n\n def _load_necessary_data(self):\n rel_path = os.path.join('~', 'cltk_data',\n 'french',\n 'text', 'french_data_cltk',\n 'named_entities_fr.py')\n path = os.path.expanduser(rel_path)\n # logger.info('Loading entries. This may take a minute.')\n loader = importlib.machinery.SourceFileLoader('entities', path)\n module = loader.load_module()\n entities = module.entities\n return entities\n\n \"\"\"tags named entities in a string and outputs a list of tuples in the following format:\n (name, \"entity\", kind_of_entity)\"\"\"\n\n def tag_ner_fr(self, input_text, output_type=list):\n\n entities = self.entities\n\n for entity in entities:\n (name, kind) = entity\n\n word_tokenizer = WordTokenizer('french')\n tokenized_text = word_tokenizer.tokenize(input_text)\n ner_tuple_list = []\n\n match = False\n for word in tokenized_text:\n for name, kind in entities:\n if word == name:\n named_things = ([(name, 'entity', kind)])\n ner_tuple_list.append(named_things)\n match = True\n break\n else:\n ner_tuple_list.append((word,))\n return ner_tuple_list\n\n\ndef _check_latest_data(lang):\n \"\"\"Check for presence of proper names dir, clone if not.\"\"\"\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. 
Choose from: {}'.format(', '.join(NER_DICT.keys()))\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n\n if not os.path.isfile(ner_file_path):\n corpus_importer = CorpusImporter(lang)\n corpus_importer.import_corpus('{}_models_cltk'.format(lang))\n\n\ndef tag_ner(lang, input_text, output_type=list):\n \"\"\"Run NER for chosen language.\n \"\"\"\n\n _check_latest_data(lang)\n\n assert lang in NER_DICT.keys(), \\\n 'Invalid language. Choose from: {}'.format(', '.join(NER_DICT.keys()))\n types = [str, list]\n assert type(input_text) in types, 'Input must be: {}.'.format(', '.join(types))\n assert output_type in types, 'Output must be a {}.'.format(', '.join(types))\n\n if type(input_text) == str:\n punkt = PunktLanguageVars()\n tokens = punkt.word_tokenize(input_text)\n new_tokens = []\n for word in tokens:\n if word.endswith('.'):\n new_tokens.append(word[:-1])\n new_tokens.append('.')\n else:\n new_tokens.append(word)\n input_text = new_tokens\n\n ner_file_path = os.path.expanduser(NER_DICT[lang])\n with open(ner_file_path) as file_open:\n ner_str = file_open.read()\n ner_list = ner_str.split('\\n')\n\n ner_tuple_list = []\n for count, word_token in enumerate(input_text):\n match = False\n for ner_word in ner_list:\n # the replacer slows things down, but is necessary\n if word_token == ner_word:\n ner_tuple = (word_token, 'Entity')\n ner_tuple_list.append(ner_tuple)\n match = True\n break\n if not match:\n ner_tuple_list.append((word_token,))\n\n if output_type is str:\n string = ''\n for tup in ner_tuple_list:\n start_space = ' '\n final_space = ''\n # this is some mediocre string reconstitution\n # maybe not worth the effort\n if tup[0] in [',', '.', ';', ':', '?', '!']:\n start_space = ''\n if len(tup) == 2:\n string += start_space + tup[0] + '/' + tup[1] + final_space\n else:\n string += start_space + tup[0] + final_space\n return string\n\n return ner_tuple_list\n", "path": "cltk/tag/ner.py"}]}
num_tokens: 1,786
num_tokens_diff: 189
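The CLTK record above revolves around dictionary-lookup NER: `tag_ner` tokenizes the input and marks any token that appears in a language-specific proper-names list as an `'Entity'`. A minimal, self-contained sketch of that idea, with an invented name list and sentence rather than the real `proper_names.txt` data:

```python
# Dictionary-based NER in the spirit of the tag_ner loop above.
proper_names = {"Caesar", "Roma"}  # hypothetical stand-in for the proper-names file

def tag_tokens(tokens):
    # Tokens found in the name list get an 'Entity' tag; everything else passes through as a 1-tuple.
    return [(tok, "Entity") if tok in proper_names else (tok,) for tok in tokens]

print(tag_tokens(["Caesar", "venit", "ad", "Roma", "."]))
# [('Caesar', 'Entity'), ('venit',), ('ad',), ('Roma', 'Entity'), ('.',)]
```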
problem_id: gh_patches_debug_10497
source: rasdani/github-patches
task_type: git_diff
in_source_id: lhotse-speech__lhotse-138
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: broken import from augmentations Hi I installed the latest pip version of lhotse and I am getting an import error when using the lhotse CLI: Setup: ``` python3.7.0 lhotse==0.2.0 ``` To reproduce, try either from the following lines: ``` lhotse convert-kaldi <data-dir> 16000 <other-data-dir> python -c "from lhotse.augmentation import available_wav_augmentations" ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lhotse/augmentation/__init__.py` Content: ``` 1 from .common import AugmentFn 2 from .torchaudio import * 3 from .wavaugment import WavAugmenter, is_wav_augment_available 4 ``` Path: `setup.py` Content: ``` 1 # coding=utf-8 2 import os 3 from pathlib import Path 4 5 from setuptools import find_packages, setup 6 7 project_root = Path(__file__).parent 8 9 install_requires = (project_root / 'requirements.txt').read_text().splitlines() 10 docs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines() 11 tests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2'] 12 dev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort'] 13 14 if os.environ.get('READTHEDOCS', False): 15 # When building documentation, omit torchaudio installation and mock it instead. 16 # This works around the inability to install libsoundfile1 in read-the-docs env, 17 # which caused the documentation builds to silently crash. 18 install_requires = [req for req in install_requires if not req.startswith('torchaudio')] 19 20 setup( 21 name='lhotse', 22 version='0.2.0', 23 python_requires='>=3.7.0', 24 description='Data preparation for speech processing models training.', 25 author='The Lhotse Development Team', 26 author_email="[email protected]", 27 long_description=(project_root / 'README.md').read_text(), 28 long_description_content_type="text/markdown", 29 license='Apache-2.0 License', 30 packages=find_packages(), 31 # The line below makes every script in the list an executable that's inserted in PATH 32 # as long as the virtualenv/conda env is active; they can be used like any other shell program 33 scripts=['lhotse/bin/lhotse'], 34 install_requires=install_requires, 35 extras_require={ 36 'docs': docs_require, 37 'tests': tests_require, 38 'dev': docs_require + tests_require 39 }, 40 classifiers=[ 41 "Development Status :: 3 - Alpha", 42 "Programming Language :: Python :: 3.7", 43 "Programming Language :: Python :: 3.8", 44 "Intended Audience :: Science/Research", 45 "Operating System :: POSIX :: Linux", 46 "Operating System :: MacOS :: MacOS X", 47 "License :: OSI Approved :: Apache Software License", 48 "Topic :: Multimedia :: Sound/Audio :: Speech", 49 "Topic :: Scientific/Engineering :: Artificial Intelligence", 50 "Topic :: Software Development :: Libraries :: Python Modules", 51 "Typing :: Typed" 52 ], 53 ) 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lhotse/augmentation/__init__.py b/lhotse/augmentation/__init__.py --- a/lhotse/augmentation/__init__.py +++ b/lhotse/augmentation/__init__.py @@ -1,3 +1,3 @@ from .common import AugmentFn from .torchaudio import * -from .wavaugment import WavAugmenter, is_wav_augment_available +from .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -19,7 +19,7 @@ setup( name='lhotse', - version='0.2.0', + version='0.2.1', python_requires='>=3.7.0', description='Data preparation for speech processing models training.', author='The Lhotse Development Team',
{"golden_diff": "diff --git a/lhotse/augmentation/__init__.py b/lhotse/augmentation/__init__.py\n--- a/lhotse/augmentation/__init__.py\n+++ b/lhotse/augmentation/__init__.py\n@@ -1,3 +1,3 @@\n from .common import AugmentFn\n from .torchaudio import *\n-from .wavaugment import WavAugmenter, is_wav_augment_available\n+from .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations\ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -19,7 +19,7 @@\n \n setup(\n name='lhotse',\n- version='0.2.0',\n+ version='0.2.1',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n", "issue": "Bug: broken import from augmentations\nHi I installed the latest pip version of lhotse and I am getting an import error when using the lhotse CLI:\r\n\r\nSetup:\r\n```\r\npython3.7.0 \r\nlhotse==0.2.0\r\n```\r\n\r\nTo reproduce, try either from the following lines:\r\n```\r\nlhotse convert-kaldi <data-dir> 16000 <other-data-dir>\r\npython -c \"from lhotse.augmentation import available_wav_augmentations\"\r\n```\n", "before_files": [{"content": "from .common import AugmentFn\nfrom .torchaudio import *\nfrom .wavaugment import WavAugmenter, is_wav_augment_available\n", "path": "lhotse/augmentation/__init__.py"}, {"content": "# coding=utf-8\nimport os\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nproject_root = Path(__file__).parent\n\ninstall_requires = (project_root / 'requirements.txt').read_text().splitlines()\ndocs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines()\ntests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2']\ndev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort']\n\nif os.environ.get('READTHEDOCS', False):\n # When building documentation, omit torchaudio installation and mock it instead.\n # This works around the inability to install libsoundfile1 in read-the-docs env,\n # which caused the documentation builds to silently crash.\n install_requires = [req for req in install_requires if not req.startswith('torchaudio')]\n\nsetup(\n name='lhotse',\n version='0.2.0',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n author_email=\"[email protected]\",\n long_description=(project_root / 'README.md').read_text(),\n long_description_content_type=\"text/markdown\",\n license='Apache-2.0 License',\n packages=find_packages(),\n # The line below makes every script in the list an executable that's inserted in PATH\n # as long as the virtualenv/conda env is active; they can be used like any other shell program\n scripts=['lhotse/bin/lhotse'],\n install_requires=install_requires,\n extras_require={\n 'docs': docs_require,\n 'tests': tests_require,\n 'dev': docs_require + tests_require\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Multimedia :: Sound/Audio :: Speech\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Typing :: Typed\"\n ],\n)\n", "path": "setup.py"}], "after_files": 
[{"content": "from .common import AugmentFn\nfrom .torchaudio import *\nfrom .wavaugment import WavAugmenter, is_wav_augment_available, available_wav_augmentations\n", "path": "lhotse/augmentation/__init__.py"}, {"content": "# coding=utf-8\nimport os\nfrom pathlib import Path\n\nfrom setuptools import find_packages, setup\n\nproject_root = Path(__file__).parent\n\ninstall_requires = (project_root / 'requirements.txt').read_text().splitlines()\ndocs_require = (project_root / 'docs' / 'requirements.txt').read_text().splitlines()\ntests_require = ['pytest==5.4.3', 'flake8==3.8.3', 'coverage==5.1', 'hypothesis==5.41.2']\ndev_requires = docs_require + tests_require + ['jupyterlab', 'matplotlib', 'isort']\n\nif os.environ.get('READTHEDOCS', False):\n # When building documentation, omit torchaudio installation and mock it instead.\n # This works around the inability to install libsoundfile1 in read-the-docs env,\n # which caused the documentation builds to silently crash.\n install_requires = [req for req in install_requires if not req.startswith('torchaudio')]\n\nsetup(\n name='lhotse',\n version='0.2.1',\n python_requires='>=3.7.0',\n description='Data preparation for speech processing models training.',\n author='The Lhotse Development Team',\n author_email=\"[email protected]\",\n long_description=(project_root / 'README.md').read_text(),\n long_description_content_type=\"text/markdown\",\n license='Apache-2.0 License',\n packages=find_packages(),\n # The line below makes every script in the list an executable that's inserted in PATH\n # as long as the virtualenv/conda env is active; they can be used like any other shell program\n scripts=['lhotse/bin/lhotse'],\n install_requires=install_requires,\n extras_require={\n 'docs': docs_require,\n 'tests': tests_require,\n 'dev': docs_require + tests_require\n },\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Intended Audience :: Science/Research\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: MacOS :: MacOS X\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Topic :: Multimedia :: Sound/Audio :: Speech\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n \"Typing :: Typed\"\n ],\n)\n", "path": "setup.py"}]}
num_tokens: 1,057
num_tokens_diff: 219
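The lhotse fix above is essentially a one-line re-export: `available_wav_augmentations` existed in `lhotse/augmentation/wavaugment.py` but was never imported in the package's `__init__.py`, so `from lhotse.augmentation import available_wav_augmentations` failed even though the function was there. A generic sketch of the same pattern, using made-up module and function names (place the two files under a `mypkg/` package to try it):

```python
# mypkg/impl.py  -- hypothetical module where the function is actually defined
def available_widgets():
    return ["resample", "reverb"]
```

```python
# mypkg/__init__.py  -- without this re-export,
# `from mypkg import available_widgets` raises ImportError
from .impl import available_widgets  # noqa: F401
```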
problem_id: gh_patches_debug_15590
source: rasdani/github-patches
task_type: git_diff
in_source_id: plone__Products.CMFPlone-3688
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update package metadata in PyPi --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import find_packages 2 from setuptools import setup 3 4 5 version = '6.0.0rc2.dev0' 6 7 8 setup( 9 name='Products.CMFPlone', 10 version=version, 11 description="The Plone Content Management System (core)", 12 long_description=open("README.rst").read() + "\n" + 13 open("CHANGES.rst").read(), 14 classifiers=[ 15 "Development Status :: 5 - Production/Stable", 16 "Environment :: Web Environment", 17 "Framework :: Plone", 18 "Framework :: Plone :: 6.0", 19 "Framework :: Plone :: Core", 20 "Framework :: Zope :: 5", 21 "License :: OSI Approved :: GNU General Public License v2 (GPLv2)", 22 "Operating System :: OS Independent", 23 "Programming Language :: Python", 24 "Programming Language :: Python :: 3.8", 25 "Programming Language :: Python :: 3.9", 26 "Programming Language :: Python :: 3.10", 27 "Programming Language :: Python :: 3.11", 28 ], 29 python_requires='>=3.8', 30 keywords='Plone CMF Python Zope CMS Webapplication', 31 author='Plone Foundation', 32 author_email='[email protected]', 33 url='https://plone.org', 34 license='GPL version 2', 35 packages=find_packages(), 36 namespace_packages=['Products'], 37 include_package_data=True, 38 zip_safe=False, 39 install_requires=[ 40 'borg.localrole', 41 'five.customerize', 42 'lxml', 43 'plone.api >= 1.4.4', 44 'plone.app.content', 45 'plone.app.contentlisting', 46 'plone.app.contentmenu >= 2.0.1', 47 'plone.app.contentrules', 48 'plone.app.contenttypes', 49 'plone.app.customerize', 50 'plone.app.dexterity', 51 'plone.app.discussion', 52 'plone.app.i18n', 53 'plone.app.layout >= 2.5.15', 54 'plone.app.linkintegrity >=1.0.3', 55 'plone.app.locales', 56 'plone.app.multilingual', 57 'plone.app.portlets', 58 'plone.app.redirector', 59 'plone.app.registry', 60 'plone.app.theming', 61 'plone.app.users', 62 'plone.app.uuid', 63 'plone.app.viewletmanager', 64 'plone.app.vocabularies', 65 'plone.app.workflow', 66 'plone.base', 67 'plone.browserlayer >= 2.1.5', 68 'plone.contentrules', 69 'plone.folder', 70 'plone.i18n >= 4.0.5', 71 'plone.indexer', 72 'plone.intelligenttext', 73 'plone.locking', 74 'plone.memoize', 75 'plone.outputfilters', 76 'plone.portlet.collection', 77 'plone.portlet.static', 78 'plone.portlets', 79 'plone.protect >= 3.0.0', 80 'plone.resource', 81 'plone.schema', 82 'plone.session', 83 'plone.staticresources', 84 'plone.theme', 85 'plonetheme.barceloneta', 86 'Products.CMFEditions', 87 'Products.DCWorkflow', 88 'Products.ExtendedPathIndex', 89 'Products.isurlinportal', 90 'Products.MimetypesRegistry', 91 'Products.PlonePAS', 92 'Products.PortalTransforms', 93 'Products.SiteErrorLog', 94 'Products.statusmessages', 95 'setuptools>=36.2', 96 'plone.autoinclude', 97 'webresource>=1.1', 98 'Zope[wsgi] >= 5.0', 99 'zope.app.locales >= 3.6.0', 100 'zope.cachedescriptors', 101 'zope.deferredimport', 102 'zope.deprecation', 103 'zope.dottedname', 104 'zope.i18n', 105 'zope.i18nmessageid', 106 'zope.structuredtext', 107 ], 108 extras_require={ 109 'test': [ 110 'lxml', 111 'mock', 112 'plone.app.robotframework>=1.0', 113 'robotframework-debuglibrary', 114 'plone.app.testing', 115 'zope.globalrequest', 116 'zope.testing', 117 'gunicorn', 118 ] 119 }, 120 ) 121 ``` --- END FILES --- Please first localize the bug based 
on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -32,6 +32,19 @@ author_email='[email protected]', url='https://plone.org', license='GPL version 2', + project_urls={ + "Homepage": "https://plone.org", + "Documentation": "https://docs.plone.org", + "PyPI": "https://pypi.python.org/pypi/Products.CMFPlone", + "Source": "https://github.com/plone/Products.CMFPlone", + "Issues": "https://github.com/plone/plone.org/Products.CMFPlone", + "Forum": "https://community.plone.org/", + "Chat": "https://discord.gg/zFY3EBbjaj", + "Mastodon": "https://plone.social/@plone", + "Twitter": "https://twitter.com/plone", + "Videos": "https://youtube.com/@plonecms", + "Sponsor": "https://github.com/sponsors/plone", + }, packages=find_packages(), namespace_packages=['Products'], include_package_data=True,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -32,6 +32,19 @@\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n+ project_urls={\n+ \"Homepage\": \"https://plone.org\",\n+ \"Documentation\": \"https://docs.plone.org\",\n+ \"PyPI\": \"https://pypi.python.org/pypi/Products.CMFPlone\",\n+ \"Source\": \"https://github.com/plone/Products.CMFPlone\",\n+ \"Issues\": \"https://github.com/plone/plone.org/Products.CMFPlone\",\n+ \"Forum\": \"https://community.plone.org/\",\n+ \"Chat\": \"https://discord.gg/zFY3EBbjaj\",\n+ \"Mastodon\": \"https://plone.social/@plone\",\n+ \"Twitter\": \"https://twitter.com/plone\",\n+ \"Videos\": \"https://youtube.com/@plonecms\",\n+ \"Sponsor\": \"https://github.com/sponsors/plone\",\n+ },\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n", "issue": "Update package metadata in PyPi\n\n", "before_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nversion = '6.0.0rc2.dev0'\n\n\nsetup(\n name='Products.CMFPlone',\n version=version,\n description=\"The Plone Content Management System (core)\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(\"CHANGES.rst\").read(),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 6.0\",\n \"Framework :: Plone :: Core\",\n \"Framework :: Zope :: 5\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires='>=3.8',\n keywords='Plone CMF Python Zope CMS Webapplication',\n author='Plone Foundation',\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'borg.localrole',\n 'five.customerize',\n 'lxml',\n 'plone.api >= 1.4.4',\n 'plone.app.content',\n 'plone.app.contentlisting',\n 'plone.app.contentmenu >= 2.0.1',\n 'plone.app.contentrules',\n 'plone.app.contenttypes',\n 'plone.app.customerize',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.i18n',\n 'plone.app.layout >= 2.5.15',\n 'plone.app.linkintegrity >=1.0.3',\n 'plone.app.locales',\n 'plone.app.multilingual',\n 'plone.app.portlets',\n 'plone.app.redirector',\n 'plone.app.registry',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.uuid',\n 'plone.app.viewletmanager',\n 'plone.app.vocabularies',\n 'plone.app.workflow',\n 'plone.base',\n 'plone.browserlayer >= 2.1.5',\n 'plone.contentrules',\n 'plone.folder',\n 'plone.i18n >= 4.0.5',\n 'plone.indexer',\n 'plone.intelligenttext',\n 'plone.locking',\n 'plone.memoize',\n 'plone.outputfilters',\n 'plone.portlet.collection',\n 'plone.portlet.static',\n 'plone.portlets',\n 'plone.protect >= 3.0.0',\n 'plone.resource',\n 'plone.schema',\n 'plone.session',\n 'plone.staticresources',\n 'plone.theme',\n 'plonetheme.barceloneta',\n 'Products.CMFEditions',\n 'Products.DCWorkflow',\n 'Products.ExtendedPathIndex',\n 'Products.isurlinportal',\n 'Products.MimetypesRegistry',\n 'Products.PlonePAS',\n 'Products.PortalTransforms',\n 'Products.SiteErrorLog',\n 
'Products.statusmessages',\n 'setuptools>=36.2',\n 'plone.autoinclude',\n 'webresource>=1.1',\n 'Zope[wsgi] >= 5.0',\n 'zope.app.locales >= 3.6.0',\n 'zope.cachedescriptors',\n 'zope.deferredimport',\n 'zope.deprecation',\n 'zope.dottedname',\n 'zope.i18n',\n 'zope.i18nmessageid',\n 'zope.structuredtext',\n ],\n extras_require={\n 'test': [\n 'lxml',\n 'mock',\n 'plone.app.robotframework>=1.0',\n 'robotframework-debuglibrary',\n 'plone.app.testing',\n 'zope.globalrequest',\n 'zope.testing',\n 'gunicorn',\n ]\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import find_packages\nfrom setuptools import setup\n\n\nversion = '6.0.0rc2.dev0'\n\n\nsetup(\n name='Products.CMFPlone',\n version=version,\n description=\"The Plone Content Management System (core)\",\n long_description=open(\"README.rst\").read() + \"\\n\" +\n open(\"CHANGES.rst\").read(),\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Plone\",\n \"Framework :: Plone :: 6.0\",\n \"Framework :: Plone :: Core\",\n \"Framework :: Zope :: 5\",\n \"License :: OSI Approved :: GNU General Public License v2 (GPLv2)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires='>=3.8',\n keywords='Plone CMF Python Zope CMS Webapplication',\n author='Plone Foundation',\n author_email='[email protected]',\n url='https://plone.org',\n license='GPL version 2',\n project_urls={\n \"Homepage\": \"https://plone.org\",\n \"Documentation\": \"https://docs.plone.org\",\n \"PyPI\": \"https://pypi.python.org/pypi/Products.CMFPlone\",\n \"Source\": \"https://github.com/plone/Products.CMFPlone\",\n \"Issues\": \"https://github.com/plone/plone.org/Products.CMFPlone\",\n \"Forum\": \"https://community.plone.org/\",\n \"Chat\": \"https://discord.gg/zFY3EBbjaj\",\n \"Mastodon\": \"https://plone.social/@plone\",\n \"Twitter\": \"https://twitter.com/plone\",\n \"Videos\": \"https://youtube.com/@plonecms\",\n \"Sponsor\": \"https://github.com/sponsors/plone\",\n },\n packages=find_packages(),\n namespace_packages=['Products'],\n include_package_data=True,\n zip_safe=False,\n install_requires=[\n 'borg.localrole',\n 'five.customerize',\n 'lxml',\n 'plone.api >= 1.4.4',\n 'plone.app.content',\n 'plone.app.contentlisting',\n 'plone.app.contentmenu >= 2.0.1',\n 'plone.app.contentrules',\n 'plone.app.contenttypes',\n 'plone.app.customerize',\n 'plone.app.dexterity',\n 'plone.app.discussion',\n 'plone.app.i18n',\n 'plone.app.layout >= 2.5.15',\n 'plone.app.linkintegrity >=1.0.3',\n 'plone.app.locales',\n 'plone.app.multilingual',\n 'plone.app.portlets',\n 'plone.app.redirector',\n 'plone.app.registry',\n 'plone.app.theming',\n 'plone.app.users',\n 'plone.app.uuid',\n 'plone.app.viewletmanager',\n 'plone.app.vocabularies',\n 'plone.app.workflow',\n 'plone.base',\n 'plone.browserlayer >= 2.1.5',\n 'plone.contentrules',\n 'plone.folder',\n 'plone.i18n >= 4.0.5',\n 'plone.indexer',\n 'plone.intelligenttext',\n 'plone.locking',\n 'plone.memoize',\n 'plone.outputfilters',\n 'plone.portlet.collection',\n 'plone.portlet.static',\n 'plone.portlets',\n 'plone.protect >= 3.0.0',\n 'plone.resource',\n 'plone.schema',\n 'plone.session',\n 'plone.staticresources',\n 'plone.theme',\n 'plonetheme.barceloneta',\n 'Products.CMFEditions',\n 
'Products.DCWorkflow',\n 'Products.ExtendedPathIndex',\n 'Products.isurlinportal',\n 'Products.MimetypesRegistry',\n 'Products.PlonePAS',\n 'Products.PortalTransforms',\n 'Products.SiteErrorLog',\n 'Products.statusmessages',\n 'setuptools>=36.2',\n 'plone.autoinclude',\n 'webresource>=1.1',\n 'Zope[wsgi] >= 5.0',\n 'zope.app.locales >= 3.6.0',\n 'zope.cachedescriptors',\n 'zope.deferredimport',\n 'zope.deprecation',\n 'zope.dottedname',\n 'zope.i18n',\n 'zope.i18nmessageid',\n 'zope.structuredtext',\n ],\n extras_require={\n 'test': [\n 'lxml',\n 'mock',\n 'plone.app.robotframework>=1.0',\n 'robotframework-debuglibrary',\n 'plone.app.testing',\n 'zope.globalrequest',\n 'zope.testing',\n 'gunicorn',\n ]\n },\n)\n", "path": "setup.py"}]}
num_tokens: 1,474
num_tokens_diff: 268
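The CMFPlone patch above is pure packaging metadata: it adds a `project_urls` mapping to the `setup()` call so that PyPI renders extra project links in the package sidebar. A minimal sketch of the same setuptools keyword, with a placeholder package name and URLs that are purely illustrative:

```python
from setuptools import setup

setup(
    name="example-package",      # hypothetical project, not Products.CMFPlone
    version="0.1.0",
    project_urls={               # shown as links on the project's PyPI page
        "Documentation": "https://example.org/docs",
        "Source": "https://example.org/src",
        "Issues": "https://example.org/issues",
    },
)
```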
problem_id: gh_patches_debug_14795
source: rasdani/github-patches
task_type: git_diff
in_source_id: open-mmlab__mmpretrain-1251
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug] The precision are not aligned between val during train and test ### Branch master branch (0.24 or other 0.x version) ### Describe the bug The precision of val in the train is 73, and the precision of test 76, they are not aligned ### Environment A100 or NPU ### Other information _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmcls/models/heads/multi_label_head.py` Content: ``` 1 # Copyright (c) OpenMMLab. All rights reserved. 2 import torch 3 4 from ..builder import HEADS, build_loss 5 from ..utils import is_tracing 6 from .base_head import BaseHead 7 8 9 @HEADS.register_module() 10 class MultiLabelClsHead(BaseHead): 11 """Classification head for multilabel task. 12 13 Args: 14 loss (dict): Config of classification loss. 15 """ 16 17 def __init__(self, 18 loss=dict( 19 type='CrossEntropyLoss', 20 use_sigmoid=True, 21 reduction='mean', 22 loss_weight=1.0), 23 init_cfg=None): 24 super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg) 25 26 assert isinstance(loss, dict) 27 28 self.compute_loss = build_loss(loss) 29 30 def loss(self, cls_score, gt_label): 31 gt_label = gt_label.type_as(cls_score) 32 num_samples = len(cls_score) 33 losses = dict() 34 35 # map difficult examples to positive ones 36 _gt_label = torch.abs(gt_label) 37 # compute loss 38 loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples) 39 losses['loss'] = loss 40 return losses 41 42 def forward_train(self, cls_score, gt_label, **kwargs): 43 if isinstance(cls_score, tuple): 44 cls_score = cls_score[-1] 45 gt_label = gt_label.type_as(cls_score) 46 losses = self.loss(cls_score, gt_label, **kwargs) 47 return losses 48 49 def pre_logits(self, x): 50 if isinstance(x, tuple): 51 x = x[-1] 52 53 from mmcls.utils import get_root_logger 54 logger = get_root_logger() 55 logger.warning( 56 'The input of MultiLabelClsHead should be already logits. ' 57 'Please modify the backbone if you want to get pre-logits feature.' 58 ) 59 return x 60 61 def simple_test(self, x, sigmoid=True, post_process=True): 62 """Inference without augmentation. 63 64 Args: 65 cls_score (tuple[Tensor]): The input classification score logits. 66 Multi-stage inputs are acceptable but only the last stage will 67 be used to classify. The shape of every item should be 68 ``(num_samples, num_classes)``. 69 sigmoid (bool): Whether to sigmoid the classification score. 70 post_process (bool): Whether to do post processing the 71 inference results. It will convert the output to a list. 72 73 Returns: 74 Tensor | list: The inference results. 75 76 - If no post processing, the output is a tensor with shape 77 ``(num_samples, num_classes)``. 78 - If post processing, the output is a multi-dimentional list of 79 float and the dimensions are ``(num_samples, num_classes)``. 80 """ 81 if isinstance(x, tuple): 82 x = x[-1] 83 84 if sigmoid: 85 pred = torch.sigmoid(x) if x is not None else None 86 else: 87 pred = x 88 89 if post_process: 90 return self.post_process(pred) 91 else: 92 return pred 93 94 def post_process(self, pred): 95 on_trace = is_tracing() 96 if torch.onnx.is_in_onnx_export() or on_trace: 97 return pred 98 pred = list(pred.detach().cpu().numpy()) 99 return pred 100 ``` Path: `mmcls/models/heads/multi_label_linear_head.py` Content: ``` 1 # Copyright (c) OpenMMLab. All rights reserved. 
2 import torch 3 import torch.nn as nn 4 5 from ..builder import HEADS 6 from .multi_label_head import MultiLabelClsHead 7 8 9 @HEADS.register_module() 10 class MultiLabelLinearClsHead(MultiLabelClsHead): 11 """Linear classification head for multilabel task. 12 13 Args: 14 num_classes (int): Number of categories. 15 in_channels (int): Number of channels in the input feature map. 16 loss (dict): Config of classification loss. 17 init_cfg (dict | optional): The extra init config of layers. 18 Defaults to use dict(type='Normal', layer='Linear', std=0.01). 19 """ 20 21 def __init__(self, 22 num_classes, 23 in_channels, 24 loss=dict( 25 type='CrossEntropyLoss', 26 use_sigmoid=True, 27 reduction='mean', 28 loss_weight=1.0), 29 init_cfg=dict(type='Normal', layer='Linear', std=0.01)): 30 super(MultiLabelLinearClsHead, self).__init__( 31 loss=loss, init_cfg=init_cfg) 32 33 if num_classes <= 0: 34 raise ValueError( 35 f'num_classes={num_classes} must be a positive integer') 36 37 self.in_channels = in_channels 38 self.num_classes = num_classes 39 40 self.fc = nn.Linear(self.in_channels, self.num_classes) 41 42 def pre_logits(self, x): 43 if isinstance(x, tuple): 44 x = x[-1] 45 return x 46 47 def forward_train(self, x, gt_label, **kwargs): 48 x = self.pre_logits(x) 49 gt_label = gt_label.type_as(x) 50 cls_score = self.fc(x) 51 losses = self.loss(cls_score, gt_label, **kwargs) 52 return losses 53 54 def simple_test(self, x, sigmoid=True, post_process=True): 55 """Inference without augmentation. 56 57 Args: 58 x (tuple[Tensor]): The input features. 59 Multi-stage inputs are acceptable but only the last stage will 60 be used to classify. The shape of every item should be 61 ``(num_samples, in_channels)``. 62 sigmoid (bool): Whether to sigmoid the classification score. 63 post_process (bool): Whether to do post processing the 64 inference results. It will convert the output to a list. 65 66 Returns: 67 Tensor | list: The inference results. 68 69 - If no post processing, the output is a tensor with shape 70 ``(num_samples, num_classes)``. 71 - If post processing, the output is a multi-dimentional list of 72 float and the dimensions are ``(num_samples, num_classes)``. 73 """ 74 x = self.pre_logits(x) 75 cls_score = self.fc(x) 76 77 if sigmoid: 78 pred = torch.sigmoid(cls_score) if cls_score is not None else None 79 else: 80 pred = cls_score 81 82 if post_process: 83 return self.post_process(pred) 84 else: 85 return pred 86 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmcls/models/heads/multi_label_head.py b/mmcls/models/heads/multi_label_head.py --- a/mmcls/models/heads/multi_label_head.py +++ b/mmcls/models/heads/multi_label_head.py @@ -82,7 +82,8 @@ x = x[-1] if sigmoid: - pred = torch.sigmoid(x) if x is not None else None + # Convert to full precision because sigmoid is sensitive. + pred = torch.sigmoid(x.float()) if x is not None else None else: pred = x diff --git a/mmcls/models/heads/multi_label_linear_head.py b/mmcls/models/heads/multi_label_linear_head.py --- a/mmcls/models/heads/multi_label_linear_head.py +++ b/mmcls/models/heads/multi_label_linear_head.py @@ -75,7 +75,9 @@ cls_score = self.fc(x) if sigmoid: - pred = torch.sigmoid(cls_score) if cls_score is not None else None + # Convert to full precision because sigmoid is sensitive. + pred = torch.sigmoid( + cls_score.float()) if cls_score is not None else None else: pred = cls_score
{"golden_diff": "diff --git a/mmcls/models/heads/multi_label_head.py b/mmcls/models/heads/multi_label_head.py\n--- a/mmcls/models/heads/multi_label_head.py\n+++ b/mmcls/models/heads/multi_label_head.py\n@@ -82,7 +82,8 @@\n x = x[-1]\n \n if sigmoid:\n- pred = torch.sigmoid(x) if x is not None else None\n+ # Convert to full precision because sigmoid is sensitive.\n+ pred = torch.sigmoid(x.float()) if x is not None else None\n else:\n pred = x\n \ndiff --git a/mmcls/models/heads/multi_label_linear_head.py b/mmcls/models/heads/multi_label_linear_head.py\n--- a/mmcls/models/heads/multi_label_linear_head.py\n+++ b/mmcls/models/heads/multi_label_linear_head.py\n@@ -75,7 +75,9 @@\n cls_score = self.fc(x)\n \n if sigmoid:\n- pred = torch.sigmoid(cls_score) if cls_score is not None else None\n+ # Convert to full precision because sigmoid is sensitive.\n+ pred = torch.sigmoid(\n+ cls_score.float()) if cls_score is not None else None\n else:\n pred = cls_score\n", "issue": "[Bug] The precision are not aligned between val during train and test \n### Branch\n\nmaster branch (0.24 or other 0.x version)\n\n### Describe the bug\n\nThe precision of val in the train is 73, and the precision of test 76, they are not aligned\r\n\r\n\n\n### Environment\n\nA100 or NPU\n\n### Other information\n\n_No response_\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import HEADS, build_loss\nfrom ..utils import is_tracing\nfrom .base_head import BaseHead\n\n\[email protected]_module()\nclass MultiLabelClsHead(BaseHead):\n \"\"\"Classification head for multilabel task.\n\n Args:\n loss (dict): Config of classification loss.\n \"\"\"\n\n def __init__(self,\n loss=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=1.0),\n init_cfg=None):\n super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg)\n\n assert isinstance(loss, dict)\n\n self.compute_loss = build_loss(loss)\n\n def loss(self, cls_score, gt_label):\n gt_label = gt_label.type_as(cls_score)\n num_samples = len(cls_score)\n losses = dict()\n\n # map difficult examples to positive ones\n _gt_label = torch.abs(gt_label)\n # compute loss\n loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples)\n losses['loss'] = loss\n return losses\n\n def forward_train(self, cls_score, gt_label, **kwargs):\n if isinstance(cls_score, tuple):\n cls_score = cls_score[-1]\n gt_label = gt_label.type_as(cls_score)\n losses = self.loss(cls_score, gt_label, **kwargs)\n return losses\n\n def pre_logits(self, x):\n if isinstance(x, tuple):\n x = x[-1]\n\n from mmcls.utils import get_root_logger\n logger = get_root_logger()\n logger.warning(\n 'The input of MultiLabelClsHead should be already logits. '\n 'Please modify the backbone if you want to get pre-logits feature.'\n )\n return x\n\n def simple_test(self, x, sigmoid=True, post_process=True):\n \"\"\"Inference without augmentation.\n\n Args:\n cls_score (tuple[Tensor]): The input classification score logits.\n Multi-stage inputs are acceptable but only the last stage will\n be used to classify. The shape of every item should be\n ``(num_samples, num_classes)``.\n sigmoid (bool): Whether to sigmoid the classification score.\n post_process (bool): Whether to do post processing the\n inference results. 
It will convert the output to a list.\n\n Returns:\n Tensor | list: The inference results.\n\n - If no post processing, the output is a tensor with shape\n ``(num_samples, num_classes)``.\n - If post processing, the output is a multi-dimentional list of\n float and the dimensions are ``(num_samples, num_classes)``.\n \"\"\"\n if isinstance(x, tuple):\n x = x[-1]\n\n if sigmoid:\n pred = torch.sigmoid(x) if x is not None else None\n else:\n pred = x\n\n if post_process:\n return self.post_process(pred)\n else:\n return pred\n\n def post_process(self, pred):\n on_trace = is_tracing()\n if torch.onnx.is_in_onnx_export() or on_trace:\n return pred\n pred = list(pred.detach().cpu().numpy())\n return pred\n", "path": "mmcls/models/heads/multi_label_head.py"}, {"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom ..builder import HEADS\nfrom .multi_label_head import MultiLabelClsHead\n\n\[email protected]_module()\nclass MultiLabelLinearClsHead(MultiLabelClsHead):\n \"\"\"Linear classification head for multilabel task.\n\n Args:\n num_classes (int): Number of categories.\n in_channels (int): Number of channels in the input feature map.\n loss (dict): Config of classification loss.\n init_cfg (dict | optional): The extra init config of layers.\n Defaults to use dict(type='Normal', layer='Linear', std=0.01).\n \"\"\"\n\n def __init__(self,\n num_classes,\n in_channels,\n loss=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=1.0),\n init_cfg=dict(type='Normal', layer='Linear', std=0.01)):\n super(MultiLabelLinearClsHead, self).__init__(\n loss=loss, init_cfg=init_cfg)\n\n if num_classes <= 0:\n raise ValueError(\n f'num_classes={num_classes} must be a positive integer')\n\n self.in_channels = in_channels\n self.num_classes = num_classes\n\n self.fc = nn.Linear(self.in_channels, self.num_classes)\n\n def pre_logits(self, x):\n if isinstance(x, tuple):\n x = x[-1]\n return x\n\n def forward_train(self, x, gt_label, **kwargs):\n x = self.pre_logits(x)\n gt_label = gt_label.type_as(x)\n cls_score = self.fc(x)\n losses = self.loss(cls_score, gt_label, **kwargs)\n return losses\n\n def simple_test(self, x, sigmoid=True, post_process=True):\n \"\"\"Inference without augmentation.\n\n Args:\n x (tuple[Tensor]): The input features.\n Multi-stage inputs are acceptable but only the last stage will\n be used to classify. The shape of every item should be\n ``(num_samples, in_channels)``.\n sigmoid (bool): Whether to sigmoid the classification score.\n post_process (bool): Whether to do post processing the\n inference results. It will convert the output to a list.\n\n Returns:\n Tensor | list: The inference results.\n\n - If no post processing, the output is a tensor with shape\n ``(num_samples, num_classes)``.\n - If post processing, the output is a multi-dimentional list of\n float and the dimensions are ``(num_samples, num_classes)``.\n \"\"\"\n x = self.pre_logits(x)\n cls_score = self.fc(x)\n\n if sigmoid:\n pred = torch.sigmoid(cls_score) if cls_score is not None else None\n else:\n pred = cls_score\n\n if post_process:\n return self.post_process(pred)\n else:\n return pred\n", "path": "mmcls/models/heads/multi_label_linear_head.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport torch\n\nfrom ..builder import HEADS, build_loss\nfrom ..utils import is_tracing\nfrom .base_head import BaseHead\n\n\[email protected]_module()\nclass MultiLabelClsHead(BaseHead):\n \"\"\"Classification head for multilabel task.\n\n Args:\n loss (dict): Config of classification loss.\n \"\"\"\n\n def __init__(self,\n loss=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=1.0),\n init_cfg=None):\n super(MultiLabelClsHead, self).__init__(init_cfg=init_cfg)\n\n assert isinstance(loss, dict)\n\n self.compute_loss = build_loss(loss)\n\n def loss(self, cls_score, gt_label):\n gt_label = gt_label.type_as(cls_score)\n num_samples = len(cls_score)\n losses = dict()\n\n # map difficult examples to positive ones\n _gt_label = torch.abs(gt_label)\n # compute loss\n loss = self.compute_loss(cls_score, _gt_label, avg_factor=num_samples)\n losses['loss'] = loss\n return losses\n\n def forward_train(self, cls_score, gt_label, **kwargs):\n if isinstance(cls_score, tuple):\n cls_score = cls_score[-1]\n gt_label = gt_label.type_as(cls_score)\n losses = self.loss(cls_score, gt_label, **kwargs)\n return losses\n\n def pre_logits(self, x):\n if isinstance(x, tuple):\n x = x[-1]\n\n from mmcls.utils import get_root_logger\n logger = get_root_logger()\n logger.warning(\n 'The input of MultiLabelClsHead should be already logits. '\n 'Please modify the backbone if you want to get pre-logits feature.'\n )\n return x\n\n def simple_test(self, x, sigmoid=True, post_process=True):\n \"\"\"Inference without augmentation.\n\n Args:\n cls_score (tuple[Tensor]): The input classification score logits.\n Multi-stage inputs are acceptable but only the last stage will\n be used to classify. The shape of every item should be\n ``(num_samples, num_classes)``.\n sigmoid (bool): Whether to sigmoid the classification score.\n post_process (bool): Whether to do post processing the\n inference results. It will convert the output to a list.\n\n Returns:\n Tensor | list: The inference results.\n\n - If no post processing, the output is a tensor with shape\n ``(num_samples, num_classes)``.\n - If post processing, the output is a multi-dimentional list of\n float and the dimensions are ``(num_samples, num_classes)``.\n \"\"\"\n if isinstance(x, tuple):\n x = x[-1]\n\n if sigmoid:\n # Convert to full precision because sigmoid is sensitive.\n pred = torch.sigmoid(x.float()) if x is not None else None\n else:\n pred = x\n\n if post_process:\n return self.post_process(pred)\n else:\n return pred\n\n def post_process(self, pred):\n on_trace = is_tracing()\n if torch.onnx.is_in_onnx_export() or on_trace:\n return pred\n pred = list(pred.detach().cpu().numpy())\n return pred\n", "path": "mmcls/models/heads/multi_label_head.py"}, {"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom ..builder import HEADS\nfrom .multi_label_head import MultiLabelClsHead\n\n\[email protected]_module()\nclass MultiLabelLinearClsHead(MultiLabelClsHead):\n \"\"\"Linear classification head for multilabel task.\n\n Args:\n num_classes (int): Number of categories.\n in_channels (int): Number of channels in the input feature map.\n loss (dict): Config of classification loss.\n init_cfg (dict | optional): The extra init config of layers.\n Defaults to use dict(type='Normal', layer='Linear', std=0.01).\n \"\"\"\n\n def __init__(self,\n num_classes,\n in_channels,\n loss=dict(\n type='CrossEntropyLoss',\n use_sigmoid=True,\n reduction='mean',\n loss_weight=1.0),\n init_cfg=dict(type='Normal', layer='Linear', std=0.01)):\n super(MultiLabelLinearClsHead, self).__init__(\n loss=loss, init_cfg=init_cfg)\n\n if num_classes <= 0:\n raise ValueError(\n f'num_classes={num_classes} must be a positive integer')\n\n self.in_channels = in_channels\n self.num_classes = num_classes\n\n self.fc = nn.Linear(self.in_channels, self.num_classes)\n\n def pre_logits(self, x):\n if isinstance(x, tuple):\n x = x[-1]\n return x\n\n def forward_train(self, x, gt_label, **kwargs):\n x = self.pre_logits(x)\n gt_label = gt_label.type_as(x)\n cls_score = self.fc(x)\n losses = self.loss(cls_score, gt_label, **kwargs)\n return losses\n\n def simple_test(self, x, sigmoid=True, post_process=True):\n \"\"\"Inference without augmentation.\n\n Args:\n x (tuple[Tensor]): The input features.\n Multi-stage inputs are acceptable but only the last stage will\n be used to classify. The shape of every item should be\n ``(num_samples, in_channels)``.\n sigmoid (bool): Whether to sigmoid the classification score.\n post_process (bool): Whether to do post processing the\n inference results. It will convert the output to a list.\n\n Returns:\n Tensor | list: The inference results.\n\n - If no post processing, the output is a tensor with shape\n ``(num_samples, num_classes)``.\n - If post processing, the output is a multi-dimentional list of\n float and the dimensions are ``(num_samples, num_classes)``.\n \"\"\"\n x = self.pre_logits(x)\n cls_score = self.fc(x)\n\n if sigmoid:\n # Convert to full precision because sigmoid is sensitive.\n pred = torch.sigmoid(\n cls_score.float()) if cls_score is not None else None\n else:\n pred = cls_score\n\n if post_process:\n return self.post_process(pred)\n else:\n return pred\n", "path": "mmcls/models/heads/multi_label_linear_head.py"}]}
num_tokens: 2,065
num_tokens_diff: 271
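The mmpretrain/mmcls patch above casts the classification scores to full precision before applying sigmoid; per the comment added in the diff, sigmoid is sensitive to reduced precision, which is what made validation-during-training and standalone testing report different accuracies. A small sketch of the pattern, with invented logits:

```python
import torch

def logits_to_multilabel_pred(cls_score):
    # Convert to full precision before sigmoid (same idea as the patch above).
    return torch.sigmoid(cls_score.float()) if cls_score is not None else None

logits = torch.tensor([2.5, -1.0, 0.3])          # made-up values for illustration
print(logits_to_multilabel_pred(logits.half()))  # sigmoid is computed in fp32
```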
problem_id: gh_patches_debug_42382
source: rasdani/github-patches
task_type: git_diff
in_source_id: lutris__lutris-2973
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add option to turn columns on/off in List View When right-clicking to table headbar in List View, you expect to get a menu for turning columns on/off, but you just select first game in the list. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lutris/gui/views/list.py` Content: ``` 1 """TreeView based game list""" 2 from gettext import gettext as _ 3 4 # Third Party Libraries 5 # pylint: disable=no-member 6 from gi.repository import Gtk, Pango 7 8 # Lutris Modules 9 from lutris import settings 10 from lutris.gui.views import ( 11 COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM, 12 COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES 13 ) 14 from lutris.gui.views.base import GameView 15 from lutris.gui.views.store import sort_func 16 17 18 class GameListView(Gtk.TreeView, GameView): 19 20 """Show the main list of games.""" 21 22 __gsignals__ = GameView.__gsignals__ 23 24 def __init__(self, store): 25 self.game_store = store 26 self.model = self.game_store.modelsort 27 super().__init__(self.model) 28 self.set_rules_hint(True) 29 30 # Icon column 31 image_cell = Gtk.CellRendererPixbuf() 32 column = Gtk.TreeViewColumn("", image_cell, pixbuf=COL_ICON) 33 column.set_reorderable(True) 34 column.set_sort_indicator(False) 35 self.append_column(column) 36 37 # Text columns 38 default_text_cell = self.set_text_cell() 39 name_cell = self.set_text_cell() 40 name_cell.set_padding(5, 0) 41 42 self.set_column(name_cell, _("Name"), COL_NAME, 200) 43 self.set_column(default_text_cell, _("Year"), COL_YEAR, 60) 44 self.set_column(default_text_cell, _("Runner"), COL_RUNNER_HUMAN_NAME, 120) 45 self.set_column(default_text_cell, _("Platform"), COL_PLATFORM, 120) 46 self.set_column(default_text_cell, _("Last Played"), COL_LASTPLAYED_TEXT, 120) 47 self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED) 48 self.set_column(default_text_cell, _("Installed At"), COL_INSTALLED_AT_TEXT, 120) 49 self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT) 50 self.set_column(default_text_cell, _("Play Time"), COL_PLAYTIME_TEXT, 100) 51 self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME) 52 53 self.get_selection().set_mode(Gtk.SelectionMode.SINGLE) 54 55 self.connect_signals() 56 self.connect("row-activated", self.on_row_activated) 57 self.get_selection().connect("changed", self.on_cursor_changed) 58 59 @staticmethod 60 def set_text_cell(): 61 text_cell = Gtk.CellRendererText() 62 text_cell.set_padding(10, 0) 63 text_cell.set_property("ellipsize", Pango.EllipsizeMode.END) 64 return text_cell 65 66 def set_column(self, cell, header, column_id, default_width, sort_id=None): 67 column = Gtk.TreeViewColumn(header, cell, markup=column_id) 68 column.set_sort_indicator(True) 69 column.set_sort_column_id(column_id if sort_id is None else sort_id) 70 self.set_column_sort(column_id if sort_id is None else sort_id) 71 column.set_resizable(True) 72 column.set_reorderable(True) 73 width = settings.read_setting("%s_column_width" % COLUMN_NAMES[column_id], "list view") 74 column.set_fixed_width(int(width) if width else default_width) 75 self.append_column(column) 76 column.connect("notify::width", self.on_column_width_changed) 77 return column 78 79 def set_column_sort(self, col): 80 """Sort a column and fallback to sorting 
by name and runner.""" 81 self.model.set_sort_func(col, sort_func, col) 82 83 def set_sort_with_column(self, col, sort_col): 84 """Sort a column by using another column's data""" 85 self.model.set_sort_func(col, sort_func, sort_col) 86 87 def get_selected_item(self): 88 """Return the currently selected game's id.""" 89 selection = self.get_selection() 90 if not selection: 91 return None 92 _model, select_iter = selection.get_selected() 93 if select_iter: 94 return select_iter 95 96 def select(self): 97 self.set_cursor(self.current_path[0]) 98 99 def set_selected_game(self, game_id): 100 row = self.game_store.get_row_by_id(game_id, filtered=True) 101 if row: 102 self.set_cursor(row.path) 103 104 def on_row_activated(self, widget, line=None, column=None): 105 """Handles double clicks""" 106 selected_item = self.get_selected_item() 107 if selected_item: 108 selected_game = self.get_selected_game(selected_item) 109 else: 110 selected_game = None 111 self.emit("game-activated", selected_game) 112 113 def on_cursor_changed(self, widget, _line=None, _column=None): 114 selected_item = self.get_selected_item() 115 if selected_item: 116 self.selected_game = self.get_selected_game(selected_item) 117 else: 118 self.selected_game = None 119 self.emit("game-selected", self.selected_game) 120 121 @staticmethod 122 def on_column_width_changed(col, *args): 123 col_name = col.get_title() 124 if col_name: 125 settings.write_setting( 126 col_name.replace(" ", "") + "_column_width", 127 col.get_fixed_width(), 128 "list view", 129 ) 130 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lutris/gui/views/list.py b/lutris/gui/views/list.py --- a/lutris/gui/views/list.py +++ b/lutris/gui/views/list.py @@ -39,7 +39,7 @@ name_cell = self.set_text_cell() name_cell.set_padding(5, 0) - self.set_column(name_cell, _("Name"), COL_NAME, 200) + self.set_column(name_cell, _("Name"), COL_NAME, 200, always_visible=True) self.set_column(default_text_cell, _("Year"), COL_YEAR, 60) self.set_column(default_text_cell, _("Runner"), COL_RUNNER_HUMAN_NAME, 120) self.set_column(default_text_cell, _("Platform"), COL_PLATFORM, 120) @@ -63,7 +63,7 @@ text_cell.set_property("ellipsize", Pango.EllipsizeMode.END) return text_cell - def set_column(self, cell, header, column_id, default_width, sort_id=None): + def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None): column = Gtk.TreeViewColumn(header, cell, markup=column_id) column.set_sort_indicator(True) column.set_sort_column_id(column_id if sort_id is None else sort_id) @@ -71,9 +71,12 @@ column.set_resizable(True) column.set_reorderable(True) width = settings.read_setting("%s_column_width" % COLUMN_NAMES[column_id], "list view") + is_visible = settings.read_setting("%s_visible" % COLUMN_NAMES[column_id], "list view") column.set_fixed_width(int(width) if width else default_width) + column.set_visible(is_visible == "True" or always_visible if is_visible else True) self.append_column(column) column.connect("notify::width", self.on_column_width_changed) + column.get_button().connect('button-press-event', self.on_column_header_button_pressed) return column def set_column_sort(self, col): @@ -101,6 +104,13 @@ if row: self.set_cursor(row.path) + def on_column_header_button_pressed(self, button, event): + """Handles column header button press events""" + if event.button == 3: + menu = GameListColumnToggleMenu(self.get_columns()) + menu.popup_at_pointer(None) + return True + def on_row_activated(self, widget, line=None, column=None): """Handles double clicks""" selected_item = self.get_selected_item() @@ -127,3 +137,37 @@ col.get_fixed_width(), "list view", ) + + +class GameListColumnToggleMenu(Gtk.Menu): + + def __init__(self, columns): + super().__init__() + self.columns = columns + self.column_map = {} + self.create_menuitems() + self.show_all() + + def create_menuitems(self): + for column in self.columns: + title = column.get_title() + if title == "": + continue + checkbox = Gtk.CheckMenuItem(title) + checkbox.set_active(column.get_visible()) + if title == _("Name"): + checkbox.set_sensitive(False) + else: + checkbox.connect("toggled", self.on_toggle_column) + self.column_map[checkbox] = column + self.append(checkbox) + + def on_toggle_column(self, check_menu_item): + column = self.column_map[check_menu_item] + is_visible = check_menu_item.get_active() + column.set_visible(is_visible) + settings.write_setting( + column.get_title().replace(" ", "") + "_visible", + str(is_visible), + "list view", + )
{"golden_diff": "diff --git a/lutris/gui/views/list.py b/lutris/gui/views/list.py\n--- a/lutris/gui/views/list.py\n+++ b/lutris/gui/views/list.py\n@@ -39,7 +39,7 @@\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n \n- self.set_column(name_cell, _(\"Name\"), COL_NAME, 200)\n+ self.set_column(name_cell, _(\"Name\"), COL_NAME, 200, always_visible=True)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n@@ -63,7 +63,7 @@\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n \n- def set_column(self, cell, header, column_id, default_width, sort_id=None):\n+ def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n@@ -71,9 +71,12 @@\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n+ is_visible = settings.read_setting(\"%s_visible\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n+ column.set_visible(is_visible == \"True\" or always_visible if is_visible else True)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n+ column.get_button().connect('button-press-event', self.on_column_header_button_pressed)\n return column\n \n def set_column_sort(self, col):\n@@ -101,6 +104,13 @@\n if row:\n self.set_cursor(row.path)\n \n+ def on_column_header_button_pressed(self, button, event):\n+ \"\"\"Handles column header button press events\"\"\"\n+ if event.button == 3:\n+ menu = GameListColumnToggleMenu(self.get_columns())\n+ menu.popup_at_pointer(None)\n+ return True\n+\n def on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n@@ -127,3 +137,37 @@\n col.get_fixed_width(),\n \"list view\",\n )\n+\n+\n+class GameListColumnToggleMenu(Gtk.Menu):\n+\n+ def __init__(self, columns):\n+ super().__init__()\n+ self.columns = columns\n+ self.column_map = {}\n+ self.create_menuitems()\n+ self.show_all()\n+\n+ def create_menuitems(self):\n+ for column in self.columns:\n+ title = column.get_title()\n+ if title == \"\":\n+ continue\n+ checkbox = Gtk.CheckMenuItem(title)\n+ checkbox.set_active(column.get_visible())\n+ if title == _(\"Name\"):\n+ checkbox.set_sensitive(False)\n+ else:\n+ checkbox.connect(\"toggled\", self.on_toggle_column)\n+ self.column_map[checkbox] = column\n+ self.append(checkbox)\n+\n+ def on_toggle_column(self, check_menu_item):\n+ column = self.column_map[check_menu_item]\n+ is_visible = check_menu_item.get_active()\n+ column.set_visible(is_visible)\n+ settings.write_setting(\n+ column.get_title().replace(\" \", \"\") + \"_visible\",\n+ str(is_visible),\n+ \"list view\",\n+ )\n", "issue": "Add option to turn columns on/off in List View\nWhen right-clicking to table headbar in List View, you expect to get a menu for turning columns on/off, but you just select first game in the list.\n", "before_files": [{"content": "\"\"\"TreeView based game list\"\"\"\nfrom gettext import gettext as _\n\n# Third Party Libraries\n# pylint: disable=no-member\nfrom gi.repository import Gtk, Pango\n\n# Lutris 
Modules\nfrom lutris import settings\nfrom lutris.gui.views import (\n COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM,\n COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES\n)\nfrom lutris.gui.views.base import GameView\nfrom lutris.gui.views.store import sort_func\n\n\nclass GameListView(Gtk.TreeView, GameView):\n\n \"\"\"Show the main list of games.\"\"\"\n\n __gsignals__ = GameView.__gsignals__\n\n def __init__(self, store):\n self.game_store = store\n self.model = self.game_store.modelsort\n super().__init__(self.model)\n self.set_rules_hint(True)\n\n # Icon column\n image_cell = Gtk.CellRendererPixbuf()\n column = Gtk.TreeViewColumn(\"\", image_cell, pixbuf=COL_ICON)\n column.set_reorderable(True)\n column.set_sort_indicator(False)\n self.append_column(column)\n\n # Text columns\n default_text_cell = self.set_text_cell()\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n\n self.set_column(name_cell, _(\"Name\"), COL_NAME, 200)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n self.set_column(default_text_cell, _(\"Last Played\"), COL_LASTPLAYED_TEXT, 120)\n self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)\n self.set_column(default_text_cell, _(\"Installed At\"), COL_INSTALLED_AT_TEXT, 120)\n self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)\n self.set_column(default_text_cell, _(\"Play Time\"), COL_PLAYTIME_TEXT, 100)\n self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME)\n\n self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)\n\n self.connect_signals()\n self.connect(\"row-activated\", self.on_row_activated)\n self.get_selection().connect(\"changed\", self.on_cursor_changed)\n\n @staticmethod\n def set_text_cell():\n text_cell = Gtk.CellRendererText()\n text_cell.set_padding(10, 0)\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n\n def set_column(self, cell, header, column_id, default_width, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n self.set_column_sort(column_id if sort_id is None else sort_id)\n column.set_resizable(True)\n column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n return column\n\n def set_column_sort(self, col):\n \"\"\"Sort a column and fallback to sorting by name and runner.\"\"\"\n self.model.set_sort_func(col, sort_func, col)\n\n def set_sort_with_column(self, col, sort_col):\n \"\"\"Sort a column by using another column's data\"\"\"\n self.model.set_sort_func(col, sort_func, sort_col)\n\n def get_selected_item(self):\n \"\"\"Return the currently selected game's id.\"\"\"\n selection = self.get_selection()\n if not selection:\n return None\n _model, select_iter = selection.get_selected()\n if select_iter:\n return select_iter\n\n def select(self):\n self.set_cursor(self.current_path[0])\n\n def set_selected_game(self, game_id):\n row = self.game_store.get_row_by_id(game_id, filtered=True)\n if row:\n self.set_cursor(row.path)\n\n def 
on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n if selected_item:\n selected_game = self.get_selected_game(selected_item)\n else:\n selected_game = None\n self.emit(\"game-activated\", selected_game)\n\n def on_cursor_changed(self, widget, _line=None, _column=None):\n selected_item = self.get_selected_item()\n if selected_item:\n self.selected_game = self.get_selected_game(selected_item)\n else:\n self.selected_game = None\n self.emit(\"game-selected\", self.selected_game)\n\n @staticmethod\n def on_column_width_changed(col, *args):\n col_name = col.get_title()\n if col_name:\n settings.write_setting(\n col_name.replace(\" \", \"\") + \"_column_width\",\n col.get_fixed_width(),\n \"list view\",\n )\n", "path": "lutris/gui/views/list.py"}], "after_files": [{"content": "\"\"\"TreeView based game list\"\"\"\nfrom gettext import gettext as _\n\n# Third Party Libraries\n# pylint: disable=no-member\nfrom gi.repository import Gtk, Pango\n\n# Lutris Modules\nfrom lutris import settings\nfrom lutris.gui.views import (\n COL_ICON, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_NAME, COL_PLATFORM,\n COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER_HUMAN_NAME, COL_YEAR, COLUMN_NAMES\n)\nfrom lutris.gui.views.base import GameView\nfrom lutris.gui.views.store import sort_func\n\n\nclass GameListView(Gtk.TreeView, GameView):\n\n \"\"\"Show the main list of games.\"\"\"\n\n __gsignals__ = GameView.__gsignals__\n\n def __init__(self, store):\n self.game_store = store\n self.model = self.game_store.modelsort\n super().__init__(self.model)\n self.set_rules_hint(True)\n\n # Icon column\n image_cell = Gtk.CellRendererPixbuf()\n column = Gtk.TreeViewColumn(\"\", image_cell, pixbuf=COL_ICON)\n column.set_reorderable(True)\n column.set_sort_indicator(False)\n self.append_column(column)\n\n # Text columns\n default_text_cell = self.set_text_cell()\n name_cell = self.set_text_cell()\n name_cell.set_padding(5, 0)\n\n self.set_column(name_cell, _(\"Name\"), COL_NAME, 200, always_visible=True)\n self.set_column(default_text_cell, _(\"Year\"), COL_YEAR, 60)\n self.set_column(default_text_cell, _(\"Runner\"), COL_RUNNER_HUMAN_NAME, 120)\n self.set_column(default_text_cell, _(\"Platform\"), COL_PLATFORM, 120)\n self.set_column(default_text_cell, _(\"Last Played\"), COL_LASTPLAYED_TEXT, 120)\n self.set_sort_with_column(COL_LASTPLAYED_TEXT, COL_LASTPLAYED)\n self.set_column(default_text_cell, _(\"Installed At\"), COL_INSTALLED_AT_TEXT, 120)\n self.set_sort_with_column(COL_INSTALLED_AT_TEXT, COL_INSTALLED_AT)\n self.set_column(default_text_cell, _(\"Play Time\"), COL_PLAYTIME_TEXT, 100)\n self.set_sort_with_column(COL_PLAYTIME_TEXT, COL_PLAYTIME)\n\n self.get_selection().set_mode(Gtk.SelectionMode.SINGLE)\n\n self.connect_signals()\n self.connect(\"row-activated\", self.on_row_activated)\n self.get_selection().connect(\"changed\", self.on_cursor_changed)\n\n @staticmethod\n def set_text_cell():\n text_cell = Gtk.CellRendererText()\n text_cell.set_padding(10, 0)\n text_cell.set_property(\"ellipsize\", Pango.EllipsizeMode.END)\n return text_cell\n\n def set_column(self, cell, header, column_id, default_width, always_visible=False, sort_id=None):\n column = Gtk.TreeViewColumn(header, cell, markup=column_id)\n column.set_sort_indicator(True)\n column.set_sort_column_id(column_id if sort_id is None else sort_id)\n self.set_column_sort(column_id if sort_id is None else sort_id)\n column.set_resizable(True)\n 
column.set_reorderable(True)\n width = settings.read_setting(\"%s_column_width\" % COLUMN_NAMES[column_id], \"list view\")\n is_visible = settings.read_setting(\"%s_visible\" % COLUMN_NAMES[column_id], \"list view\")\n column.set_fixed_width(int(width) if width else default_width)\n column.set_visible(is_visible == \"True\" or always_visible if is_visible else True)\n self.append_column(column)\n column.connect(\"notify::width\", self.on_column_width_changed)\n column.get_button().connect('button-press-event', self.on_column_header_button_pressed)\n return column\n\n def set_column_sort(self, col):\n \"\"\"Sort a column and fallback to sorting by name and runner.\"\"\"\n self.model.set_sort_func(col, sort_func, col)\n\n def set_sort_with_column(self, col, sort_col):\n \"\"\"Sort a column by using another column's data\"\"\"\n self.model.set_sort_func(col, sort_func, sort_col)\n\n def get_selected_item(self):\n \"\"\"Return the currently selected game's id.\"\"\"\n selection = self.get_selection()\n if not selection:\n return None\n _model, select_iter = selection.get_selected()\n if select_iter:\n return select_iter\n\n def select(self):\n self.set_cursor(self.current_path[0])\n\n def set_selected_game(self, game_id):\n row = self.game_store.get_row_by_id(game_id, filtered=True)\n if row:\n self.set_cursor(row.path)\n\n def on_column_header_button_pressed(self, button, event):\n \"\"\"Handles column header button press events\"\"\"\n if event.button == 3:\n menu = GameListColumnToggleMenu(self.get_columns())\n menu.popup_at_pointer(None)\n return True\n\n def on_row_activated(self, widget, line=None, column=None):\n \"\"\"Handles double clicks\"\"\"\n selected_item = self.get_selected_item()\n if selected_item:\n selected_game = self.get_selected_game(selected_item)\n else:\n selected_game = None\n self.emit(\"game-activated\", selected_game)\n\n def on_cursor_changed(self, widget, _line=None, _column=None):\n selected_item = self.get_selected_item()\n if selected_item:\n self.selected_game = self.get_selected_game(selected_item)\n else:\n self.selected_game = None\n self.emit(\"game-selected\", self.selected_game)\n\n @staticmethod\n def on_column_width_changed(col, *args):\n col_name = col.get_title()\n if col_name:\n settings.write_setting(\n col_name.replace(\" \", \"\") + \"_column_width\",\n col.get_fixed_width(),\n \"list view\",\n )\n\n\nclass GameListColumnToggleMenu(Gtk.Menu):\n\n def __init__(self, columns):\n super().__init__()\n self.columns = columns\n self.column_map = {}\n self.create_menuitems()\n self.show_all()\n\n def create_menuitems(self):\n for column in self.columns:\n title = column.get_title()\n if title == \"\":\n continue\n checkbox = Gtk.CheckMenuItem(title)\n checkbox.set_active(column.get_visible())\n if title == _(\"Name\"):\n checkbox.set_sensitive(False)\n else:\n checkbox.connect(\"toggled\", self.on_toggle_column)\n self.column_map[checkbox] = column\n self.append(checkbox)\n\n def on_toggle_column(self, check_menu_item):\n column = self.column_map[check_menu_item]\n is_visible = check_menu_item.get_active()\n column.set_visible(is_visible)\n settings.write_setting(\n column.get_title().replace(\" \", \"\") + \"_visible\",\n str(is_visible),\n \"list view\",\n )\n", "path": "lutris/gui/views/list.py"}]}
1,718
839
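The lutris patch above persists each column's visibility the same way the view already persists widths: a per-column setting written as the string "True"/"False", re-read on startup, with the Name column forced visible. A minimal sketch of that string round-trip, detached from GTK; the helper names here are illustrative and not part of Lutris:

```python
def visibility_from_setting(raw, always_visible=False, default=True):
    """Interpret a stored "True"/"False" string; an absent setting means the default."""
    if raw in (None, ""):
        return default
    return raw == "True" or always_visible


def visibility_to_setting(is_visible):
    """Write the boolean back in the string form the reader above expects."""
    return str(is_visible)


# Round-trip behaves like the patched set_column()/on_toggle_column() pair:
assert visibility_from_setting(visibility_to_setting(False)) is False
assert visibility_from_setting(None) is True
assert visibility_from_setting("False", always_visible=True) is True
```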
gh_patches_debug_35481
rasdani/github-patches
git_diff
mdn__kuma-6144
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Magic is incorrectly using image/svg as MIME type for SVG files, preventing uploads of SVG files Magic or libmagic is incorrectly using `image/svg` as the MIME type for SVG uploads, rather than `image/svg+xml`, which is the only correct MIME type [1] (and the one we've always used in the past). Normally, we keep SVG uploads disabled for security reasons since SVG can contain JavaScript code. However, we like to use it for diagrams and the like because they can be translated, and they are more efficient for that purpose in many cases. When we want to upload SVG, we go to the admin panel in Django and in the Constance panel, add "image/svg+xml" to the `WIKI_ATTACHMENT_ALLOWED_TYPES` string. Then we upload the file and restore the previous string to that preference. This is no longer working; even with `image/svg+xml` listed in the preference, uploads fail with an error announcing that "Files of this type are not permitted." This should work, however. This issue is blocking work on WebXR and potentially other documentation that requires the use of diagrams to explain things properly. Hopefully the fix is not complicated. [1] https://www.iana.org/assignments/media-types/media-types.xhtml#image --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kuma/attachments/forms.py` Content: ``` 1 import magic 2 from constance import config 3 from django import forms 4 from django.core.validators import EMPTY_VALUES 5 from django.utils.translation import ugettext_lazy as _ 6 7 from .models import AttachmentRevision 8 9 10 MIME_TYPE_INVALID = _('Files of this type are not permitted.') 11 12 13 class AttachmentRevisionForm(forms.ModelForm): 14 """ 15 Unlike the DocumentForm/RevisionForm split, we have only one 16 form for file attachments. The handling view will determine if 17 this is a new revision of an existing file, or the first version 18 of a new file. 19 20 As a result of this, calling save(commit=True) is off-limits. 21 """ 22 class Meta: 23 model = AttachmentRevision 24 fields = ('file', 'title', 'description', 'comment') 25 26 def __init__(self, *args, **kwargs): 27 super(AttachmentRevisionForm, self).__init__(*args, **kwargs) 28 self.mime_type = None 29 30 def clean(self): 31 """ 32 Check the submitted file for its MIME type in case the provided 33 MIME type is missing or is the default MIME type as given in the 34 model field definition. 35 36 That allows overriding the MIME type via the admin UI. 
37 """ 38 cleaned_data = super(AttachmentRevisionForm, self).clean() 39 nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,) 40 submitted_mime_type = cleaned_data.get('mime_type') 41 42 if (submitted_mime_type in nulls) and ('file' in cleaned_data): 43 self.mime_type = self.mime_type_from_file(cleaned_data['file']) 44 allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split() 45 if self.mime_type not in allowed_mime_types: 46 raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid') 47 48 return cleaned_data 49 50 def save(self, *args, **kwargs): 51 revision = super(AttachmentRevisionForm, self).save(*args, **kwargs) 52 if self.mime_type is not None: 53 revision.mime_type = self.mime_type 54 return revision 55 56 def mime_type_from_file(self, file): 57 m_mime = magic.Magic(mime=True) 58 mime_type = m_mime.from_buffer(file.read(1024)).split(';')[0] 59 file.seek(0) 60 return mime_type 61 62 63 class AdminAttachmentRevisionForm(AttachmentRevisionForm): 64 class Meta(AttachmentRevisionForm.Meta): 65 fields = ['attachment', 'file', 'title', 'mime_type', 'description', 66 'is_approved'] 67 ``` Path: `kuma/attachments/views.py` Content: ``` 1 import mimetypes 2 3 from django.conf import settings 4 from django.core.exceptions import PermissionDenied 5 from django.http import Http404, StreamingHttpResponse 6 from django.shortcuts import get_object_or_404, redirect, render 7 from django.views.decorators.cache import cache_control, never_cache 8 from django.views.decorators.clickjacking import xframe_options_sameorigin 9 10 from kuma.core.decorators import (ensure_wiki_domain, login_required, 11 shared_cache_control) 12 from kuma.core.utils import is_untrusted 13 from kuma.wiki.decorators import process_document_path 14 from kuma.wiki.models import Document 15 16 from .forms import AttachmentRevisionForm 17 from .models import Attachment 18 from .utils import allow_add_attachment_by, convert_to_http_date 19 20 21 # Mime types used on MDN 22 OVERRIDE_MIMETYPES = { 23 'image/jpeg': '.jpeg, .jpg, .jpe', 24 'image/vnd.adobe.photoshop': '.psd', 25 } 26 27 IMAGE_MIMETYPES = ['image/png', 'image/jpeg', 'image/jpg', 'image/gif'] 28 29 30 def guess_extension(_type): 31 return OVERRIDE_MIMETYPES.get(_type, mimetypes.guess_extension(_type)) 32 33 34 @cache_control(public=True, max_age=60 * 15) 35 def raw_file(request, attachment_id, filename): 36 """ 37 Serve up an attachment's file. 
38 """ 39 qs = Attachment.objects.select_related('current_revision') 40 attachment = get_object_or_404(qs, pk=attachment_id) 41 if attachment.current_revision is None: 42 raise Http404 43 44 if is_untrusted(request): 45 rev = attachment.current_revision 46 response = StreamingHttpResponse(rev.file, content_type=rev.mime_type) 47 response['Content-Length'] = rev.file.size 48 response['Last-Modified'] = convert_to_http_date(rev.created) 49 response['X-Frame-Options'] = 'ALLOW-FROM %s' % settings.DOMAIN 50 return response 51 52 return redirect(attachment.get_file_url(), permanent=True) 53 54 55 @shared_cache_control(s_maxage=60 * 60 * 24 * 30) 56 def mindtouch_file_redirect(request, file_id, filename): 57 """Redirect an old MindTouch file URL to a new kuma file URL.""" 58 attachment = get_object_or_404(Attachment, mindtouch_attachment_id=file_id) 59 return redirect(attachment.get_file_url(), permanent=True) 60 61 62 @ensure_wiki_domain 63 @never_cache 64 @xframe_options_sameorigin 65 @login_required 66 @process_document_path 67 def edit_attachment(request, document_slug, document_locale): 68 """ 69 Create a new Attachment object and populate its initial 70 revision or show a separate form view that allows to fix form submission 71 errors. 72 73 Redirects back to the document's editing URL on success. 74 """ 75 document = get_object_or_404( 76 Document, 77 locale=document_locale, 78 slug=document_slug, 79 ) 80 if request.method != 'POST': 81 return redirect(document.get_edit_url()) 82 83 # No access if no permissions to upload 84 if not allow_add_attachment_by(request.user): 85 raise PermissionDenied 86 87 form = AttachmentRevisionForm(data=request.POST, files=request.FILES) 88 if form.is_valid(): 89 revision = form.save(commit=False) 90 revision.creator = request.user 91 attachment = Attachment.objects.create(title=revision.title) 92 revision.attachment = attachment 93 revision.save() 94 # adding the attachment to the document's files (M2M) 95 attachment.attach(document, request.user, revision) 96 return redirect(document.get_edit_url()) 97 else: 98 context = { 99 'form': form, 100 'document': document, 101 } 102 return render(request, 'attachments/edit_attachment.html', context) 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kuma/attachments/forms.py b/kuma/attachments/forms.py --- a/kuma/attachments/forms.py +++ b/kuma/attachments/forms.py @@ -24,6 +24,7 @@ fields = ('file', 'title', 'description', 'comment') def __init__(self, *args, **kwargs): + self.allow_svg_uploads = kwargs.pop('allow_svg_uploads', False) super(AttachmentRevisionForm, self).__init__(*args, **kwargs) self.mime_type = None @@ -39,11 +40,21 @@ nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,) submitted_mime_type = cleaned_data.get('mime_type') - if (submitted_mime_type in nulls) and ('file' in cleaned_data): + if submitted_mime_type in nulls and 'file' in cleaned_data: self.mime_type = self.mime_type_from_file(cleaned_data['file']) - allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split() - if self.mime_type not in allowed_mime_types: - raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid') + if self.mime_type.startswith('image/svg') and self.allow_svg_uploads: + # The `magic.Magic()` will, for unknown reasons, sometimes + # think an SVG image's mime type is `image/svg` which not + # a valid mime type actually. + # See https://www.iana.org/assignments/media-types/media-types.xhtml#image + # So correct that. + if self.mime_type == 'image/svg': + self.mime_type = 'image/svg+xml' + else: + allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split() + if self.mime_type not in allowed_mime_types: + raise forms.ValidationError( + MIME_TYPE_INVALID, code='invalid') return cleaned_data diff --git a/kuma/attachments/views.py b/kuma/attachments/views.py --- a/kuma/attachments/views.py +++ b/kuma/attachments/views.py @@ -84,7 +84,13 @@ if not allow_add_attachment_by(request.user): raise PermissionDenied - form = AttachmentRevisionForm(data=request.POST, files=request.FILES) + form = AttachmentRevisionForm( + data=request.POST, + files=request.FILES, + # Only staff users are allowed to upload SVG files because SVG files + # can contain embedded inline scripts. + allow_svg_uploads=request.user.is_staff + ) if form.is_valid(): revision = form.save(commit=False) revision.creator = request.user
{"golden_diff": "diff --git a/kuma/attachments/forms.py b/kuma/attachments/forms.py\n--- a/kuma/attachments/forms.py\n+++ b/kuma/attachments/forms.py\n@@ -24,6 +24,7 @@\n fields = ('file', 'title', 'description', 'comment')\n \n def __init__(self, *args, **kwargs):\n+ self.allow_svg_uploads = kwargs.pop('allow_svg_uploads', False)\n super(AttachmentRevisionForm, self).__init__(*args, **kwargs)\n self.mime_type = None\n \n@@ -39,11 +40,21 @@\n nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,)\n submitted_mime_type = cleaned_data.get('mime_type')\n \n- if (submitted_mime_type in nulls) and ('file' in cleaned_data):\n+ if submitted_mime_type in nulls and 'file' in cleaned_data:\n self.mime_type = self.mime_type_from_file(cleaned_data['file'])\n- allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split()\n- if self.mime_type not in allowed_mime_types:\n- raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid')\n+ if self.mime_type.startswith('image/svg') and self.allow_svg_uploads:\n+ # The `magic.Magic()` will, for unknown reasons, sometimes\n+ # think an SVG image's mime type is `image/svg` which not\n+ # a valid mime type actually.\n+ # See https://www.iana.org/assignments/media-types/media-types.xhtml#image\n+ # So correct that.\n+ if self.mime_type == 'image/svg':\n+ self.mime_type = 'image/svg+xml'\n+ else:\n+ allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split()\n+ if self.mime_type not in allowed_mime_types:\n+ raise forms.ValidationError(\n+ MIME_TYPE_INVALID, code='invalid')\n \n return cleaned_data\n \ndiff --git a/kuma/attachments/views.py b/kuma/attachments/views.py\n--- a/kuma/attachments/views.py\n+++ b/kuma/attachments/views.py\n@@ -84,7 +84,13 @@\n if not allow_add_attachment_by(request.user):\n raise PermissionDenied\n \n- form = AttachmentRevisionForm(data=request.POST, files=request.FILES)\n+ form = AttachmentRevisionForm(\n+ data=request.POST,\n+ files=request.FILES,\n+ # Only staff users are allowed to upload SVG files because SVG files\n+ # can contain embedded inline scripts.\n+ allow_svg_uploads=request.user.is_staff\n+ )\n if form.is_valid():\n revision = form.save(commit=False)\n revision.creator = request.user\n", "issue": "Magic is incorrectly using image/svg as MIME type for SVG files, preventing uploads of SVG files\nMagic or libmagic is incorrectly using `image/svg` as the MIME type for SVG uploads, rather than `image/svg+xml`, which is the only correct MIME type [1] (and the one we've always used in the past).\r\n\r\nNormally, we keep SVG uploads disabled for security reasons since SVG can contain JavaScript code. However, we like to use it for diagrams and the like because they can be translated, and they are more efficient for that purpose in many cases. When we want to upload SVG, we go to the admin panel in Django and in the Constance panel, add \"image/svg+xml\" to the `WIKI_ATTACHMENT_ALLOWED_TYPES` string. Then we upload the file and restore the previous string to that preference.\r\n\r\nThis is no longer working; even with `image/svg+xml` listed in the preference, uploads fail with an error announcing that \"Files of this type are not permitted.\" This should work, however.\r\n\r\nThis issue is blocking work on WebXR and potentially other documentation that requires the use of diagrams to explain things properly. 
Hopefully the fix is not complicated.\r\n\r\n[1] https://www.iana.org/assignments/media-types/media-types.xhtml#image\n", "before_files": [{"content": "import magic\nfrom constance import config\nfrom django import forms\nfrom django.core.validators import EMPTY_VALUES\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import AttachmentRevision\n\n\nMIME_TYPE_INVALID = _('Files of this type are not permitted.')\n\n\nclass AttachmentRevisionForm(forms.ModelForm):\n \"\"\"\n Unlike the DocumentForm/RevisionForm split, we have only one\n form for file attachments. The handling view will determine if\n this is a new revision of an existing file, or the first version\n of a new file.\n\n As a result of this, calling save(commit=True) is off-limits.\n \"\"\"\n class Meta:\n model = AttachmentRevision\n fields = ('file', 'title', 'description', 'comment')\n\n def __init__(self, *args, **kwargs):\n super(AttachmentRevisionForm, self).__init__(*args, **kwargs)\n self.mime_type = None\n\n def clean(self):\n \"\"\"\n Check the submitted file for its MIME type in case the provided\n MIME type is missing or is the default MIME type as given in the\n model field definition.\n\n That allows overriding the MIME type via the admin UI.\n \"\"\"\n cleaned_data = super(AttachmentRevisionForm, self).clean()\n nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,)\n submitted_mime_type = cleaned_data.get('mime_type')\n\n if (submitted_mime_type in nulls) and ('file' in cleaned_data):\n self.mime_type = self.mime_type_from_file(cleaned_data['file'])\n allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split()\n if self.mime_type not in allowed_mime_types:\n raise forms.ValidationError(MIME_TYPE_INVALID, code='invalid')\n\n return cleaned_data\n\n def save(self, *args, **kwargs):\n revision = super(AttachmentRevisionForm, self).save(*args, **kwargs)\n if self.mime_type is not None:\n revision.mime_type = self.mime_type\n return revision\n\n def mime_type_from_file(self, file):\n m_mime = magic.Magic(mime=True)\n mime_type = m_mime.from_buffer(file.read(1024)).split(';')[0]\n file.seek(0)\n return mime_type\n\n\nclass AdminAttachmentRevisionForm(AttachmentRevisionForm):\n class Meta(AttachmentRevisionForm.Meta):\n fields = ['attachment', 'file', 'title', 'mime_type', 'description',\n 'is_approved']\n", "path": "kuma/attachments/forms.py"}, {"content": "import mimetypes\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, StreamingHttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.decorators.cache import cache_control, never_cache\nfrom django.views.decorators.clickjacking import xframe_options_sameorigin\n\nfrom kuma.core.decorators import (ensure_wiki_domain, login_required,\n shared_cache_control)\nfrom kuma.core.utils import is_untrusted\nfrom kuma.wiki.decorators import process_document_path\nfrom kuma.wiki.models import Document\n\nfrom .forms import AttachmentRevisionForm\nfrom .models import Attachment\nfrom .utils import allow_add_attachment_by, convert_to_http_date\n\n\n# Mime types used on MDN\nOVERRIDE_MIMETYPES = {\n 'image/jpeg': '.jpeg, .jpg, .jpe',\n 'image/vnd.adobe.photoshop': '.psd',\n}\n\nIMAGE_MIMETYPES = ['image/png', 'image/jpeg', 'image/jpg', 'image/gif']\n\n\ndef guess_extension(_type):\n return OVERRIDE_MIMETYPES.get(_type, mimetypes.guess_extension(_type))\n\n\n@cache_control(public=True, max_age=60 * 15)\ndef raw_file(request, 
attachment_id, filename):\n \"\"\"\n Serve up an attachment's file.\n \"\"\"\n qs = Attachment.objects.select_related('current_revision')\n attachment = get_object_or_404(qs, pk=attachment_id)\n if attachment.current_revision is None:\n raise Http404\n\n if is_untrusted(request):\n rev = attachment.current_revision\n response = StreamingHttpResponse(rev.file, content_type=rev.mime_type)\n response['Content-Length'] = rev.file.size\n response['Last-Modified'] = convert_to_http_date(rev.created)\n response['X-Frame-Options'] = 'ALLOW-FROM %s' % settings.DOMAIN\n return response\n\n return redirect(attachment.get_file_url(), permanent=True)\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 30)\ndef mindtouch_file_redirect(request, file_id, filename):\n \"\"\"Redirect an old MindTouch file URL to a new kuma file URL.\"\"\"\n attachment = get_object_or_404(Attachment, mindtouch_attachment_id=file_id)\n return redirect(attachment.get_file_url(), permanent=True)\n\n\n@ensure_wiki_domain\n@never_cache\n@xframe_options_sameorigin\n@login_required\n@process_document_path\ndef edit_attachment(request, document_slug, document_locale):\n \"\"\"\n Create a new Attachment object and populate its initial\n revision or show a separate form view that allows to fix form submission\n errors.\n\n Redirects back to the document's editing URL on success.\n \"\"\"\n document = get_object_or_404(\n Document,\n locale=document_locale,\n slug=document_slug,\n )\n if request.method != 'POST':\n return redirect(document.get_edit_url())\n\n # No access if no permissions to upload\n if not allow_add_attachment_by(request.user):\n raise PermissionDenied\n\n form = AttachmentRevisionForm(data=request.POST, files=request.FILES)\n if form.is_valid():\n revision = form.save(commit=False)\n revision.creator = request.user\n attachment = Attachment.objects.create(title=revision.title)\n revision.attachment = attachment\n revision.save()\n # adding the attachment to the document's files (M2M)\n attachment.attach(document, request.user, revision)\n return redirect(document.get_edit_url())\n else:\n context = {\n 'form': form,\n 'document': document,\n }\n return render(request, 'attachments/edit_attachment.html', context)\n", "path": "kuma/attachments/views.py"}], "after_files": [{"content": "import magic\nfrom constance import config\nfrom django import forms\nfrom django.core.validators import EMPTY_VALUES\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom .models import AttachmentRevision\n\n\nMIME_TYPE_INVALID = _('Files of this type are not permitted.')\n\n\nclass AttachmentRevisionForm(forms.ModelForm):\n \"\"\"\n Unlike the DocumentForm/RevisionForm split, we have only one\n form for file attachments. 
The handling view will determine if\n this is a new revision of an existing file, or the first version\n of a new file.\n\n As a result of this, calling save(commit=True) is off-limits.\n \"\"\"\n class Meta:\n model = AttachmentRevision\n fields = ('file', 'title', 'description', 'comment')\n\n def __init__(self, *args, **kwargs):\n self.allow_svg_uploads = kwargs.pop('allow_svg_uploads', False)\n super(AttachmentRevisionForm, self).__init__(*args, **kwargs)\n self.mime_type = None\n\n def clean(self):\n \"\"\"\n Check the submitted file for its MIME type in case the provided\n MIME type is missing or is the default MIME type as given in the\n model field definition.\n\n That allows overriding the MIME type via the admin UI.\n \"\"\"\n cleaned_data = super(AttachmentRevisionForm, self).clean()\n nulls = EMPTY_VALUES + (AttachmentRevision.DEFAULT_MIME_TYPE,)\n submitted_mime_type = cleaned_data.get('mime_type')\n\n if submitted_mime_type in nulls and 'file' in cleaned_data:\n self.mime_type = self.mime_type_from_file(cleaned_data['file'])\n if self.mime_type.startswith('image/svg') and self.allow_svg_uploads:\n # The `magic.Magic()` will, for unknown reasons, sometimes\n # think an SVG image's mime type is `image/svg` which not\n # a valid mime type actually.\n # See https://www.iana.org/assignments/media-types/media-types.xhtml#image\n # So correct that.\n if self.mime_type == 'image/svg':\n self.mime_type = 'image/svg+xml'\n else:\n allowed_mime_types = config.WIKI_ATTACHMENT_ALLOWED_TYPES.split()\n if self.mime_type not in allowed_mime_types:\n raise forms.ValidationError(\n MIME_TYPE_INVALID, code='invalid')\n\n return cleaned_data\n\n def save(self, *args, **kwargs):\n revision = super(AttachmentRevisionForm, self).save(*args, **kwargs)\n if self.mime_type is not None:\n revision.mime_type = self.mime_type\n return revision\n\n def mime_type_from_file(self, file):\n m_mime = magic.Magic(mime=True)\n mime_type = m_mime.from_buffer(file.read(1024)).split(';')[0]\n file.seek(0)\n return mime_type\n\n\nclass AdminAttachmentRevisionForm(AttachmentRevisionForm):\n class Meta(AttachmentRevisionForm.Meta):\n fields = ['attachment', 'file', 'title', 'mime_type', 'description',\n 'is_approved']\n", "path": "kuma/attachments/forms.py"}, {"content": "import mimetypes\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, StreamingHttpResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.decorators.cache import cache_control, never_cache\nfrom django.views.decorators.clickjacking import xframe_options_sameorigin\n\nfrom kuma.core.decorators import (ensure_wiki_domain, login_required,\n shared_cache_control)\nfrom kuma.core.utils import is_untrusted\nfrom kuma.wiki.decorators import process_document_path\nfrom kuma.wiki.models import Document\n\nfrom .forms import AttachmentRevisionForm\nfrom .models import Attachment\nfrom .utils import allow_add_attachment_by, convert_to_http_date\n\n\n# Mime types used on MDN\nOVERRIDE_MIMETYPES = {\n 'image/jpeg': '.jpeg, .jpg, .jpe',\n 'image/vnd.adobe.photoshop': '.psd',\n}\n\nIMAGE_MIMETYPES = ['image/png', 'image/jpeg', 'image/jpg', 'image/gif']\n\n\ndef guess_extension(_type):\n return OVERRIDE_MIMETYPES.get(_type, mimetypes.guess_extension(_type))\n\n\n@cache_control(public=True, max_age=60 * 15)\ndef raw_file(request, attachment_id, filename):\n \"\"\"\n Serve up an attachment's file.\n \"\"\"\n qs = 
Attachment.objects.select_related('current_revision')\n attachment = get_object_or_404(qs, pk=attachment_id)\n if attachment.current_revision is None:\n raise Http404\n\n if is_untrusted(request):\n rev = attachment.current_revision\n response = StreamingHttpResponse(rev.file, content_type=rev.mime_type)\n response['Content-Length'] = rev.file.size\n response['Last-Modified'] = convert_to_http_date(rev.created)\n response['X-Frame-Options'] = 'ALLOW-FROM %s' % settings.DOMAIN\n return response\n\n return redirect(attachment.get_file_url(), permanent=True)\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 30)\ndef mindtouch_file_redirect(request, file_id, filename):\n \"\"\"Redirect an old MindTouch file URL to a new kuma file URL.\"\"\"\n attachment = get_object_or_404(Attachment, mindtouch_attachment_id=file_id)\n return redirect(attachment.get_file_url(), permanent=True)\n\n\n@ensure_wiki_domain\n@never_cache\n@xframe_options_sameorigin\n@login_required\n@process_document_path\ndef edit_attachment(request, document_slug, document_locale):\n \"\"\"\n Create a new Attachment object and populate its initial\n revision or show a separate form view that allows to fix form submission\n errors.\n\n Redirects back to the document's editing URL on success.\n \"\"\"\n document = get_object_or_404(\n Document,\n locale=document_locale,\n slug=document_slug,\n )\n if request.method != 'POST':\n return redirect(document.get_edit_url())\n\n # No access if no permissions to upload\n if not allow_add_attachment_by(request.user):\n raise PermissionDenied\n\n form = AttachmentRevisionForm(\n data=request.POST,\n files=request.FILES,\n # Only staff users are allowed to upload SVG files because SVG files\n # can contain embedded inline scripts.\n allow_svg_uploads=request.user.is_staff\n )\n if form.is_valid():\n revision = form.save(commit=False)\n revision.creator = request.user\n attachment = Attachment.objects.create(title=revision.title)\n revision.attachment = attachment\n revision.save()\n # adding the attachment to the document's files (M2M)\n attachment.attach(document, request.user, revision)\n return redirect(document.get_edit_url())\n else:\n context = {\n 'form': form,\n 'document': document,\n }\n return render(request, 'attachments/edit_attachment.html', context)\n", "path": "kuma/attachments/views.py"}]}
2,182
575
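The kuma fix turns on one normalization: libmagic occasionally reports `image/svg` for SVG uploads, while the IANA-registered type (and the value admins put in `WIKI_ATTACHMENT_ALLOWED_TYPES`) is `image/svg+xml`. A minimal sketch of that sniff-and-normalize step outside Django, assuming the same `python-magic` binding the form uses; the function name is illustrative:

```python
import magic  # python-magic, the same binding used by AttachmentRevisionForm


def sniff_mime_type(fileobj):
    mime = magic.Magic(mime=True).from_buffer(fileobj.read(1024)).split(";")[0]
    fileobj.seek(0)
    # libmagic sometimes answers "image/svg", which is not a registered type;
    # normalize to "image/svg+xml" before any allow-list comparison.
    if mime == "image/svg":
        mime = "image/svg+xml"
    return mime
```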
gh_patches_debug_4556
rasdani/github-patches
git_diff
scrapy__scrapy-4599
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- KeyError in is_generator_with_return_value ```Python traceback Traceback (most recent call last): File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1418, in _inlineCallbacks result = g.send(result) File "/app/python/lib/python3.8/site-packages/scrapy/core/downloader/middleware.py", line 42, in process_request defer.returnValue((yield download_func(request=request, spider=spider))) File "/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py", line 1362, in returnValue raise _DefGen_Return(val) twisted.internet.defer._DefGen_Return: <200 https://www.example.com> During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/app/python/lib/python3.8/site-packages/scrapy/utils/defer.py", line 55, in mustbe_deferred result = f(*args, **kw) File "/app/python/lib/python3.8/site-packages/scrapy/core/spidermw.py", line 60, in process_spider_input return scrape_func(response, request, spider) File "/app/python/lib/python3.8/site-packages/scrapy/core/scraper.py", line 148, in call_spider warn_on_generator_with_return_value(spider, callback) File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 202, in warn_on_generator_with_return_value if is_generator_with_return_value(callable): File "/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py", line 180, in is_generator_with_return_value return _generator_callbacks_cache[callable] File "/app/python/lib/python3.8/site-packages/scrapy/utils/datatypes.py", line 281, in __getitem__ return super(LocalWeakReferencedCache, self).__getitem__(key) File "/usr/local/lib/python3.8/weakref.py", line 383, in __getitem__ return self.data[ref(key)] KeyError: <weakref at 0x7f06ff011720; to 'method' at 0x7f07042b5e00 (parse_foo)> ``` This is Scrapy 2.0.1. The problem happens only sometimes, but in different spiders in the same project. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/utils/datatypes.py` Content: ``` 1 """ 2 This module contains data types used by Scrapy which are not included in the 3 Python Standard Library. 4 5 This module must not depend on any module outside the Standard Library. 
6 """ 7 8 import collections 9 import weakref 10 from collections.abc import Mapping 11 12 13 class CaselessDict(dict): 14 15 __slots__ = () 16 17 def __init__(self, seq=None): 18 super(CaselessDict, self).__init__() 19 if seq: 20 self.update(seq) 21 22 def __getitem__(self, key): 23 return dict.__getitem__(self, self.normkey(key)) 24 25 def __setitem__(self, key, value): 26 dict.__setitem__(self, self.normkey(key), self.normvalue(value)) 27 28 def __delitem__(self, key): 29 dict.__delitem__(self, self.normkey(key)) 30 31 def __contains__(self, key): 32 return dict.__contains__(self, self.normkey(key)) 33 has_key = __contains__ 34 35 def __copy__(self): 36 return self.__class__(self) 37 copy = __copy__ 38 39 def normkey(self, key): 40 """Method to normalize dictionary key access""" 41 return key.lower() 42 43 def normvalue(self, value): 44 """Method to normalize values prior to be setted""" 45 return value 46 47 def get(self, key, def_val=None): 48 return dict.get(self, self.normkey(key), self.normvalue(def_val)) 49 50 def setdefault(self, key, def_val=None): 51 return dict.setdefault(self, self.normkey(key), self.normvalue(def_val)) 52 53 def update(self, seq): 54 seq = seq.items() if isinstance(seq, Mapping) else seq 55 iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq) 56 super(CaselessDict, self).update(iseq) 57 58 @classmethod 59 def fromkeys(cls, keys, value=None): 60 return cls((k, value) for k in keys) 61 62 def pop(self, key, *args): 63 return dict.pop(self, self.normkey(key), *args) 64 65 66 class LocalCache(collections.OrderedDict): 67 """Dictionary with a finite number of keys. 68 69 Older items expires first. 70 """ 71 72 def __init__(self, limit=None): 73 super(LocalCache, self).__init__() 74 self.limit = limit 75 76 def __setitem__(self, key, value): 77 if self.limit: 78 while len(self) >= self.limit: 79 self.popitem(last=False) 80 super(LocalCache, self).__setitem__(key, value) 81 82 83 class LocalWeakReferencedCache(weakref.WeakKeyDictionary): 84 """ 85 A weakref.WeakKeyDictionary implementation that uses LocalCache as its 86 underlying data structure, making it ordered and capable of being size-limited. 87 88 Useful for memoization, while avoiding keeping received 89 arguments in memory only because of the cached references. 90 91 Note: like LocalCache and unlike weakref.WeakKeyDictionary, 92 it cannot be instantiated with an initial dictionary. 93 """ 94 95 def __init__(self, limit=None): 96 super(LocalWeakReferencedCache, self).__init__() 97 self.data = LocalCache(limit=limit) 98 99 def __setitem__(self, key, value): 100 try: 101 super(LocalWeakReferencedCache, self).__setitem__(key, value) 102 except TypeError: 103 pass # key is not weak-referenceable, skip caching 104 105 def __getitem__(self, key): 106 try: 107 return super(LocalWeakReferencedCache, self).__getitem__(key) 108 except TypeError: 109 return None # key is not weak-referenceable, it's not cached 110 111 112 class SequenceExclude: 113 """Object to test if an item is NOT within some sequence.""" 114 115 def __init__(self, seq): 116 self.seq = seq 117 118 def __contains__(self, item): 119 return item not in self.seq 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/utils/datatypes.py b/scrapy/utils/datatypes.py --- a/scrapy/utils/datatypes.py +++ b/scrapy/utils/datatypes.py @@ -105,8 +105,8 @@ def __getitem__(self, key): try: return super(LocalWeakReferencedCache, self).__getitem__(key) - except TypeError: - return None # key is not weak-referenceable, it's not cached + except (TypeError, KeyError): + return None # key is either not weak-referenceable or not cached class SequenceExclude:
{"golden_diff": "diff --git a/scrapy/utils/datatypes.py b/scrapy/utils/datatypes.py\n--- a/scrapy/utils/datatypes.py\n+++ b/scrapy/utils/datatypes.py\n@@ -105,8 +105,8 @@\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n- except TypeError:\n- return None # key is not weak-referenceable, it's not cached\n+ except (TypeError, KeyError):\n+ return None # key is either not weak-referenceable or not cached\n \n \n class SequenceExclude:\n", "issue": "KeyError in is_generator_with_return_value\n\r\n```Python traceback\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py\", line 1418, in _inlineCallbacks\r\n result = g.send(result)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/downloader/middleware.py\", line 42, in process_request\r\n defer.returnValue((yield download_func(request=request, spider=spider)))\r\n File \"/usr/local/lib/python3.8/site-packages/twisted/internet/defer.py\", line 1362, in returnValue\r\n raise _DefGen_Return(val)\r\ntwisted.internet.defer._DefGen_Return: <200 https://www.example.com>\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/defer.py\", line 55, in mustbe_deferred\r\n result = f(*args, **kw)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/spidermw.py\", line 60, in process_spider_input\r\n return scrape_func(response, request, spider)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/core/scraper.py\", line 148, in call_spider\r\n warn_on_generator_with_return_value(spider, callback)\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py\", line 202, in warn_on_generator_with_return_value\r\n if is_generator_with_return_value(callable):\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/misc.py\", line 180, in is_generator_with_return_value\r\n return _generator_callbacks_cache[callable]\r\n File \"/app/python/lib/python3.8/site-packages/scrapy/utils/datatypes.py\", line 281, in __getitem__\r\n return super(LocalWeakReferencedCache, self).__getitem__(key)\r\n File \"/usr/local/lib/python3.8/weakref.py\", line 383, in __getitem__\r\n return self.data[ref(key)]\r\nKeyError: <weakref at 0x7f06ff011720; to 'method' at 0x7f07042b5e00 (parse_foo)>\r\n```\r\n\r\nThis is Scrapy 2.0.1. 
The problem happens only sometimes, but in different spiders in the same project.\n", "before_files": [{"content": "\"\"\"\nThis module contains data types used by Scrapy which are not included in the\nPython Standard Library.\n\nThis module must not depend on any module outside the Standard Library.\n\"\"\"\n\nimport collections\nimport weakref\nfrom collections.abc import Mapping\n\n\nclass CaselessDict(dict):\n\n __slots__ = ()\n\n def __init__(self, seq=None):\n super(CaselessDict, self).__init__()\n if seq:\n self.update(seq)\n\n def __getitem__(self, key):\n return dict.__getitem__(self, self.normkey(key))\n\n def __setitem__(self, key, value):\n dict.__setitem__(self, self.normkey(key), self.normvalue(value))\n\n def __delitem__(self, key):\n dict.__delitem__(self, self.normkey(key))\n\n def __contains__(self, key):\n return dict.__contains__(self, self.normkey(key))\n has_key = __contains__\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n\n def normkey(self, key):\n \"\"\"Method to normalize dictionary key access\"\"\"\n return key.lower()\n\n def normvalue(self, value):\n \"\"\"Method to normalize values prior to be setted\"\"\"\n return value\n\n def get(self, key, def_val=None):\n return dict.get(self, self.normkey(key), self.normvalue(def_val))\n\n def setdefault(self, key, def_val=None):\n return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))\n\n def update(self, seq):\n seq = seq.items() if isinstance(seq, Mapping) else seq\n iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)\n super(CaselessDict, self).update(iseq)\n\n @classmethod\n def fromkeys(cls, keys, value=None):\n return cls((k, value) for k in keys)\n\n def pop(self, key, *args):\n return dict.pop(self, self.normkey(key), *args)\n\n\nclass LocalCache(collections.OrderedDict):\n \"\"\"Dictionary with a finite number of keys.\n\n Older items expires first.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalCache, self).__init__()\n self.limit = limit\n\n def __setitem__(self, key, value):\n if self.limit:\n while len(self) >= self.limit:\n self.popitem(last=False)\n super(LocalCache, self).__setitem__(key, value)\n\n\nclass LocalWeakReferencedCache(weakref.WeakKeyDictionary):\n \"\"\"\n A weakref.WeakKeyDictionary implementation that uses LocalCache as its\n underlying data structure, making it ordered and capable of being size-limited.\n\n Useful for memoization, while avoiding keeping received\n arguments in memory only because of the cached references.\n\n Note: like LocalCache and unlike weakref.WeakKeyDictionary,\n it cannot be instantiated with an initial dictionary.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalWeakReferencedCache, self).__init__()\n self.data = LocalCache(limit=limit)\n\n def __setitem__(self, key, value):\n try:\n super(LocalWeakReferencedCache, self).__setitem__(key, value)\n except TypeError:\n pass # key is not weak-referenceable, skip caching\n\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n except TypeError:\n return None # key is not weak-referenceable, it's not cached\n\n\nclass SequenceExclude:\n \"\"\"Object to test if an item is NOT within some sequence.\"\"\"\n\n def __init__(self, seq):\n self.seq = seq\n\n def __contains__(self, item):\n return item not in self.seq\n", "path": "scrapy/utils/datatypes.py"}], "after_files": [{"content": "\"\"\"\nThis module contains data types used by Scrapy which are not included in the\nPython Standard Library.\n\nThis 
module must not depend on any module outside the Standard Library.\n\"\"\"\n\nimport collections\nimport weakref\nfrom collections.abc import Mapping\n\n\nclass CaselessDict(dict):\n\n __slots__ = ()\n\n def __init__(self, seq=None):\n super(CaselessDict, self).__init__()\n if seq:\n self.update(seq)\n\n def __getitem__(self, key):\n return dict.__getitem__(self, self.normkey(key))\n\n def __setitem__(self, key, value):\n dict.__setitem__(self, self.normkey(key), self.normvalue(value))\n\n def __delitem__(self, key):\n dict.__delitem__(self, self.normkey(key))\n\n def __contains__(self, key):\n return dict.__contains__(self, self.normkey(key))\n has_key = __contains__\n\n def __copy__(self):\n return self.__class__(self)\n copy = __copy__\n\n def normkey(self, key):\n \"\"\"Method to normalize dictionary key access\"\"\"\n return key.lower()\n\n def normvalue(self, value):\n \"\"\"Method to normalize values prior to be setted\"\"\"\n return value\n\n def get(self, key, def_val=None):\n return dict.get(self, self.normkey(key), self.normvalue(def_val))\n\n def setdefault(self, key, def_val=None):\n return dict.setdefault(self, self.normkey(key), self.normvalue(def_val))\n\n def update(self, seq):\n seq = seq.items() if isinstance(seq, Mapping) else seq\n iseq = ((self.normkey(k), self.normvalue(v)) for k, v in seq)\n super(CaselessDict, self).update(iseq)\n\n @classmethod\n def fromkeys(cls, keys, value=None):\n return cls((k, value) for k in keys)\n\n def pop(self, key, *args):\n return dict.pop(self, self.normkey(key), *args)\n\n\nclass LocalCache(collections.OrderedDict):\n \"\"\"Dictionary with a finite number of keys.\n\n Older items expires first.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalCache, self).__init__()\n self.limit = limit\n\n def __setitem__(self, key, value):\n if self.limit:\n while len(self) >= self.limit:\n self.popitem(last=False)\n super(LocalCache, self).__setitem__(key, value)\n\n\nclass LocalWeakReferencedCache(weakref.WeakKeyDictionary):\n \"\"\"\n A weakref.WeakKeyDictionary implementation that uses LocalCache as its\n underlying data structure, making it ordered and capable of being size-limited.\n\n Useful for memoization, while avoiding keeping received\n arguments in memory only because of the cached references.\n\n Note: like LocalCache and unlike weakref.WeakKeyDictionary,\n it cannot be instantiated with an initial dictionary.\n \"\"\"\n\n def __init__(self, limit=None):\n super(LocalWeakReferencedCache, self).__init__()\n self.data = LocalCache(limit=limit)\n\n def __setitem__(self, key, value):\n try:\n super(LocalWeakReferencedCache, self).__setitem__(key, value)\n except TypeError:\n pass # key is not weak-referenceable, skip caching\n\n def __getitem__(self, key):\n try:\n return super(LocalWeakReferencedCache, self).__getitem__(key)\n except (TypeError, KeyError):\n return None # key is either not weak-referenceable or not cached\n\n\nclass SequenceExclude:\n \"\"\"Object to test if an item is NOT within some sequence.\"\"\"\n\n def __init__(self, seq):\n self.seq = seq\n\n def __contains__(self, item):\n return item not in self.seq\n", "path": "scrapy/utils/datatypes.py"}]}
1,895
130
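The scrapy traceback is what caching bound methods in a `weakref.WeakKeyDictionary` looks like when it goes wrong. Every attribute access builds a fresh bound-method object, nothing keeps the one used as the cache key alive, and the entry can vanish between a membership check and the lookup, so `__getitem__` surfaces a `KeyError` where the pre-patch code only guarded against `TypeError`. A small self-contained demonstration of both ingredients, on CPython where refcounting reclaims the dead key immediately; the spider class here is illustrative:

```python
import weakref


class Spider:
    def parse(self):
        pass


s = Spider()
# A new bound-method object is created on every attribute access:
print(s.parse is s.parse)  # False

cache = weakref.WeakKeyDictionary()
cache[s.parse] = "generator-with-return-value?"  # the key dies right after this statement
print(len(cache))  # 0 -- the entry was dropped along with its key

try:
    cache[s.parse]  # a brand-new bound method; the cached entry is already gone
except KeyError:
    print("KeyError, as in the traceback")  # hence catching KeyError alongside TypeError
```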
gh_patches_debug_23019
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-523
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Compatible with web.py? Is it possible to use opentelemetry-insrumentation-wsgi with web.py framework? If yes, some examples would be appreciated. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 """ 15 This library provides a WSGI middleware that can be used on any WSGI framework 16 (such as Django / Flask) to track requests timing through OpenTelemetry. 17 18 Usage (Flask) 19 ------------- 20 21 .. code-block:: python 22 23 from flask import Flask 24 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 25 26 app = Flask(__name__) 27 app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app) 28 29 @app.route("/") 30 def hello(): 31 return "Hello!" 32 33 if __name__ == "__main__": 34 app.run(debug=True) 35 36 37 Usage (Django) 38 -------------- 39 40 Modify the application's ``wsgi.py`` file as shown below. 41 42 .. code-block:: python 43 44 import os 45 from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware 46 from django.core.wsgi import get_wsgi_application 47 48 os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings') 49 50 application = get_wsgi_application() 51 application = OpenTelemetryMiddleware(application) 52 53 API 54 --- 55 """ 56 57 import functools 58 import typing 59 import wsgiref.util as wsgiref_util 60 61 from opentelemetry import context, trace 62 from opentelemetry.instrumentation.utils import http_status_to_status_code 63 from opentelemetry.instrumentation.wsgi.version import __version__ 64 from opentelemetry.propagate import extract 65 from opentelemetry.propagators.textmap import Getter 66 from opentelemetry.semconv.trace import SpanAttributes 67 from opentelemetry.trace.status import Status, StatusCode 68 from opentelemetry.util.http import remove_url_credentials 69 70 _HTTP_VERSION_PREFIX = "HTTP/" 71 _CARRIER_KEY_PREFIX = "HTTP_" 72 _CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX) 73 74 75 class WSGIGetter(Getter): 76 def get( 77 self, carrier: dict, key: str 78 ) -> typing.Optional[typing.List[str]]: 79 """Getter implementation to retrieve a HTTP header value from the 80 PEP3333-conforming WSGI environ 81 82 Args: 83 carrier: WSGI environ object 84 key: header name in environ object 85 Returns: 86 A list with a single string with the header value if it exists, 87 else None. 
88 """ 89 environ_key = "HTTP_" + key.upper().replace("-", "_") 90 value = carrier.get(environ_key) 91 if value is not None: 92 return [value] 93 return None 94 95 def keys(self, carrier): 96 return [ 97 key[_CARRIER_KEY_PREFIX_LEN:].lower().replace("_", "-") 98 for key in carrier 99 if key.startswith(_CARRIER_KEY_PREFIX) 100 ] 101 102 103 wsgi_getter = WSGIGetter() 104 105 106 def setifnotnone(dic, key, value): 107 if value is not None: 108 dic[key] = value 109 110 111 def collect_request_attributes(environ): 112 """Collects HTTP request attributes from the PEP3333-conforming 113 WSGI environ and returns a dictionary to be used as span creation attributes.""" 114 115 result = { 116 SpanAttributes.HTTP_METHOD: environ.get("REQUEST_METHOD"), 117 SpanAttributes.HTTP_SERVER_NAME: environ.get("SERVER_NAME"), 118 SpanAttributes.HTTP_SCHEME: environ.get("wsgi.url_scheme"), 119 } 120 121 host_port = environ.get("SERVER_PORT") 122 if host_port is not None and not host_port == "": 123 result.update({SpanAttributes.NET_HOST_PORT: int(host_port)}) 124 125 setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get("HTTP_HOST")) 126 target = environ.get("RAW_URI") 127 if target is None: # Note: `"" or None is None` 128 target = environ.get("REQUEST_URI") 129 if target is not None: 130 result[SpanAttributes.HTTP_TARGET] = target 131 else: 132 result[SpanAttributes.HTTP_URL] = remove_url_credentials( 133 wsgiref_util.request_uri(environ) 134 ) 135 136 remote_addr = environ.get("REMOTE_ADDR") 137 if remote_addr: 138 result[SpanAttributes.NET_PEER_IP] = remote_addr 139 remote_host = environ.get("REMOTE_HOST") 140 if remote_host and remote_host != remote_addr: 141 result[SpanAttributes.NET_PEER_NAME] = remote_host 142 143 user_agent = environ.get("HTTP_USER_AGENT") 144 if user_agent is not None and len(user_agent) > 0: 145 result[SpanAttributes.HTTP_USER_AGENT] = user_agent 146 147 setifnotnone( 148 result, SpanAttributes.NET_PEER_PORT, environ.get("REMOTE_PORT") 149 ) 150 flavor = environ.get("SERVER_PROTOCOL", "") 151 if flavor.upper().startswith(_HTTP_VERSION_PREFIX): 152 flavor = flavor[len(_HTTP_VERSION_PREFIX) :] 153 if flavor: 154 result[SpanAttributes.HTTP_FLAVOR] = flavor 155 156 return result 157 158 159 def add_response_attributes( 160 span, start_response_status, response_headers 161 ): # pylint: disable=unused-argument 162 """Adds HTTP response attributes to span using the arguments 163 passed to a PEP3333-conforming start_response callable.""" 164 if not span.is_recording(): 165 return 166 status_code, _ = start_response_status.split(" ", 1) 167 168 try: 169 status_code = int(status_code) 170 except ValueError: 171 span.set_status( 172 Status( 173 StatusCode.ERROR, 174 "Non-integer HTTP status: " + repr(status_code), 175 ) 176 ) 177 else: 178 span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code) 179 span.set_status(Status(http_status_to_status_code(status_code))) 180 181 182 def get_default_span_name(environ): 183 """Default implementation for name_callback, returns HTTP {METHOD_NAME}.""" 184 return "HTTP {}".format(environ.get("REQUEST_METHOD", "")).strip() 185 186 187 class OpenTelemetryMiddleware: 188 """The WSGI application middleware. 189 190 This class is a PEP 3333 conforming WSGI middleware that starts and 191 annotates spans for any requests it is invoked with. 192 193 Args: 194 wsgi: The WSGI application callable to forward requests to. 195 request_hook: Optional callback which is called with the server span and WSGI 196 environ object for every incoming request. 
197 response_hook: Optional callback which is called with the server span, 198 WSGI environ, status_code and response_headers for every 199 incoming request. 200 tracer_provider: Optional tracer provider to use. If omitted the current 201 globally configured one is used. 202 """ 203 204 def __init__( 205 self, wsgi, request_hook=None, response_hook=None, tracer_provider=None 206 ): 207 self.wsgi = wsgi 208 self.tracer = trace.get_tracer(__name__, __version__, tracer_provider) 209 self.request_hook = request_hook 210 self.response_hook = response_hook 211 212 @staticmethod 213 def _create_start_response(span, start_response, response_hook): 214 @functools.wraps(start_response) 215 def _start_response(status, response_headers, *args, **kwargs): 216 add_response_attributes(span, status, response_headers) 217 if response_hook: 218 response_hook(status, response_headers) 219 return start_response(status, response_headers, *args, **kwargs) 220 221 return _start_response 222 223 def __call__(self, environ, start_response): 224 """The WSGI application 225 226 Args: 227 environ: A WSGI environment. 228 start_response: The WSGI start_response callable. 229 """ 230 231 token = context.attach(extract(environ, getter=wsgi_getter)) 232 233 span = self.tracer.start_span( 234 get_default_span_name(environ), 235 kind=trace.SpanKind.SERVER, 236 attributes=collect_request_attributes(environ), 237 ) 238 239 if self.request_hook: 240 self.request_hook(span, environ) 241 242 response_hook = self.response_hook 243 if response_hook: 244 response_hook = functools.partial(response_hook, span, environ) 245 246 try: 247 with trace.use_span(span): 248 start_response = self._create_start_response( 249 span, start_response, response_hook 250 ) 251 iterable = self.wsgi(environ, start_response) 252 return _end_span_after_iterating( 253 iterable, span, self.tracer, token 254 ) 255 except Exception as ex: 256 if span.is_recording(): 257 span.set_status(Status(StatusCode.ERROR, str(ex))) 258 span.end() 259 context.detach(token) 260 raise 261 262 263 # Put this in a subfunction to not delay the call to the wrapped 264 # WSGI application (instrumentation should change the application 265 # behavior as little as possible). 266 def _end_span_after_iterating(iterable, span, tracer, token): 267 try: 268 with trace.use_span(span): 269 for yielded in iterable: 270 yield yielded 271 finally: 272 close = getattr(iterable, "close", None) 273 if close: 274 close() 275 span.end() 276 context.detach(token) 277 278 279 # TODO: inherit from opentelemetry.instrumentation.propagators.Setter 280 281 282 class ResponsePropagationSetter: 283 def set(self, carrier, key, value): # pylint: disable=no-self-use 284 carrier.append((key, value)) 285 286 287 default_response_propagation_setter = ResponsePropagationSetter() 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py --- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py +++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. """ This library provides a WSGI middleware that can be used on any WSGI framework -(such as Django / Flask) to track requests timing through OpenTelemetry. +(such as Django / Flask / Web.py) to track requests timing through OpenTelemetry. Usage (Flask) ------------- @@ -50,6 +50,35 @@ application = get_wsgi_application() application = OpenTelemetryMiddleware(application) +Usage (Web.py) +-------------- + +.. code-block:: python + + import web + from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware + from cheroot import wsgi + + urls = ('/', 'index') + + + class index: + + def GET(self): + return "Hello, world!" + + + if __name__ == "__main__": + app = web.application(urls, globals()) + func = app.wsgifunc() + + func = OpenTelemetryMiddleware(func) + + server = wsgi.WSGIServer( + ("localhost", 5100), func, server_name="localhost" + ) + server.start() + API --- """
{"golden_diff": "diff --git a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n--- a/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n+++ b/instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \"\"\"\n This library provides a WSGI middleware that can be used on any WSGI framework\n-(such as Django / Flask) to track requests timing through OpenTelemetry.\n+(such as Django / Flask / Web.py) to track requests timing through OpenTelemetry.\n \n Usage (Flask)\n -------------\n@@ -50,6 +50,35 @@\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n \n+Usage (Web.py)\n+--------------\n+\n+.. code-block:: python\n+\n+ import web\n+ from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n+ from cheroot import wsgi\n+\n+ urls = ('/', 'index')\n+\n+\n+ class index:\n+\n+ def GET(self):\n+ return \"Hello, world!\"\n+\n+\n+ if __name__ == \"__main__\":\n+ app = web.application(urls, globals())\n+ func = app.wsgifunc()\n+\n+ func = OpenTelemetryMiddleware(func)\n+\n+ server = wsgi.WSGIServer(\n+ (\"localhost\", 5100), func, server_name=\"localhost\"\n+ )\n+ server.start()\n+\n API\n ---\n \"\"\"\n", "issue": "Compatible with web.py?\nIs it possible to use opentelemetry-insrumentation-wsgi with web.py framework? If yes, some examples would be appreciated.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. 
code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\nfrom opentelemetry.util.http import remove_url_credentials\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n_CARRIER_KEY_PREFIX = \"HTTP_\"\n_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX)\n\n\nclass WSGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the\n PEP3333-conforming WSGI environ\n\n Args:\n carrier: WSGI environ object\n key: header name in environ object\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n environ_key = \"HTTP_\" + key.upper().replace(\"-\", \"_\")\n value = carrier.get(environ_key)\n if value is not None:\n return [value]\n return None\n\n def keys(self, carrier):\n return [\n key[_CARRIER_KEY_PREFIX_LEN:].lower().replace(\"_\", \"-\")\n for key in carrier\n if key.startswith(_CARRIER_KEY_PREFIX)\n ]\n\n\nwsgi_getter = WSGIGetter()\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n SpanAttributes.HTTP_METHOD: environ.get(\"REQUEST_METHOD\"),\n SpanAttributes.HTTP_SERVER_NAME: environ.get(\"SERVER_NAME\"),\n SpanAttributes.HTTP_SCHEME: environ.get(\"wsgi.url_scheme\"),\n }\n\n host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None and not host_port == \"\":\n result.update({SpanAttributes.NET_HOST_PORT: int(host_port)})\n\n setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[SpanAttributes.HTTP_TARGET] = target\n else:\n result[SpanAttributes.HTTP_URL] = remove_url_credentials(\n wsgiref_util.request_uri(environ)\n )\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[SpanAttributes.NET_PEER_IP] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[SpanAttributes.NET_PEER_NAME] = remote_host\n\n user_agent = environ.get(\"HTTP_USER_AGENT\")\n if user_agent is not None and len(user_agent) > 0:\n result[SpanAttributes.HTTP_USER_AGENT] = user_agent\n\n setifnotnone(\n result, SpanAttributes.NET_PEER_PORT, environ.get(\"REMOTE_PORT\")\n )\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[SpanAttributes.HTTP_FLAVOR] = flavor\n\n return result\n\n\ndef 
add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n if not span.is_recording():\n return\n status_code, _ = start_response_status.split(\" \", 1)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n request_hook: Optional callback which is called with the server span and WSGI\n environ object for every incoming request.\n response_hook: Optional callback which is called with the server span,\n WSGI environ, status_code and response_headers for every\n incoming request.\n tracer_provider: Optional tracer provider to use. If omitted the current\n globally configured one is used.\n \"\"\"\n\n def __init__(\n self, wsgi, request_hook=None, response_hook=None, tracer_provider=None\n ):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.request_hook = request_hook\n self.response_hook = response_hook\n\n @staticmethod\n def _create_start_response(span, start_response, response_hook):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n if response_hook:\n response_hook(status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(extract(environ, getter=wsgi_getter))\n\n span = self.tracer.start_span(\n get_default_span_name(environ),\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n if self.request_hook:\n self.request_hook(span, environ)\n\n response_hook = self.response_hook\n if response_hook:\n response_hook = functools.partial(response_hook, span, environ)\n\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n span, start_response, response_hook\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef _end_span_after_iterating(iterable, span, tracer, token):\n try:\n with trace.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if 
close:\n close()\n span.end()\n context.detach(token)\n\n\n# TODO: inherit from opentelemetry.instrumentation.propagators.Setter\n\n\nclass ResponsePropagationSetter:\n def set(self, carrier, key, value): # pylint: disable=no-self-use\n carrier.append((key, value))\n\n\ndefault_response_propagation_setter = ResponsePropagationSetter()\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis library provides a WSGI middleware that can be used on any WSGI framework\n(such as Django / Flask / Web.py) to track requests timing through OpenTelemetry.\n\nUsage (Flask)\n-------------\n\n.. code-block:: python\n\n from flask import Flask\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n\n app = Flask(__name__)\n app.wsgi_app = OpenTelemetryMiddleware(app.wsgi_app)\n\n @app.route(\"/\")\n def hello():\n return \"Hello!\"\n\n if __name__ == \"__main__\":\n app.run(debug=True)\n\n\nUsage (Django)\n--------------\n\nModify the application's ``wsgi.py`` file as shown below.\n\n.. code-block:: python\n\n import os\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from django.core.wsgi import get_wsgi_application\n\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'application.settings')\n\n application = get_wsgi_application()\n application = OpenTelemetryMiddleware(application)\n\nUsage (Web.py)\n--------------\n\n.. 
code-block:: python\n\n import web\n from opentelemetry.instrumentation.wsgi import OpenTelemetryMiddleware\n from cheroot import wsgi\n\n urls = ('/', 'index')\n\n\n class index:\n\n def GET(self):\n return \"Hello, world!\"\n\n\n if __name__ == \"__main__\":\n app = web.application(urls, globals())\n func = app.wsgifunc()\n\n func = OpenTelemetryMiddleware(func)\n\n server = wsgi.WSGIServer(\n (\"localhost\", 5100), func, server_name=\"localhost\"\n )\n server.start()\n\nAPI\n---\n\"\"\"\n\nimport functools\nimport typing\nimport wsgiref.util as wsgiref_util\n\nfrom opentelemetry import context, trace\nfrom opentelemetry.instrumentation.utils import http_status_to_status_code\nfrom opentelemetry.instrumentation.wsgi.version import __version__\nfrom opentelemetry.propagate import extract\nfrom opentelemetry.propagators.textmap import Getter\nfrom opentelemetry.semconv.trace import SpanAttributes\nfrom opentelemetry.trace.status import Status, StatusCode\nfrom opentelemetry.util.http import remove_url_credentials\n\n_HTTP_VERSION_PREFIX = \"HTTP/\"\n_CARRIER_KEY_PREFIX = \"HTTP_\"\n_CARRIER_KEY_PREFIX_LEN = len(_CARRIER_KEY_PREFIX)\n\n\nclass WSGIGetter(Getter):\n def get(\n self, carrier: dict, key: str\n ) -> typing.Optional[typing.List[str]]:\n \"\"\"Getter implementation to retrieve a HTTP header value from the\n PEP3333-conforming WSGI environ\n\n Args:\n carrier: WSGI environ object\n key: header name in environ object\n Returns:\n A list with a single string with the header value if it exists,\n else None.\n \"\"\"\n environ_key = \"HTTP_\" + key.upper().replace(\"-\", \"_\")\n value = carrier.get(environ_key)\n if value is not None:\n return [value]\n return None\n\n def keys(self, carrier):\n return [\n key[_CARRIER_KEY_PREFIX_LEN:].lower().replace(\"_\", \"-\")\n for key in carrier\n if key.startswith(_CARRIER_KEY_PREFIX)\n ]\n\n\nwsgi_getter = WSGIGetter()\n\n\ndef setifnotnone(dic, key, value):\n if value is not None:\n dic[key] = value\n\n\ndef collect_request_attributes(environ):\n \"\"\"Collects HTTP request attributes from the PEP3333-conforming\n WSGI environ and returns a dictionary to be used as span creation attributes.\"\"\"\n\n result = {\n SpanAttributes.HTTP_METHOD: environ.get(\"REQUEST_METHOD\"),\n SpanAttributes.HTTP_SERVER_NAME: environ.get(\"SERVER_NAME\"),\n SpanAttributes.HTTP_SCHEME: environ.get(\"wsgi.url_scheme\"),\n }\n\n host_port = environ.get(\"SERVER_PORT\")\n if host_port is not None and not host_port == \"\":\n result.update({SpanAttributes.NET_HOST_PORT: int(host_port)})\n\n setifnotnone(result, SpanAttributes.HTTP_HOST, environ.get(\"HTTP_HOST\"))\n target = environ.get(\"RAW_URI\")\n if target is None: # Note: `\"\" or None is None`\n target = environ.get(\"REQUEST_URI\")\n if target is not None:\n result[SpanAttributes.HTTP_TARGET] = target\n else:\n result[SpanAttributes.HTTP_URL] = remove_url_credentials(\n wsgiref_util.request_uri(environ)\n )\n\n remote_addr = environ.get(\"REMOTE_ADDR\")\n if remote_addr:\n result[SpanAttributes.NET_PEER_IP] = remote_addr\n remote_host = environ.get(\"REMOTE_HOST\")\n if remote_host and remote_host != remote_addr:\n result[SpanAttributes.NET_PEER_NAME] = remote_host\n\n user_agent = environ.get(\"HTTP_USER_AGENT\")\n if user_agent is not None and len(user_agent) > 0:\n result[SpanAttributes.HTTP_USER_AGENT] = user_agent\n\n setifnotnone(\n result, SpanAttributes.NET_PEER_PORT, environ.get(\"REMOTE_PORT\")\n )\n flavor = environ.get(\"SERVER_PROTOCOL\", \"\")\n if 
flavor.upper().startswith(_HTTP_VERSION_PREFIX):\n flavor = flavor[len(_HTTP_VERSION_PREFIX) :]\n if flavor:\n result[SpanAttributes.HTTP_FLAVOR] = flavor\n\n return result\n\n\ndef add_response_attributes(\n span, start_response_status, response_headers\n): # pylint: disable=unused-argument\n \"\"\"Adds HTTP response attributes to span using the arguments\n passed to a PEP3333-conforming start_response callable.\"\"\"\n if not span.is_recording():\n return\n status_code, _ = start_response_status.split(\" \", 1)\n\n try:\n status_code = int(status_code)\n except ValueError:\n span.set_status(\n Status(\n StatusCode.ERROR,\n \"Non-integer HTTP status: \" + repr(status_code),\n )\n )\n else:\n span.set_attribute(SpanAttributes.HTTP_STATUS_CODE, status_code)\n span.set_status(Status(http_status_to_status_code(status_code)))\n\n\ndef get_default_span_name(environ):\n \"\"\"Default implementation for name_callback, returns HTTP {METHOD_NAME}.\"\"\"\n return \"HTTP {}\".format(environ.get(\"REQUEST_METHOD\", \"\")).strip()\n\n\nclass OpenTelemetryMiddleware:\n \"\"\"The WSGI application middleware.\n\n This class is a PEP 3333 conforming WSGI middleware that starts and\n annotates spans for any requests it is invoked with.\n\n Args:\n wsgi: The WSGI application callable to forward requests to.\n request_hook: Optional callback which is called with the server span and WSGI\n environ object for every incoming request.\n response_hook: Optional callback which is called with the server span,\n WSGI environ, status_code and response_headers for every\n incoming request.\n tracer_provider: Optional tracer provider to use. If omitted the current\n globally configured one is used.\n \"\"\"\n\n def __init__(\n self, wsgi, request_hook=None, response_hook=None, tracer_provider=None\n ):\n self.wsgi = wsgi\n self.tracer = trace.get_tracer(__name__, __version__, tracer_provider)\n self.request_hook = request_hook\n self.response_hook = response_hook\n\n @staticmethod\n def _create_start_response(span, start_response, response_hook):\n @functools.wraps(start_response)\n def _start_response(status, response_headers, *args, **kwargs):\n add_response_attributes(span, status, response_headers)\n if response_hook:\n response_hook(status, response_headers)\n return start_response(status, response_headers, *args, **kwargs)\n\n return _start_response\n\n def __call__(self, environ, start_response):\n \"\"\"The WSGI application\n\n Args:\n environ: A WSGI environment.\n start_response: The WSGI start_response callable.\n \"\"\"\n\n token = context.attach(extract(environ, getter=wsgi_getter))\n\n span = self.tracer.start_span(\n get_default_span_name(environ),\n kind=trace.SpanKind.SERVER,\n attributes=collect_request_attributes(environ),\n )\n\n if self.request_hook:\n self.request_hook(span, environ)\n\n response_hook = self.response_hook\n if response_hook:\n response_hook = functools.partial(response_hook, span, environ)\n\n try:\n with trace.use_span(span):\n start_response = self._create_start_response(\n span, start_response, response_hook\n )\n iterable = self.wsgi(environ, start_response)\n return _end_span_after_iterating(\n iterable, span, self.tracer, token\n )\n except Exception as ex:\n if span.is_recording():\n span.set_status(Status(StatusCode.ERROR, str(ex)))\n span.end()\n context.detach(token)\n raise\n\n\n# Put this in a subfunction to not delay the call to the wrapped\n# WSGI application (instrumentation should change the application\n# behavior as little as possible).\ndef 
_end_span_after_iterating(iterable, span, tracer, token):\n try:\n with trace.use_span(span):\n for yielded in iterable:\n yield yielded\n finally:\n close = getattr(iterable, \"close\", None)\n if close:\n close()\n span.end()\n context.detach(token)\n\n\n# TODO: inherit from opentelemetry.instrumentation.propagators.Setter\n\n\nclass ResponsePropagationSetter:\n def set(self, carrier, key, value): # pylint: disable=no-self-use\n carrier.append((key, value))\n\n\ndefault_response_propagation_setter = ResponsePropagationSetter()\n", "path": "instrumentation/opentelemetry-instrumentation-wsgi/src/opentelemetry/instrumentation/wsgi/__init__.py"}]}
3,139
385
gh_patches_debug_11812
rasdani/github-patches
git_diff
craiga__will-of-the-prophets-196
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Visiting /roll raises an error if no rolls exist ``` Environment: Request Method: GET Request URL: http://localhost:8000/roll/ Django Version: 2.2 Python Version: 3.7.3 Installed Applications: ['raven.contrib.django.raven_compat', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'sass_processor', 'widget_tweaks', 'tz_detect', 'debug_toolbar', 's3direct', 'bootstrap', 'will_of_the_prophets'] Installed Middleware: ('raven.contrib.django.middleware.DjangoRestFrameworkCompatMiddleware', 'raven.contrib.django.middleware.SentryMiddleware', 'whitenoise.middleware.WhiteNoiseMiddleware', 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware', 'debug_toolbar.middleware.DebugToolbarMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'tz_detect.middleware.TimezoneMiddleware') Traceback: File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/exception.py" in inner 34. response = get_response(request) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py" in _get_response 115. response = self.process_exception_by_middleware(e, request) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py" in _get_response 113. response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py" in view 71. return self.dispatch(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/contrib/auth/mixins.py" in dispatch 52. return super().dispatch(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py" in dispatch 97. return handler(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py" in get 168. return super().get(request, *args, **kwargs) File "/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py" in get 133. return self.render_to_response(self.get_context_data()) File "/Users/craiga/will_of_the_prophets/will_of_the_prophets/views.py" in get_context_data 82. board=board.Board(now=last_roll.embargo), Exception Type: AttributeError at /roll/ Exception Value: 'NoneType' object has no attribute 'embargo' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `will_of_the_prophets/views.py` Content: ``` 1 """Views.""" 2 3 from django.conf import settings 4 from django.contrib.auth.mixins import LoginRequiredMixin 5 from django.shortcuts import render 6 from django.urls import reverse 7 from django.utils import timezone 8 from django.views.decorators.cache import cache_control 9 from django.views.decorators.clickjacking import xframe_options_exempt 10 from django.views.decorators.http import condition 11 from django.views.generic.edit import CreateView 12 13 from will_of_the_prophets import board, forms, models 14 15 16 def get_last_modified(request): 17 """Get board's last modified datetime.""" 18 try: 19 return ( 20 models.Roll.objects.filter(embargo__lte=timezone.now()) 21 .latest("embargo") 22 .embargo 23 ) 24 except models.Roll.DoesNotExist: 25 return None 26 27 28 @xframe_options_exempt 29 @condition(last_modified_func=get_last_modified) 30 @cache_control(max_age=3600) 31 def public_board(request): 32 """ 33 Board for the public. 34 35 Does not take embargoed rolls into account. 36 """ 37 response = render( 38 request, 39 "will_of_the_prophets/public_board.html", 40 { 41 "board": board.Board(), 42 "special_square_types": models.SpecialSquareType.objects.all(), 43 }, 44 ) 45 46 canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL 47 if canonical_url: 48 response["Link"] = f'<{canonical_url}>; rel="canonical"' 49 50 return response 51 52 53 @xframe_options_exempt 54 @condition(last_modified_func=get_last_modified) 55 @cache_control(max_age=3600) 56 def roll_frequency(request): 57 """ 58 Show roll frequency. 59 """ 60 roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0} 61 for roll in board.Board().rolls: 62 roll_count[roll] += 1 63 64 return render( 65 request, 66 "will_of_the_prophets/roll_frequency.html", 67 {"roll_frequency": roll_count}, 68 ) 69 70 71 class RollView(LoginRequiredMixin, CreateView): 72 """View for rolling the die.""" 73 74 form_class = forms.RollForm 75 template_name = "will_of_the_prophets/roll.html" 76 77 def get_context_data(self, **kwargs): 78 last_roll = models.Roll.objects.order_by("-embargo").first() 79 return super().get_context_data( 80 **kwargs, 81 last_roll=last_roll, 82 board=board.Board(now=last_roll.embargo), 83 special_square_types=models.SpecialSquareType.objects.all(), 84 ) 85 86 def get_success_url(self): 87 return reverse("roll") + "#chula" 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py --- a/will_of_the_prophets/views.py +++ b/will_of_the_prophets/views.py @@ -76,10 +76,14 @@ def get_context_data(self, **kwargs): last_roll = models.Roll.objects.order_by("-embargo").first() + last_roll_embargo = None + if last_roll: + last_roll_embargo = last_roll.embargo + return super().get_context_data( **kwargs, last_roll=last_roll, - board=board.Board(now=last_roll.embargo), + board=board.Board(now=last_roll_embargo), special_square_types=models.SpecialSquareType.objects.all(), )
{"golden_diff": "diff --git a/will_of_the_prophets/views.py b/will_of_the_prophets/views.py\n--- a/will_of_the_prophets/views.py\n+++ b/will_of_the_prophets/views.py\n@@ -76,10 +76,14 @@\n \n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n+ last_roll_embargo = None\n+ if last_roll:\n+ last_roll_embargo = last_roll.embargo\n+\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n- board=board.Board(now=last_roll.embargo),\n+ board=board.Board(now=last_roll_embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n", "issue": "Visiting /roll raises an error if no rolls exist\n```\r\nEnvironment:\r\n\r\n\r\nRequest Method: GET\r\nRequest URL: http://localhost:8000/roll/\r\n\r\nDjango Version: 2.2\r\nPython Version: 3.7.3\r\nInstalled Applications:\r\n['raven.contrib.django.raven_compat',\r\n 'django.contrib.admin',\r\n 'django.contrib.auth',\r\n 'django.contrib.contenttypes',\r\n 'django.contrib.sessions',\r\n 'django.contrib.messages',\r\n 'django.contrib.staticfiles',\r\n 'sass_processor',\r\n 'widget_tweaks',\r\n 'tz_detect',\r\n 'debug_toolbar',\r\n 's3direct',\r\n 'bootstrap',\r\n 'will_of_the_prophets']\r\nInstalled Middleware:\r\n('raven.contrib.django.middleware.DjangoRestFrameworkCompatMiddleware',\r\n 'raven.contrib.django.middleware.SentryMiddleware',\r\n 'whitenoise.middleware.WhiteNoiseMiddleware',\r\n 'raven.contrib.django.raven_compat.middleware.Sentry404CatchMiddleware',\r\n 'debug_toolbar.middleware.DebugToolbarMiddleware',\r\n 'django.middleware.security.SecurityMiddleware',\r\n 'django.contrib.sessions.middleware.SessionMiddleware',\r\n 'django.middleware.common.CommonMiddleware',\r\n 'django.middleware.csrf.CsrfViewMiddleware',\r\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\r\n 'django.contrib.messages.middleware.MessageMiddleware',\r\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\r\n 'tz_detect.middleware.TimezoneMiddleware')\r\n\r\n\r\n\r\nTraceback:\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/exception.py\" in inner\r\n 34. response = get_response(request)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py\" in _get_response\r\n 115. response = self.process_exception_by_middleware(e, request)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/core/handlers/base.py\" in _get_response\r\n 113. response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py\" in view\r\n 71. return self.dispatch(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/contrib/auth/mixins.py\" in dispatch\r\n 52. return super().dispatch(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/base.py\" in dispatch\r\n 97. return handler(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py\" in get\r\n 168. return super().get(request, *args, **kwargs)\r\n\r\nFile \"/Users/craiga/.ve/will_of_the_prophets-4-oJArpS/lib/python3.7/site-packages/django/views/generic/edit.py\" in get\r\n 133. 
return self.render_to_response(self.get_context_data())\r\n\r\nFile \"/Users/craiga/will_of_the_prophets/will_of_the_prophets/views.py\" in get_context_data\r\n 82. board=board.Board(now=last_roll.embargo),\r\n\r\nException Type: AttributeError at /roll/\r\nException Value: 'NoneType' object has no attribute 'embargo'\r\n```\n", "before_files": [{"content": "\"\"\"Views.\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.http import condition\nfrom django.views.generic.edit import CreateView\n\nfrom will_of_the_prophets import board, forms, models\n\n\ndef get_last_modified(request):\n \"\"\"Get board's last modified datetime.\"\"\"\n try:\n return (\n models.Roll.objects.filter(embargo__lte=timezone.now())\n .latest(\"embargo\")\n .embargo\n )\n except models.Roll.DoesNotExist:\n return None\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef public_board(request):\n \"\"\"\n Board for the public.\n\n Does not take embargoed rolls into account.\n \"\"\"\n response = render(\n request,\n \"will_of_the_prophets/public_board.html\",\n {\n \"board\": board.Board(),\n \"special_square_types\": models.SpecialSquareType.objects.all(),\n },\n )\n\n canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL\n if canonical_url:\n response[\"Link\"] = f'<{canonical_url}>; rel=\"canonical\"'\n\n return response\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef roll_frequency(request):\n \"\"\"\n Show roll frequency.\n \"\"\"\n roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n for roll in board.Board().rolls:\n roll_count[roll] += 1\n\n return render(\n request,\n \"will_of_the_prophets/roll_frequency.html\",\n {\"roll_frequency\": roll_count},\n )\n\n\nclass RollView(LoginRequiredMixin, CreateView):\n \"\"\"View for rolling the die.\"\"\"\n\n form_class = forms.RollForm\n template_name = \"will_of_the_prophets/roll.html\"\n\n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n board=board.Board(now=last_roll.embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n\n def get_success_url(self):\n return reverse(\"roll\") + \"#chula\"\n", "path": "will_of_the_prophets/views.py"}], "after_files": [{"content": "\"\"\"Views.\"\"\"\n\nfrom django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.utils import timezone\nfrom django.views.decorators.cache import cache_control\nfrom django.views.decorators.clickjacking import xframe_options_exempt\nfrom django.views.decorators.http import condition\nfrom django.views.generic.edit import CreateView\n\nfrom will_of_the_prophets import board, forms, models\n\n\ndef get_last_modified(request):\n \"\"\"Get board's last modified datetime.\"\"\"\n try:\n return (\n models.Roll.objects.filter(embargo__lte=timezone.now())\n .latest(\"embargo\")\n .embargo\n )\n except models.Roll.DoesNotExist:\n return 
None\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef public_board(request):\n \"\"\"\n Board for the public.\n\n Does not take embargoed rolls into account.\n \"\"\"\n response = render(\n request,\n \"will_of_the_prophets/public_board.html\",\n {\n \"board\": board.Board(),\n \"special_square_types\": models.SpecialSquareType.objects.all(),\n },\n )\n\n canonical_url = settings.PUBLIC_BOARD_CANONICAL_URL\n if canonical_url:\n response[\"Link\"] = f'<{canonical_url}>; rel=\"canonical\"'\n\n return response\n\n\n@xframe_options_exempt\n@condition(last_modified_func=get_last_modified)\n@cache_control(max_age=3600)\ndef roll_frequency(request):\n \"\"\"\n Show roll frequency.\n \"\"\"\n roll_count = {1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0}\n for roll in board.Board().rolls:\n roll_count[roll] += 1\n\n return render(\n request,\n \"will_of_the_prophets/roll_frequency.html\",\n {\"roll_frequency\": roll_count},\n )\n\n\nclass RollView(LoginRequiredMixin, CreateView):\n \"\"\"View for rolling the die.\"\"\"\n\n form_class = forms.RollForm\n template_name = \"will_of_the_prophets/roll.html\"\n\n def get_context_data(self, **kwargs):\n last_roll = models.Roll.objects.order_by(\"-embargo\").first()\n last_roll_embargo = None\n if last_roll:\n last_roll_embargo = last_roll.embargo\n\n return super().get_context_data(\n **kwargs,\n last_roll=last_roll,\n board=board.Board(now=last_roll_embargo),\n special_square_types=models.SpecialSquareType.objects.all(),\n )\n\n def get_success_url(self):\n return reverse(\"roll\") + \"#chula\"\n", "path": "will_of_the_prophets/views.py"}]}
1,798
176
gh_patches_debug_27514
rasdani/github-patches
git_diff
TabbycatDebate__tabbycat-951
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Venue's Check-In Statuses page crashes The "Venue's Check-In Statuses" page (Check-Ins -> Rooms' status) is empty, apart from the title element. Opening up devtools shows this error: ![image](https://user-images.githubusercontent.com/10998201/49333465-3ada3780-f5b7-11e8-8108-506d81922565.png) Which points to this line: ![image](https://user-images.githubusercontent.com/10998201/49333471-57766f80-f5b7-11e8-9749-9fbd278a9d38.png) This is a fresh installation on our own DigitalOcean server, and every other page works as expected. Running TabbyCat 2.2.7 on Ubuntu 18.04 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tabbycat/checkins/views.py` Content: ``` 1 import json 2 3 from asgiref.sync import async_to_sync 4 from channels.layers import get_channel_layer 5 from django.contrib import messages 6 from django.core.exceptions import ObjectDoesNotExist 7 from django.views.generic.base import TemplateView 8 from django.template.response import TemplateResponse 9 from django.utils.translation import gettext as _ 10 11 from actionlog.mixins import LogActionMixin 12 from actionlog.models import ActionLogEntry 13 from options.utils import use_team_code_names 14 from participants.models import Person, Speaker 15 from utils.misc import reverse_tournament 16 from utils.mixins import AdministratorMixin, AssistantMixin 17 from utils.views import PostOnlyRedirectView 18 from tournaments.mixins import PublicTournamentPageMixin, TournamentMixin 19 20 from .consumers import CheckInEventConsumer 21 from .models import PersonIdentifier, VenueIdentifier 22 from .utils import create_identifiers, get_unexpired_checkins 23 24 25 class CheckInPreScanView(TournamentMixin, TemplateView): 26 template_name = 'checkin_scan.html' 27 page_title = _('Scan Identifiers') 28 page_emoji = '📷' 29 30 def get_context_data(self, **kwargs): 31 kwargs["scan_url"] = self.tournament.slug + '/checkins/' 32 return super().get_context_data(**kwargs) 33 34 35 class AdminCheckInPreScanView(AdministratorMixin, CheckInPreScanView): 36 scan_view = 'admin-checkin-scan' 37 38 39 class AssistantCheckInPreScanView(AssistantMixin, CheckInPreScanView): 40 scan_view = 'assistant-checkin-scan' 41 42 43 class BaseCheckInStatusView(TournamentMixin, TemplateView): 44 template_name = 'checkin_status.html' 45 scan_view = False 46 47 def get_context_data(self, **kwargs): 48 events = get_unexpired_checkins(self.tournament, self.window_preference) 49 kwargs["events"] = json.dumps([e.serialize() for e in events]) 50 if self.scan_view: 51 kwargs["scan_url"] = self.tournament.slug + '/checkins/' 52 return super().get_context_data(**kwargs) 53 54 55 class CheckInPeopleStatusView(BaseCheckInStatusView): 56 page_emoji = '⌚️' 57 page_title = _("People's Check-In Statuses") 58 window_preference = 'checkin_window_people' 59 60 def get_context_data(self, **kwargs): 61 62 for_admin = True 63 if hasattr(self, '_user_role') and self._user_role == 'public': 64 for_admin = False 65 kwargs["for_admin"] = json.dumps(for_admin) 66 67 team_codes = use_team_code_names(self.tournament, admin=for_admin) 68 kwargs["team_codes"] = json.dumps(team_codes) 69 70 adjudicators = [] 71 for adj in self.tournament.relevant_adjudicators.all().select_related('institution', 'checkin_identifier'): 72 try: 73 code = adj.checkin_identifier.barcode 74 except ObjectDoesNotExist: 75 code = 
None 76 77 adjudicators.append({ 78 'id': adj.id, 'name': adj.name, 'type': 'Adjudicator', 79 'identifier': [code], 'locked': False, 'independent': adj.independent, 80 'institution': adj.institution.serialize if adj.institution else None, 81 }) 82 kwargs["adjudicators"] = json.dumps(adjudicators) 83 84 speakers = [] 85 for speaker in Speaker.objects.filter(team__tournament=self.tournament).select_related('team', 'team__institution', 'checkin_identifier'): 86 try: 87 code = speaker.checkin_identifier.barcode 88 except ObjectDoesNotExist: 89 code = None 90 91 speakers.append({ 92 'id': speaker.id, 'name': speaker.name, 'type': 'Speaker', 93 'identifier': [code], 'locked': False, 94 'team': speaker.team.code_name if team_codes else speaker.team.short_name, 95 'institution': speaker.team.institution.serialize if speaker.team.institution else None, 96 }) 97 kwargs["speakers"] = json.dumps(speakers) 98 99 return super().get_context_data(**kwargs) 100 101 102 class AdminCheckInPeopleStatusView(AdministratorMixin, CheckInPeopleStatusView): 103 scan_view = 'admin-checkin-scan' 104 105 106 class AssistantCheckInPeopleStatusView(AssistantMixin, CheckInPeopleStatusView): 107 scan_view = 'assistant-checkin-scan' 108 109 110 class PublicCheckInPeopleStatusView(PublicTournamentPageMixin, CheckInPeopleStatusView): 111 public_page_preference = 'public_checkins' 112 113 114 class CheckInVenuesStatusView(BaseCheckInStatusView): 115 page_emoji = '👜' 116 page_title = _("Venue's Check-In Statuses") 117 window_preference = 'checkin_window_venues' 118 119 def get_context_data(self, **kwargs): 120 venues = [] 121 for venue in self.tournament.relevant_venues.select_related('checkin_identifier').prefetch_related('venuecategory_set').all(): 122 item = venue.serialize() 123 item['locked'] = False 124 try: 125 item['identifier'] = [venue.checkin_identifier.barcode] 126 except ObjectDoesNotExist: 127 item['identifier'] = [None] 128 venues.append(item) 129 kwargs["venues"] = json.dumps(venues) 130 kwargs["team_codes"] = json.dumps(False) 131 132 return super().get_context_data(**kwargs) 133 134 135 class AdminCheckInVenuesStatusView(AdministratorMixin, CheckInVenuesStatusView): 136 scan_view = 'admin-checkin-scan' 137 138 139 class AssistantCheckInVenuesStatusView(AssistantMixin, CheckInVenuesStatusView): 140 scan_view = 'assistant-checkin-scan' 141 142 143 class SegregatedCheckinsMixin(TournamentMixin): 144 145 def t_speakers(self): 146 return Speaker.objects.filter( 147 team__tournament=self.tournament).values_list( 148 'person_ptr_id', flat=True) 149 150 def speakers_with_barcodes(self): 151 identifiers = PersonIdentifier.objects.all() 152 return identifiers.filter(person_id__in=self.t_speakers()) 153 154 def t_adjs(self): 155 return self.tournament.adjudicator_set.values_list( 156 'person_ptr_id', flat=True) 157 158 def adjs_with_barcodes(self): 159 identifiers = PersonIdentifier.objects.all() 160 return identifiers.filter(person_id__in=self.t_adjs()) 161 162 163 class CheckInIdentifiersView(SegregatedCheckinsMixin, TemplateView): 164 template_name = 'checkin_ids.html' 165 page_title = _('Make Identifiers') 166 page_emoji = '📛' 167 168 def get_context_data(self, **kwargs): 169 t = self.tournament 170 kwargs["check_in_info"] = { 171 "speakers": { 172 "title": _("Speakers"), 173 "total": self.t_speakers().count(), 174 "in": self.speakers_with_barcodes().count() 175 }, 176 "adjudicators": { 177 "title": _("Adjudicators"), 178 "total": self.t_adjs().count(), 179 "in": self.adjs_with_barcodes().count() 180 }, 181 
"venues": { 182 "title": _("Venues"), 183 "total": t.venue_set.count(), 184 "in": VenueIdentifier.objects.filter(venue__tournament=t).count(), 185 } 186 } 187 return super().get_context_data(**kwargs) 188 189 190 class AdminCheckInIdentifiersView(AdministratorMixin, CheckInIdentifiersView): 191 pass 192 193 194 class AssistantCheckInIdentifiersView(AssistantMixin, CheckInIdentifiersView): 195 pass 196 197 198 class AdminCheckInGenerateView(AdministratorMixin, LogActionMixin, 199 TournamentMixin, PostOnlyRedirectView): 200 201 def get_action_log_type(self): 202 if self.kwargs["kind"] == "speakers": 203 return ActionLogEntry.ACTION_TYPE_CHECKIN_SPEAK_GENERATE 204 elif self.kwargs["kind"] == "adjudicators": 205 return ActionLogEntry.ACTION_TYPE_CHECKIN_ADJ_GENERATE 206 elif self.kwargs["kind"] == "venues": 207 return ActionLogEntry.ACTION_TYPE_CHECKIN_VENUES_GENERATE 208 209 # Providing tournament_slug_url_kwarg isn't working for some reason; so use: 210 def get_redirect_url(self, *args, **kwargs): 211 return reverse_tournament('admin-checkin-identifiers', self.tournament) 212 213 def post(self, request, *args, **kwargs): 214 t = self.tournament 215 216 if self.kwargs["kind"] == "speakers": 217 create_identifiers(PersonIdentifier, Speaker.objects.filter(team__tournament=t)) 218 elif self.kwargs["kind"] == "adjudicators": 219 create_identifiers(PersonIdentifier, t.adjudicator_set.all()) 220 elif self.kwargs["kind"] == "venues": 221 create_identifiers(VenueIdentifier, t.venue_set.all()) 222 223 messages.success(request, _("Generated identifiers for %s" % self.kwargs["kind"])) 224 self.log_action() # Need to call explicitly 225 return super().post(request, *args, **kwargs) 226 227 228 class CheckInPrintablesView(SegregatedCheckinsMixin, TemplateView): 229 template_name = 'checkin_printables.html' 230 page_title = _('Identifiers') 231 page_emoji = '📛' 232 233 def get_context_data(self, **kwargs): 234 if self.kwargs["kind"] == "speakers": 235 kwargs["identifiers"] = self.speakers_with_barcodes().order_by('person__name') 236 elif self.kwargs["kind"] == "adjudicators": 237 kwargs["identifiers"] = self.adjs_with_barcodes().order_by('person__name') 238 elif self.kwargs["kind"] == "venues": 239 venues = self.tournament.relevant_venues 240 kwargs["identifiers"] = VenueIdentifier.objects.filter(venue__in=venues) 241 242 return super().get_context_data(**kwargs) 243 244 245 class AdminCheckInPrintablesView(AdministratorMixin, CheckInPrintablesView): 246 pass 247 248 249 class AssistantCheckInPrintablesView(AssistantMixin, CheckInPrintablesView): 250 pass 251 252 253 class ParticipantCheckinView(PublicTournamentPageMixin, PostOnlyRedirectView): 254 255 public_page_preference = 'public_checkins_submit' 256 257 def post(self, request, *args, **kwargs): 258 t = self.tournament 259 260 action = request.POST['action'] 261 262 try: 263 person = Person.objects.get(url_key=kwargs['url_key']) 264 identifier = PersonIdentifier.objects.get(person=person) 265 except ObjectDoesNotExist: 266 messages.error(self.request, _("Could not check you in as you do not have an identifying code — your tab director may need to make you an identifier.")) 267 return super().post(request, *args, **kwargs) 268 269 checkins = get_unexpired_checkins(t, 'checkin_window_people') 270 existing_checkin = checkins.filter(identifier=identifier) 271 if action == 'revoke': 272 if existing_checkin.exists(): 273 messages.success(self.request, _("You have revoked your check-in.")) 274 else: 275 messages.error(self.request, _("Whoops! 
Looks like your check-in was already revoked.")) 276 elif action == 'checkin': 277 if existing_checkin.exists(): 278 messages.error(self.request, _("Whoops! Looks like you're already checked in.")) 279 else: 280 messages.success(self.request, _("You are now checked in.")) 281 else: 282 return TemplateResponse(request=self.request, template='400.html', status=400) 283 284 group_name = CheckInEventConsumer.group_prefix + "_" + t.slug 285 286 # Override permissions check - no user but authenticated through URL 287 async_to_sync(get_channel_layer().group_send)( 288 group_name, { 289 'type': 'broadcast_checkin', 290 'content': { 291 'barcodes': [identifier.barcode], 292 'status': action == 'checkin', 293 'type': 'people', 294 'component_id': None 295 } 296 } 297 ) 298 299 return super().post(request, *args, **kwargs) 300 301 def get_redirect_url(self, *args, **kwargs): 302 return reverse_tournament('privateurls-person-index', self.tournament, kwargs={'url_key': kwargs['url_key']}) 303 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tabbycat/checkins/views.py b/tabbycat/checkins/views.py --- a/tabbycat/checkins/views.py +++ b/tabbycat/checkins/views.py @@ -43,8 +43,12 @@ class BaseCheckInStatusView(TournamentMixin, TemplateView): template_name = 'checkin_status.html' scan_view = False + for_admin = True def get_context_data(self, **kwargs): + + kwargs["for_admin"] = json.dumps(self.for_admin) + events = get_unexpired_checkins(self.tournament, self.window_preference) kwargs["events"] = json.dumps([e.serialize() for e in events]) if self.scan_view: @@ -59,12 +63,7 @@ def get_context_data(self, **kwargs): - for_admin = True - if hasattr(self, '_user_role') and self._user_role == 'public': - for_admin = False - kwargs["for_admin"] = json.dumps(for_admin) - - team_codes = use_team_code_names(self.tournament, admin=for_admin) + team_codes = use_team_code_names(self.tournament, admin=self.for_admin) kwargs["team_codes"] = json.dumps(team_codes) adjudicators = [] @@ -108,6 +107,7 @@ class PublicCheckInPeopleStatusView(PublicTournamentPageMixin, CheckInPeopleStatusView): + for_admin = False public_page_preference = 'public_checkins'
{"golden_diff": "diff --git a/tabbycat/checkins/views.py b/tabbycat/checkins/views.py\n--- a/tabbycat/checkins/views.py\n+++ b/tabbycat/checkins/views.py\n@@ -43,8 +43,12 @@\n class BaseCheckInStatusView(TournamentMixin, TemplateView):\n template_name = 'checkin_status.html'\n scan_view = False\n+ for_admin = True\n \n def get_context_data(self, **kwargs):\n+\n+ kwargs[\"for_admin\"] = json.dumps(self.for_admin)\n+\n events = get_unexpired_checkins(self.tournament, self.window_preference)\n kwargs[\"events\"] = json.dumps([e.serialize() for e in events])\n if self.scan_view:\n@@ -59,12 +63,7 @@\n \n def get_context_data(self, **kwargs):\n \n- for_admin = True\n- if hasattr(self, '_user_role') and self._user_role == 'public':\n- for_admin = False\n- kwargs[\"for_admin\"] = json.dumps(for_admin)\n-\n- team_codes = use_team_code_names(self.tournament, admin=for_admin)\n+ team_codes = use_team_code_names(self.tournament, admin=self.for_admin)\n kwargs[\"team_codes\"] = json.dumps(team_codes)\n \n adjudicators = []\n@@ -108,6 +107,7 @@\n \n \n class PublicCheckInPeopleStatusView(PublicTournamentPageMixin, CheckInPeopleStatusView):\n+ for_admin = False\n public_page_preference = 'public_checkins'\n", "issue": "Venue's Check-In Statuses page crashes\nThe \"Venue's Check-In Statuses\" page (Check-Ins -> Rooms' status) is empty, apart from the title element. Opening up devtools shows this error:\r\n\r\n![image](https://user-images.githubusercontent.com/10998201/49333465-3ada3780-f5b7-11e8-8108-506d81922565.png)\r\n\r\nWhich points to this line:\r\n\r\n![image](https://user-images.githubusercontent.com/10998201/49333471-57766f80-f5b7-11e8-9749-9fbd278a9d38.png)\r\n\r\nThis is a fresh installation on our own DigitalOcean server, and every other page works as expected. 
Running TabbyCat 2.2.7 on Ubuntu 18.04\n", "before_files": [{"content": "import json\n\nfrom asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.generic.base import TemplateView\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import gettext as _\n\nfrom actionlog.mixins import LogActionMixin\nfrom actionlog.models import ActionLogEntry\nfrom options.utils import use_team_code_names\nfrom participants.models import Person, Speaker\nfrom utils.misc import reverse_tournament\nfrom utils.mixins import AdministratorMixin, AssistantMixin\nfrom utils.views import PostOnlyRedirectView\nfrom tournaments.mixins import PublicTournamentPageMixin, TournamentMixin\n\nfrom .consumers import CheckInEventConsumer\nfrom .models import PersonIdentifier, VenueIdentifier\nfrom .utils import create_identifiers, get_unexpired_checkins\n\n\nclass CheckInPreScanView(TournamentMixin, TemplateView):\n template_name = 'checkin_scan.html'\n page_title = _('Scan Identifiers')\n page_emoji = '\ud83d\udcf7'\n\n def get_context_data(self, **kwargs):\n kwargs[\"scan_url\"] = self.tournament.slug + '/checkins/'\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPreScanView(AdministratorMixin, CheckInPreScanView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInPreScanView(AssistantMixin, CheckInPreScanView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass BaseCheckInStatusView(TournamentMixin, TemplateView):\n template_name = 'checkin_status.html'\n scan_view = False\n\n def get_context_data(self, **kwargs):\n events = get_unexpired_checkins(self.tournament, self.window_preference)\n kwargs[\"events\"] = json.dumps([e.serialize() for e in events])\n if self.scan_view:\n kwargs[\"scan_url\"] = self.tournament.slug + '/checkins/'\n return super().get_context_data(**kwargs)\n\n\nclass CheckInPeopleStatusView(BaseCheckInStatusView):\n page_emoji = '\u231a\ufe0f'\n page_title = _(\"People's Check-In Statuses\")\n window_preference = 'checkin_window_people'\n\n def get_context_data(self, **kwargs):\n\n for_admin = True\n if hasattr(self, '_user_role') and self._user_role == 'public':\n for_admin = False\n kwargs[\"for_admin\"] = json.dumps(for_admin)\n\n team_codes = use_team_code_names(self.tournament, admin=for_admin)\n kwargs[\"team_codes\"] = json.dumps(team_codes)\n\n adjudicators = []\n for adj in self.tournament.relevant_adjudicators.all().select_related('institution', 'checkin_identifier'):\n try:\n code = adj.checkin_identifier.barcode\n except ObjectDoesNotExist:\n code = None\n\n adjudicators.append({\n 'id': adj.id, 'name': adj.name, 'type': 'Adjudicator',\n 'identifier': [code], 'locked': False, 'independent': adj.independent,\n 'institution': adj.institution.serialize if adj.institution else None,\n })\n kwargs[\"adjudicators\"] = json.dumps(adjudicators)\n\n speakers = []\n for speaker in Speaker.objects.filter(team__tournament=self.tournament).select_related('team', 'team__institution', 'checkin_identifier'):\n try:\n code = speaker.checkin_identifier.barcode\n except ObjectDoesNotExist:\n code = None\n\n speakers.append({\n 'id': speaker.id, 'name': speaker.name, 'type': 'Speaker',\n 'identifier': [code], 'locked': False,\n 'team': speaker.team.code_name if team_codes else speaker.team.short_name,\n 'institution': speaker.team.institution.serialize if speaker.team.institution else None,\n })\n 
kwargs[\"speakers\"] = json.dumps(speakers)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPeopleStatusView(AdministratorMixin, CheckInPeopleStatusView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInPeopleStatusView(AssistantMixin, CheckInPeopleStatusView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass PublicCheckInPeopleStatusView(PublicTournamentPageMixin, CheckInPeopleStatusView):\n public_page_preference = 'public_checkins'\n\n\nclass CheckInVenuesStatusView(BaseCheckInStatusView):\n page_emoji = '\ud83d\udc5c'\n page_title = _(\"Venue's Check-In Statuses\")\n window_preference = 'checkin_window_venues'\n\n def get_context_data(self, **kwargs):\n venues = []\n for venue in self.tournament.relevant_venues.select_related('checkin_identifier').prefetch_related('venuecategory_set').all():\n item = venue.serialize()\n item['locked'] = False\n try:\n item['identifier'] = [venue.checkin_identifier.barcode]\n except ObjectDoesNotExist:\n item['identifier'] = [None]\n venues.append(item)\n kwargs[\"venues\"] = json.dumps(venues)\n kwargs[\"team_codes\"] = json.dumps(False)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInVenuesStatusView(AdministratorMixin, CheckInVenuesStatusView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInVenuesStatusView(AssistantMixin, CheckInVenuesStatusView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass SegregatedCheckinsMixin(TournamentMixin):\n\n def t_speakers(self):\n return Speaker.objects.filter(\n team__tournament=self.tournament).values_list(\n 'person_ptr_id', flat=True)\n\n def speakers_with_barcodes(self):\n identifiers = PersonIdentifier.objects.all()\n return identifiers.filter(person_id__in=self.t_speakers())\n\n def t_adjs(self):\n return self.tournament.adjudicator_set.values_list(\n 'person_ptr_id', flat=True)\n\n def adjs_with_barcodes(self):\n identifiers = PersonIdentifier.objects.all()\n return identifiers.filter(person_id__in=self.t_adjs())\n\n\nclass CheckInIdentifiersView(SegregatedCheckinsMixin, TemplateView):\n template_name = 'checkin_ids.html'\n page_title = _('Make Identifiers')\n page_emoji = '\ud83d\udcdb'\n\n def get_context_data(self, **kwargs):\n t = self.tournament\n kwargs[\"check_in_info\"] = {\n \"speakers\": {\n \"title\": _(\"Speakers\"),\n \"total\": self.t_speakers().count(),\n \"in\": self.speakers_with_barcodes().count()\n },\n \"adjudicators\": {\n \"title\": _(\"Adjudicators\"),\n \"total\": self.t_adjs().count(),\n \"in\": self.adjs_with_barcodes().count()\n },\n \"venues\": {\n \"title\": _(\"Venues\"),\n \"total\": t.venue_set.count(),\n \"in\": VenueIdentifier.objects.filter(venue__tournament=t).count(),\n }\n }\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInIdentifiersView(AdministratorMixin, CheckInIdentifiersView):\n pass\n\n\nclass AssistantCheckInIdentifiersView(AssistantMixin, CheckInIdentifiersView):\n pass\n\n\nclass AdminCheckInGenerateView(AdministratorMixin, LogActionMixin,\n TournamentMixin, PostOnlyRedirectView):\n\n def get_action_log_type(self):\n if self.kwargs[\"kind\"] == \"speakers\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_SPEAK_GENERATE\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_ADJ_GENERATE\n elif self.kwargs[\"kind\"] == \"venues\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_VENUES_GENERATE\n\n # Providing tournament_slug_url_kwarg isn't working for some reason; so use:\n def get_redirect_url(self, *args, **kwargs):\n return 
reverse_tournament('admin-checkin-identifiers', self.tournament)\n\n def post(self, request, *args, **kwargs):\n t = self.tournament\n\n if self.kwargs[\"kind\"] == \"speakers\":\n create_identifiers(PersonIdentifier, Speaker.objects.filter(team__tournament=t))\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n create_identifiers(PersonIdentifier, t.adjudicator_set.all())\n elif self.kwargs[\"kind\"] == \"venues\":\n create_identifiers(VenueIdentifier, t.venue_set.all())\n\n messages.success(request, _(\"Generated identifiers for %s\" % self.kwargs[\"kind\"]))\n self.log_action() # Need to call explicitly\n return super().post(request, *args, **kwargs)\n\n\nclass CheckInPrintablesView(SegregatedCheckinsMixin, TemplateView):\n template_name = 'checkin_printables.html'\n page_title = _('Identifiers')\n page_emoji = '\ud83d\udcdb'\n\n def get_context_data(self, **kwargs):\n if self.kwargs[\"kind\"] == \"speakers\":\n kwargs[\"identifiers\"] = self.speakers_with_barcodes().order_by('person__name')\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n kwargs[\"identifiers\"] = self.adjs_with_barcodes().order_by('person__name')\n elif self.kwargs[\"kind\"] == \"venues\":\n venues = self.tournament.relevant_venues\n kwargs[\"identifiers\"] = VenueIdentifier.objects.filter(venue__in=venues)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPrintablesView(AdministratorMixin, CheckInPrintablesView):\n pass\n\n\nclass AssistantCheckInPrintablesView(AssistantMixin, CheckInPrintablesView):\n pass\n\n\nclass ParticipantCheckinView(PublicTournamentPageMixin, PostOnlyRedirectView):\n\n public_page_preference = 'public_checkins_submit'\n\n def post(self, request, *args, **kwargs):\n t = self.tournament\n\n action = request.POST['action']\n\n try:\n person = Person.objects.get(url_key=kwargs['url_key'])\n identifier = PersonIdentifier.objects.get(person=person)\n except ObjectDoesNotExist:\n messages.error(self.request, _(\"Could not check you in as you do not have an identifying code \u2014 your tab director may need to make you an identifier.\"))\n return super().post(request, *args, **kwargs)\n\n checkins = get_unexpired_checkins(t, 'checkin_window_people')\n existing_checkin = checkins.filter(identifier=identifier)\n if action == 'revoke':\n if existing_checkin.exists():\n messages.success(self.request, _(\"You have revoked your check-in.\"))\n else:\n messages.error(self.request, _(\"Whoops! Looks like your check-in was already revoked.\"))\n elif action == 'checkin':\n if existing_checkin.exists():\n messages.error(self.request, _(\"Whoops! 
Looks like you're already checked in.\"))\n else:\n messages.success(self.request, _(\"You are now checked in.\"))\n else:\n return TemplateResponse(request=self.request, template='400.html', status=400)\n\n group_name = CheckInEventConsumer.group_prefix + \"_\" + t.slug\n\n # Override permissions check - no user but authenticated through URL\n async_to_sync(get_channel_layer().group_send)(\n group_name, {\n 'type': 'broadcast_checkin',\n 'content': {\n 'barcodes': [identifier.barcode],\n 'status': action == 'checkin',\n 'type': 'people',\n 'component_id': None\n }\n }\n )\n\n return super().post(request, *args, **kwargs)\n\n def get_redirect_url(self, *args, **kwargs):\n return reverse_tournament('privateurls-person-index', self.tournament, kwargs={'url_key': kwargs['url_key']})\n", "path": "tabbycat/checkins/views.py"}], "after_files": [{"content": "import json\n\nfrom asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.views.generic.base import TemplateView\nfrom django.template.response import TemplateResponse\nfrom django.utils.translation import gettext as _\n\nfrom actionlog.mixins import LogActionMixin\nfrom actionlog.models import ActionLogEntry\nfrom options.utils import use_team_code_names\nfrom participants.models import Person, Speaker\nfrom utils.misc import reverse_tournament\nfrom utils.mixins import AdministratorMixin, AssistantMixin\nfrom utils.views import PostOnlyRedirectView\nfrom tournaments.mixins import PublicTournamentPageMixin, TournamentMixin\n\nfrom .consumers import CheckInEventConsumer\nfrom .models import PersonIdentifier, VenueIdentifier\nfrom .utils import create_identifiers, get_unexpired_checkins\n\n\nclass CheckInPreScanView(TournamentMixin, TemplateView):\n template_name = 'checkin_scan.html'\n page_title = _('Scan Identifiers')\n page_emoji = '\ud83d\udcf7'\n\n def get_context_data(self, **kwargs):\n kwargs[\"scan_url\"] = self.tournament.slug + '/checkins/'\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPreScanView(AdministratorMixin, CheckInPreScanView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInPreScanView(AssistantMixin, CheckInPreScanView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass BaseCheckInStatusView(TournamentMixin, TemplateView):\n template_name = 'checkin_status.html'\n scan_view = False\n for_admin = True\n\n def get_context_data(self, **kwargs):\n\n kwargs[\"for_admin\"] = json.dumps(self.for_admin)\n\n events = get_unexpired_checkins(self.tournament, self.window_preference)\n kwargs[\"events\"] = json.dumps([e.serialize() for e in events])\n if self.scan_view:\n kwargs[\"scan_url\"] = self.tournament.slug + '/checkins/'\n return super().get_context_data(**kwargs)\n\n\nclass CheckInPeopleStatusView(BaseCheckInStatusView):\n page_emoji = '\u231a\ufe0f'\n page_title = _(\"People's Check-In Statuses\")\n window_preference = 'checkin_window_people'\n\n def get_context_data(self, **kwargs):\n\n team_codes = use_team_code_names(self.tournament, admin=self.for_admin)\n kwargs[\"team_codes\"] = json.dumps(team_codes)\n\n adjudicators = []\n for adj in self.tournament.relevant_adjudicators.all().select_related('institution', 'checkin_identifier'):\n try:\n code = adj.checkin_identifier.barcode\n except ObjectDoesNotExist:\n code = None\n\n adjudicators.append({\n 'id': adj.id, 'name': adj.name, 'type': 'Adjudicator',\n 'identifier': [code], 'locked': False, 
'independent': adj.independent,\n 'institution': adj.institution.serialize if adj.institution else None,\n })\n kwargs[\"adjudicators\"] = json.dumps(adjudicators)\n\n speakers = []\n for speaker in Speaker.objects.filter(team__tournament=self.tournament).select_related('team', 'team__institution', 'checkin_identifier'):\n try:\n code = speaker.checkin_identifier.barcode\n except ObjectDoesNotExist:\n code = None\n\n speakers.append({\n 'id': speaker.id, 'name': speaker.name, 'type': 'Speaker',\n 'identifier': [code], 'locked': False,\n 'team': speaker.team.code_name if team_codes else speaker.team.short_name,\n 'institution': speaker.team.institution.serialize if speaker.team.institution else None,\n })\n kwargs[\"speakers\"] = json.dumps(speakers)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPeopleStatusView(AdministratorMixin, CheckInPeopleStatusView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInPeopleStatusView(AssistantMixin, CheckInPeopleStatusView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass PublicCheckInPeopleStatusView(PublicTournamentPageMixin, CheckInPeopleStatusView):\n for_admin = False\n public_page_preference = 'public_checkins'\n\n\nclass CheckInVenuesStatusView(BaseCheckInStatusView):\n page_emoji = '\ud83d\udc5c'\n page_title = _(\"Venue's Check-In Statuses\")\n window_preference = 'checkin_window_venues'\n\n def get_context_data(self, **kwargs):\n venues = []\n for venue in self.tournament.relevant_venues.select_related('checkin_identifier').prefetch_related('venuecategory_set').all():\n item = venue.serialize()\n item['locked'] = False\n try:\n item['identifier'] = [venue.checkin_identifier.barcode]\n except ObjectDoesNotExist:\n item['identifier'] = [None]\n venues.append(item)\n kwargs[\"venues\"] = json.dumps(venues)\n kwargs[\"team_codes\"] = json.dumps(False)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInVenuesStatusView(AdministratorMixin, CheckInVenuesStatusView):\n scan_view = 'admin-checkin-scan'\n\n\nclass AssistantCheckInVenuesStatusView(AssistantMixin, CheckInVenuesStatusView):\n scan_view = 'assistant-checkin-scan'\n\n\nclass SegregatedCheckinsMixin(TournamentMixin):\n\n def t_speakers(self):\n return Speaker.objects.filter(\n team__tournament=self.tournament).values_list(\n 'person_ptr_id', flat=True)\n\n def speakers_with_barcodes(self):\n identifiers = PersonIdentifier.objects.all()\n return identifiers.filter(person_id__in=self.t_speakers())\n\n def t_adjs(self):\n return self.tournament.adjudicator_set.values_list(\n 'person_ptr_id', flat=True)\n\n def adjs_with_barcodes(self):\n identifiers = PersonIdentifier.objects.all()\n return identifiers.filter(person_id__in=self.t_adjs())\n\n\nclass CheckInIdentifiersView(SegregatedCheckinsMixin, TemplateView):\n template_name = 'checkin_ids.html'\n page_title = _('Make Identifiers')\n page_emoji = '\ud83d\udcdb'\n\n def get_context_data(self, **kwargs):\n t = self.tournament\n kwargs[\"check_in_info\"] = {\n \"speakers\": {\n \"title\": _(\"Speakers\"),\n \"total\": self.t_speakers().count(),\n \"in\": self.speakers_with_barcodes().count()\n },\n \"adjudicators\": {\n \"title\": _(\"Adjudicators\"),\n \"total\": self.t_adjs().count(),\n \"in\": self.adjs_with_barcodes().count()\n },\n \"venues\": {\n \"title\": _(\"Venues\"),\n \"total\": t.venue_set.count(),\n \"in\": VenueIdentifier.objects.filter(venue__tournament=t).count(),\n }\n }\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInIdentifiersView(AdministratorMixin, 
CheckInIdentifiersView):\n pass\n\n\nclass AssistantCheckInIdentifiersView(AssistantMixin, CheckInIdentifiersView):\n pass\n\n\nclass AdminCheckInGenerateView(AdministratorMixin, LogActionMixin,\n TournamentMixin, PostOnlyRedirectView):\n\n def get_action_log_type(self):\n if self.kwargs[\"kind\"] == \"speakers\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_SPEAK_GENERATE\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_ADJ_GENERATE\n elif self.kwargs[\"kind\"] == \"venues\":\n return ActionLogEntry.ACTION_TYPE_CHECKIN_VENUES_GENERATE\n\n # Providing tournament_slug_url_kwarg isn't working for some reason; so use:\n def get_redirect_url(self, *args, **kwargs):\n return reverse_tournament('admin-checkin-identifiers', self.tournament)\n\n def post(self, request, *args, **kwargs):\n t = self.tournament\n\n if self.kwargs[\"kind\"] == \"speakers\":\n create_identifiers(PersonIdentifier, Speaker.objects.filter(team__tournament=t))\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n create_identifiers(PersonIdentifier, t.adjudicator_set.all())\n elif self.kwargs[\"kind\"] == \"venues\":\n create_identifiers(VenueIdentifier, t.venue_set.all())\n\n messages.success(request, _(\"Generated identifiers for %s\" % self.kwargs[\"kind\"]))\n self.log_action() # Need to call explicitly\n return super().post(request, *args, **kwargs)\n\n\nclass CheckInPrintablesView(SegregatedCheckinsMixin, TemplateView):\n template_name = 'checkin_printables.html'\n page_title = _('Identifiers')\n page_emoji = '\ud83d\udcdb'\n\n def get_context_data(self, **kwargs):\n if self.kwargs[\"kind\"] == \"speakers\":\n kwargs[\"identifiers\"] = self.speakers_with_barcodes().order_by('person__name')\n elif self.kwargs[\"kind\"] == \"adjudicators\":\n kwargs[\"identifiers\"] = self.adjs_with_barcodes().order_by('person__name')\n elif self.kwargs[\"kind\"] == \"venues\":\n venues = self.tournament.relevant_venues\n kwargs[\"identifiers\"] = VenueIdentifier.objects.filter(venue__in=venues)\n\n return super().get_context_data(**kwargs)\n\n\nclass AdminCheckInPrintablesView(AdministratorMixin, CheckInPrintablesView):\n pass\n\n\nclass AssistantCheckInPrintablesView(AssistantMixin, CheckInPrintablesView):\n pass\n\n\nclass ParticipantCheckinView(PublicTournamentPageMixin, PostOnlyRedirectView):\n\n public_page_preference = 'public_checkins_submit'\n\n def post(self, request, *args, **kwargs):\n t = self.tournament\n\n action = request.POST['action']\n\n try:\n person = Person.objects.get(url_key=kwargs['url_key'])\n identifier = PersonIdentifier.objects.get(person=person)\n except ObjectDoesNotExist:\n messages.error(self.request, _(\"Could not check you in as you do not have an identifying code \u2014 your tab director may need to make you an identifier.\"))\n return super().post(request, *args, **kwargs)\n\n checkins = get_unexpired_checkins(t, 'checkin_window_people')\n existing_checkin = checkins.filter(identifier=identifier)\n if action == 'revoke':\n if existing_checkin.exists():\n messages.success(self.request, _(\"You have revoked your check-in.\"))\n else:\n messages.error(self.request, _(\"Whoops! Looks like your check-in was already revoked.\"))\n elif action == 'checkin':\n if existing_checkin.exists():\n messages.error(self.request, _(\"Whoops! 
Looks like you're already checked in.\"))\n else:\n messages.success(self.request, _(\"You are now checked in.\"))\n else:\n return TemplateResponse(request=self.request, template='400.html', status=400)\n\n group_name = CheckInEventConsumer.group_prefix + \"_\" + t.slug\n\n # Override permissions check - no user but authenticated through URL\n async_to_sync(get_channel_layer().group_send)(\n group_name, {\n 'type': 'broadcast_checkin',\n 'content': {\n 'barcodes': [identifier.barcode],\n 'status': action == 'checkin',\n 'type': 'people',\n 'component_id': None\n }\n }\n )\n\n return super().post(request, *args, **kwargs)\n\n def get_redirect_url(self, *args, **kwargs):\n return reverse_tournament('privateurls-person-index', self.tournament, kwargs={'url_key': kwargs['url_key']})\n", "path": "tabbycat/checkins/views.py"}]}
num_tokens: 3,836
num_tokens_diff: 332
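In the check-in views record above, the after_files version replaces the runtime `hasattr(self, '_user_role')` check inside `CheckInPeopleStatusView.get_context_data` with a `for_admin` class attribute on `BaseCheckInStatusView` that `PublicCheckInPeopleStatusView` overrides with `False`. Below is a minimal, standalone sketch of that pattern; the class names are simplified stand-ins, not the real TabbyCat views:

```python
class BaseStatusView:
    # Class-level default; subclasses flip it instead of probing user roles at runtime.
    for_admin = True

    def get_context(self) -> dict:
        # Every view shares one implementation and just reads the attribute.
        return {"for_admin": self.for_admin}


class PublicStatusView(BaseStatusView):
    for_admin = False  # the only line a public-facing subclass needs


assert BaseStatusView().get_context() == {"for_admin": True}
assert PublicStatusView().get_context() == {"for_admin": False}
```

Declaring the flag on the class keeps the admin/public distinction in one obvious place and spares each subclass from re-implementing `get_context_data`.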
problem_id: gh_patches_debug_3598
source: rasdani/github-patches
task_type: git_diff
in_source_id: Zeroto521__my-data-toolkit-580
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MAINT: Simplify `register_method_factory` <!-- Thanks for contributing a pull request! Please follow these standard acronyms to start the commit message: - ENH: enhancement - BUG: bug fix - DOC: documentation - TYP: type annotations - TST: addition or modification of tests - MAINT: maintenance commit (refactoring, typos, etc.) - BLD: change related to building - REL: related to releasing - API: an (incompatible) API change - DEP: deprecate something, or remove a deprecated object - DEV: development tool or utility - REV: revert an earlier commit - PERF: performance improvement - BOT: always commit via a bot - CI: related to CI or CD - CLN: Code cleanup --> - [ ] closes #xxxx - [x] whatsnew entry Delete duplicate `wraps` lines --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dtoolkit/accessor/register.py` Content: ``` 1 from __future__ import annotations 2 3 from functools import wraps 4 from typing import Callable 5 6 from pandas.api.extensions import register_dataframe_accessor 7 from pandas.api.extensions import register_index_accessor 8 from pandas.api.extensions import register_series_accessor 9 from pandas.util._decorators import doc 10 11 from dtoolkit._typing import SeriesOrFrame 12 13 14 def register_method_factory(register_accessor): 15 """ 16 Let pandas-object like accessor which only hooks class also hooks function easily. 17 18 Read more in the `User Guide`_. 19 20 .. _User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object 21 22 Parameters 23 ---------- 24 register_accessor : Pandas-object like accessor 25 26 See Also 27 -------- 28 register_dataframe_method 29 register_series_method 30 register_index_method 31 dtoolkit.geoaccessor.register_geoseries_method 32 dtoolkit.geoaccessor.register_geodataframe_method 33 """ 34 35 # based on pandas_flavor/register.py 36 def register_accessor_method(method: Callable, name: str): 37 def method_accessor(pd_obj: SeriesOrFrame): 38 @wraps(method) 39 def wrapper(*args, **kwargs): 40 return method(pd_obj, *args, **kwargs) 41 42 return wrapper 43 44 # Register method as pandas object inner method. 45 register_accessor(name)(method_accessor) 46 47 # Must return method itself, otherwise would get None. 48 return method 49 50 def register_accessor_alias(name: str = None): 51 def wrapper(method: Callable): 52 return register_accessor_method(method, name or method.__name__) 53 54 return wrapper 55 56 @wraps(register_accessor) 57 def decorator(name: Callable | str = None): 58 if callable(name): # Supports `@register_*_method` using. 59 method = name # This 'name' variable actually is a function. 60 return register_accessor_method(method, method.__name__) 61 62 # Supports `@register_*_method()` and `@register_*_method(name="")` using. 63 return register_accessor_alias(name) 64 65 return decorator 66 67 68 @register_method_factory 69 @doc(klass=":class:`~pandas.Series`") 70 def register_series_method(name: str = None): 71 """ 72 {klass} register accessor for human. 73 74 Write method normally, use method naturally. 75 76 Read more in the `User Guide`_. 77 78 .. _User Guide: ../../guide/tips_about_accessor.ipynb 79 80 Parameters 81 ---------- 82 name : str, optional 83 Use the ``method`` name as the default accessor entrance if ``name`` is None. 
84 85 See Also 86 -------- 87 register_dataframe_method 88 register_series_method 89 register_index_method 90 pandas.api.extensions.register_dataframe_accessor 91 pandas.api.extensions.register_series_accessor 92 pandas.api.extensions.register_index_accessor 93 94 Examples 95 -------- 96 In your library code:: 97 98 from __future__ import annotations 99 100 from dtoolkit.accessor import register_dataframe_method 101 from dtoolkit.accessor import register_series_method 102 from dtoolkit.accessor import register_index_method 103 import pandas as pd 104 105 @register_index_method("col") # Support alias name also. 106 @register_series_method("col") 107 @register_dataframe_method(name="col") 108 @register_index_method # Use accessor method's `__name__` as the entrance. 109 @register_series_method 110 @register_dataframe_method 111 def cols(pd_obj) -> int | str | list[int | str] | None: 112 ''' 113 An API to gather :attr:`~pandas.Series.name` and 114 :attr:`~pandas.DataFrame.columns` to one. 115 ''' 116 117 if isinstance(pd_obj, (pd.Series, pd.Index)): 118 return pd_obj.name 119 120 return pd_obj.columns.tolist() 121 122 Back in an interactive IPython session: 123 124 .. code-block:: ipython 125 126 In [1]: import pandas as pd 127 128 In [2]: df = pd.DataFrame( 129 ...: {{ 130 ...: "a": [1, 2], 131 ...: "b": [3, 4], 132 ...: }}, 133 ...: index=pd.Index( 134 ...: ["x", "y"], 135 ...: name="c", 136 ...: ), 137 ...: ) 138 139 In [3]: df 140 Out[3]: 141 a b 142 c 143 x 1 3 144 y 2 4 145 146 Get the columns of DataFrame via `cols` or `col` method 147 148 In [4]: df.col() 149 Out[4]: ['a', 'b'] 150 151 Get name of Series via `cols` or `col` method 152 153 In [5]: df.a.col() 154 Out[5]: 'a' 155 156 Get name of Index via `cols` or `col` method 157 158 In [6]: df.index.col() 159 Out[6]: 'c' 160 """ 161 162 return register_series_accessor(name) 163 164 165 @register_method_factory 166 @doc(register_series_method, klass=":class:`~pandas.DataFrame`") 167 def register_dataframe_method(name: str = None): 168 return register_dataframe_accessor(name) 169 170 171 @register_method_factory 172 @doc(register_series_method, klass=":class:`~pandas.Index`") 173 def register_index_method(name: str = None): 174 return register_index_accessor(name) 175 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dtoolkit/accessor/register.py b/dtoolkit/accessor/register.py --- a/dtoolkit/accessor/register.py +++ b/dtoolkit/accessor/register.py @@ -34,6 +34,7 @@ # based on pandas_flavor/register.py def register_accessor_method(method: Callable, name: str): + @wraps(method) def method_accessor(pd_obj: SeriesOrFrame): @wraps(method) def wrapper(*args, **kwargs):
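The one-line fix above adds a second `functools.wraps` so that the intermediate `method_accessor` handed to pandas' `register_*_accessor` keeps the registered function's metadata. Here is a minimal, pandas-free sketch of the effect; `register` below is a simplified stand-in for dtoolkit's factory, not its actual implementation:

```python
from functools import wraps


def register(method):
    @wraps(method)  # copies __name__, __doc__, etc. onto the accessor itself
    def method_accessor(obj):
        @wraps(method)  # copies the same metadata onto the bound wrapper
        def wrapper(*args, **kwargs):
            return method(obj, *args, **kwargs)

        return wrapper

    return method_accessor


def cols(pd_obj):
    """Gather the column names of the object."""
    return list(pd_obj)


accessor = register(cols)
print(accessor.__name__)  # "cols" -- without the outer wraps it would be "method_accessor"
print(accessor.__doc__)   # the original docstring is preserved as well
```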
{"golden_diff": "diff --git a/dtoolkit/accessor/register.py b/dtoolkit/accessor/register.py\n--- a/dtoolkit/accessor/register.py\n+++ b/dtoolkit/accessor/register.py\n@@ -34,6 +34,7 @@\n \n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n+ @wraps(method)\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n", "issue": "MAINT: Simplify `register_method_factory`\n<!--\r\nThanks for contributing a pull request!\r\n\r\nPlease follow these standard acronyms to start the commit message:\r\n\r\n- ENH: enhancement\r\n- BUG: bug fix\r\n- DOC: documentation\r\n- TYP: type annotations\r\n- TST: addition or modification of tests\r\n- MAINT: maintenance commit (refactoring, typos, etc.)\r\n- BLD: change related to building\r\n- REL: related to releasing\r\n- API: an (incompatible) API change\r\n- DEP: deprecate something, or remove a deprecated object\r\n- DEV: development tool or utility\r\n- REV: revert an earlier commit\r\n- PERF: performance improvement\r\n- BOT: always commit via a bot\r\n- CI: related to CI or CD\r\n- CLN: Code cleanup\r\n-->\r\n\r\n- [ ] closes #xxxx\r\n- [x] whatsnew entry\r\n\r\nDelete duplicate `wraps` lines\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import wraps\nfrom typing import Callable\n\nfrom pandas.api.extensions import register_dataframe_accessor\nfrom pandas.api.extensions import register_index_accessor\nfrom pandas.api.extensions import register_series_accessor\nfrom pandas.util._decorators import doc\n\nfrom dtoolkit._typing import SeriesOrFrame\n\n\ndef register_method_factory(register_accessor):\n \"\"\"\n Let pandas-object like accessor which only hooks class also hooks function easily.\n\n Read more in the `User Guide`_.\n\n .. _User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object\n\n Parameters\n ----------\n register_accessor : Pandas-object like accessor\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n dtoolkit.geoaccessor.register_geoseries_method\n dtoolkit.geoaccessor.register_geodataframe_method\n \"\"\"\n\n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n return method(pd_obj, *args, **kwargs)\n\n return wrapper\n\n # Register method as pandas object inner method.\n register_accessor(name)(method_accessor)\n\n # Must return method itself, otherwise would get None.\n return method\n\n def register_accessor_alias(name: str = None):\n def wrapper(method: Callable):\n return register_accessor_method(method, name or method.__name__)\n\n return wrapper\n\n @wraps(register_accessor)\n def decorator(name: Callable | str = None):\n if callable(name): # Supports `@register_*_method` using.\n method = name # This 'name' variable actually is a function.\n return register_accessor_method(method, method.__name__)\n\n # Supports `@register_*_method()` and `@register_*_method(name=\"\")` using.\n return register_accessor_alias(name)\n\n return decorator\n\n\n@register_method_factory\n@doc(klass=\":class:`~pandas.Series`\")\ndef register_series_method(name: str = None):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n Read more in the `User Guide`_.\n\n .. 
_User Guide: ../../guide/tips_about_accessor.ipynb\n\n Parameters\n ----------\n name : str, optional\n Use the ``method`` name as the default accessor entrance if ``name`` is None.\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n pandas.api.extensions.register_dataframe_accessor\n pandas.api.extensions.register_series_accessor\n pandas.api.extensions.register_index_accessor\n\n Examples\n --------\n In your library code::\n\n from __future__ import annotations\n\n from dtoolkit.accessor import register_dataframe_method\n from dtoolkit.accessor import register_series_method\n from dtoolkit.accessor import register_index_method\n import pandas as pd\n\n @register_index_method(\"col\") # Support alias name also.\n @register_series_method(\"col\")\n @register_dataframe_method(name=\"col\")\n @register_index_method # Use accessor method's `__name__` as the entrance.\n @register_series_method\n @register_dataframe_method\n def cols(pd_obj) -> int | str | list[int | str] | None:\n '''\n An API to gather :attr:`~pandas.Series.name` and\n :attr:`~pandas.DataFrame.columns` to one.\n '''\n\n if isinstance(pd_obj, (pd.Series, pd.Index)):\n return pd_obj.name\n\n return pd_obj.columns.tolist()\n\n Back in an interactive IPython session:\n\n .. code-block:: ipython\n\n In [1]: import pandas as pd\n\n In [2]: df = pd.DataFrame(\n ...: {{\n ...: \"a\": [1, 2],\n ...: \"b\": [3, 4],\n ...: }},\n ...: index=pd.Index(\n ...: [\"x\", \"y\"],\n ...: name=\"c\",\n ...: ),\n ...: )\n\n In [3]: df\n Out[3]:\n a b\n c\n x 1 3\n y 2 4\n\n Get the columns of DataFrame via `cols` or `col` method\n\n In [4]: df.col()\n Out[4]: ['a', 'b']\n\n Get name of Series via `cols` or `col` method\n\n In [5]: df.a.col()\n Out[5]: 'a'\n\n Get name of Index via `cols` or `col` method\n\n In [6]: df.index.col()\n Out[6]: 'c'\n \"\"\"\n\n return register_series_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.DataFrame`\")\ndef register_dataframe_method(name: str = None):\n return register_dataframe_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.Index`\")\ndef register_index_method(name: str = None):\n return register_index_accessor(name)\n", "path": "dtoolkit/accessor/register.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom functools import wraps\nfrom typing import Callable\n\nfrom pandas.api.extensions import register_dataframe_accessor\nfrom pandas.api.extensions import register_index_accessor\nfrom pandas.api.extensions import register_series_accessor\nfrom pandas.util._decorators import doc\n\nfrom dtoolkit._typing import SeriesOrFrame\n\n\ndef register_method_factory(register_accessor):\n \"\"\"\n Let pandas-object like accessor which only hooks class also hooks function easily.\n\n Read more in the `User Guide`_.\n\n .. 
_User Guide: ../../guide/tips_about_accessor.ipynb#Extend-to-Pandas-like-Object\n\n Parameters\n ----------\n register_accessor : Pandas-object like accessor\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n dtoolkit.geoaccessor.register_geoseries_method\n dtoolkit.geoaccessor.register_geodataframe_method\n \"\"\"\n\n # based on pandas_flavor/register.py\n def register_accessor_method(method: Callable, name: str):\n @wraps(method)\n def method_accessor(pd_obj: SeriesOrFrame):\n @wraps(method)\n def wrapper(*args, **kwargs):\n return method(pd_obj, *args, **kwargs)\n\n return wrapper\n\n # Register method as pandas object inner method.\n register_accessor(name)(method_accessor)\n\n # Must return method itself, otherwise would get None.\n return method\n\n def register_accessor_alias(name: str = None):\n def wrapper(method: Callable):\n return register_accessor_method(method, name or method.__name__)\n\n return wrapper\n\n @wraps(register_accessor)\n def decorator(name: Callable | str = None):\n if callable(name): # Supports `@register_*_method` using.\n method = name # This 'name' variable actually is a function.\n return register_accessor_method(method, method.__name__)\n\n # Supports `@register_*_method()` and `@register_*_method(name=\"\")` using.\n return register_accessor_alias(name)\n\n return decorator\n\n\n@register_method_factory\n@doc(klass=\":class:`~pandas.Series`\")\ndef register_series_method(name: str = None):\n \"\"\"\n {klass} register accessor for human.\n\n Write method normally, use method naturally.\n\n Read more in the `User Guide`_.\n\n .. _User Guide: ../../guide/tips_about_accessor.ipynb\n\n Parameters\n ----------\n name : str, optional\n Use the ``method`` name as the default accessor entrance if ``name`` is None.\n\n See Also\n --------\n register_dataframe_method\n register_series_method\n register_index_method\n pandas.api.extensions.register_dataframe_accessor\n pandas.api.extensions.register_series_accessor\n pandas.api.extensions.register_index_accessor\n\n Examples\n --------\n In your library code::\n\n from __future__ import annotations\n\n from dtoolkit.accessor import register_dataframe_method\n from dtoolkit.accessor import register_series_method\n from dtoolkit.accessor import register_index_method\n import pandas as pd\n\n @register_index_method(\"col\") # Support alias name also.\n @register_series_method(\"col\")\n @register_dataframe_method(name=\"col\")\n @register_index_method # Use accessor method's `__name__` as the entrance.\n @register_series_method\n @register_dataframe_method\n def cols(pd_obj) -> int | str | list[int | str] | None:\n '''\n An API to gather :attr:`~pandas.Series.name` and\n :attr:`~pandas.DataFrame.columns` to one.\n '''\n\n if isinstance(pd_obj, (pd.Series, pd.Index)):\n return pd_obj.name\n\n return pd_obj.columns.tolist()\n\n Back in an interactive IPython session:\n\n .. 
code-block:: ipython\n\n In [1]: import pandas as pd\n\n In [2]: df = pd.DataFrame(\n ...: {{\n ...: \"a\": [1, 2],\n ...: \"b\": [3, 4],\n ...: }},\n ...: index=pd.Index(\n ...: [\"x\", \"y\"],\n ...: name=\"c\",\n ...: ),\n ...: )\n\n In [3]: df\n Out[3]:\n a b\n c\n x 1 3\n y 2 4\n\n Get the columns of DataFrame via `cols` or `col` method\n\n In [4]: df.col()\n Out[4]: ['a', 'b']\n\n Get name of Series via `cols` or `col` method\n\n In [5]: df.a.col()\n Out[5]: 'a'\n\n Get name of Index via `cols` or `col` method\n\n In [6]: df.index.col()\n Out[6]: 'c'\n \"\"\"\n\n return register_series_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.DataFrame`\")\ndef register_dataframe_method(name: str = None):\n return register_dataframe_accessor(name)\n\n\n@register_method_factory\n@doc(register_series_method, klass=\":class:`~pandas.Index`\")\ndef register_index_method(name: str = None):\n return register_index_accessor(name)\n", "path": "dtoolkit/accessor/register.py"}]}
num_tokens: 2,019
num_tokens_diff: 109
problem_id: gh_patches_debug_5416
source: rasdani/github-patches
task_type: git_diff
in_source_id: rasterio__rasterio-1192
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- query params in https url is not working Hi I wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b First I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, It can be found here: https://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32 I added in my env the following code: ``` if parts.query: path += "?" + parts.query ``` and it solved the issue, Is there a reason for cutting the querystring? Thanks, Guy query params in https url is not working Hi I wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b First I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, It can be found here: https://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32 I added in my env the following code: ``` if parts.query: path += "?" + parts.query ``` and it solved the issue, Is there a reason for cutting the querystring? Thanks, Guy --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/vfs.py` Content: ``` 1 """Implementation of Apache VFS schemes and URLs.""" 2 3 import os 4 5 from rasterio.compat import urlparse 6 7 8 # NB: As not to propagate fallacies of distributed computing, Rasterio 9 # does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only 10 # the following local filesystem schemes are supported. 11 SCHEMES = { 12 'gzip': 'gzip', 13 'gzip+file': 'gzip', 14 'zip': 'zip', 15 'zip+file': 'zip', 16 'tar': 'tar', 17 'tar+file': 'tar', 18 'https': 'curl', 19 'http': 'curl', 20 's3': 's3'} 21 22 FILE_SCHEMES = [ 23 '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file'] 24 25 26 def parse_path(uri, vfs=None): 27 """Parse a URI or Apache VFS URL into its parts 28 29 Returns: tuple 30 (path, archive, scheme) 31 """ 32 archive = scheme = None 33 path = uri 34 if vfs: 35 parts = urlparse(vfs) 36 scheme = parts.scheme 37 archive = parts.path 38 if parts.netloc and parts.netloc != 'localhost': # pragma: no cover 39 archive = parts.netloc + archive 40 else: 41 parts = urlparse(path) 42 scheme = parts.scheme 43 path = parts.path 44 if parts.netloc and parts.netloc != 'localhost': 45 path = parts.netloc + path 46 # There are certain URI schemes we favor over GDAL's names. 47 if scheme in SCHEMES: 48 parts = path.split('!') 49 path = parts.pop() if parts else None 50 archive = parts.pop() if parts else None 51 # For filesystem paths. 52 elif scheme.lower() in FILE_SCHEMES: 53 pass 54 # We permit GDAL's idiosyncratic URI-like dataset paths such as 55 # 'netcdf':... to fall right through with no parsed archive 56 # or scheme. 57 else: 58 archive = scheme = None 59 path = uri 60 61 return path, archive, scheme 62 63 64 def vsi_path(path, archive=None, scheme=None): 65 """Convert a parsed path to a GDAL VSI path.""" 66 # If a VSF and archive file are specified, we convert the path to 67 # a GDAL VSI path (see cpl_vsi.h). 
68 if scheme and scheme.startswith('http'): 69 result = "/vsicurl/{0}://{1}".format(scheme, path) 70 elif scheme and scheme == 's3': 71 result = "/vsis3/{0}".format(path) 72 elif scheme and scheme != 'file': 73 if archive: 74 result = '/vsi{0}/{1}/{2}'.format( 75 scheme, archive, path.lstrip('/')) 76 else: 77 result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/')) 78 else: 79 result = path 80 return result 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/vfs.py b/rasterio/vfs.py --- a/rasterio/vfs.py +++ b/rasterio/vfs.py @@ -41,6 +41,8 @@ parts = urlparse(path) scheme = parts.scheme path = parts.path + if parts.query: + path += "?" + parts.query if parts.netloc and parts.netloc != 'localhost': path = parts.netloc + path # There are certain URI schemes we favor over GDAL's names.
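The reason the one-line check is needed: `urlparse` splits everything after `?` into `parts.query`, so taking `parts.path` alone silently drops the query string. A small standalone illustration, assuming `rasterio.compat.urlparse` behaves like the standard library's `urllib.parse.urlparse` (the URL is made up for the example):

```python
from urllib.parse import urlparse

uri = "https://example.com/cog.tif?a=a&b=b"
parts = urlparse(uri)

path = parts.path              # "/cog.tif" -- the query string is gone
if parts.query:
    path += "?" + parts.query  # "/cog.tif?a=a&b=b" -- restored, as in the patch
if parts.netloc and parts.netloc != "localhost":
    path = parts.netloc + path

print(path)  # "example.com/cog.tif?a=a&b=b"
```

With the query preserved, `vsi_path` then produces `/vsicurl/https://example.com/cog.tif?a=a&b=b`, the form of URL the issue reporter confirmed GDAL itself already handles.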
{"golden_diff": "diff --git a/rasterio/vfs.py b/rasterio/vfs.py\n--- a/rasterio/vfs.py\n+++ b/rasterio/vfs.py\n@@ -41,6 +41,8 @@\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n+ if parts.query:\n+ path += \"?\" + parts.query\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n", "issue": "query params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\nquery params in https url is not working\nHi \r\n\r\nI wanted to use rasterio with a https url that has query-params, for example: https://.....?a=a&b=b\r\nFirst I wanted to see if gdal supports that kind of url and gdal does, so I looked for the code in rasterio that is responsible for cutting the url, \r\n\r\nIt can be found here:\r\nhttps://github.com/mapbox/rasterio/blob/master/rasterio/vfs.py#L32\r\n\r\nI added in my env the following code:\r\n```\r\n if parts.query: \r\n path += \"?\" + parts.query \r\n```\r\n\r\nand it solved the issue, \r\n\r\nIs there a reason for cutting the querystring?\r\n\r\n\r\n\r\nThanks, Guy\r\n\n", "before_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only\n# the following local filesystem schemes are supported.\nSCHEMES = {\n 'gzip': 'gzip',\n 'gzip+file': 'gzip',\n 'zip': 'zip',\n 'zip+file': 'zip',\n 'tar': 'tar',\n 'tar+file': 'tar',\n 'https': 'curl',\n 'http': 'curl',\n 's3': 's3'}\n\nFILE_SCHEMES = [\n '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file']\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme.lower() in FILE_SCHEMES:\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'netcdf':... 
to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n if archive:\n result = '/vsi{0}/{1}/{2}'.format(\n scheme, archive, path.lstrip('/'))\n else:\n result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/'))\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}], "after_files": [{"content": "\"\"\"Implementation of Apache VFS schemes and URLs.\"\"\"\n\nimport os\n\nfrom rasterio.compat import urlparse\n\n\n# NB: As not to propagate fallacies of distributed computing, Rasterio\n# does not support HTTP or FTP URLs via GDAL's vsicurl handler. Only\n# the following local filesystem schemes are supported.\nSCHEMES = {\n 'gzip': 'gzip',\n 'gzip+file': 'gzip',\n 'zip': 'zip',\n 'zip+file': 'zip',\n 'tar': 'tar',\n 'tar+file': 'tar',\n 'https': 'curl',\n 'http': 'curl',\n 's3': 's3'}\n\nFILE_SCHEMES = [\n '', 'file', 'gzip', 'gzip+file', 'zip', 'zip+file', 'tar', 'tar+file']\n\n\ndef parse_path(uri, vfs=None):\n \"\"\"Parse a URI or Apache VFS URL into its parts\n\n Returns: tuple\n (path, archive, scheme)\n \"\"\"\n archive = scheme = None\n path = uri\n if vfs:\n parts = urlparse(vfs)\n scheme = parts.scheme\n archive = parts.path\n if parts.netloc and parts.netloc != 'localhost': # pragma: no cover\n archive = parts.netloc + archive\n else:\n parts = urlparse(path)\n scheme = parts.scheme\n path = parts.path\n if parts.query:\n path += \"?\" + parts.query\n if parts.netloc and parts.netloc != 'localhost':\n path = parts.netloc + path\n # There are certain URI schemes we favor over GDAL's names.\n if scheme in SCHEMES:\n parts = path.split('!')\n path = parts.pop() if parts else None\n archive = parts.pop() if parts else None\n # For filesystem paths.\n elif scheme.lower() in FILE_SCHEMES:\n pass\n # We permit GDAL's idiosyncratic URI-like dataset paths such as\n # 'netcdf':... to fall right through with no parsed archive\n # or scheme.\n else:\n archive = scheme = None\n path = uri\n\n return path, archive, scheme\n\n\ndef vsi_path(path, archive=None, scheme=None):\n \"\"\"Convert a parsed path to a GDAL VSI path.\"\"\"\n # If a VSF and archive file are specified, we convert the path to\n # a GDAL VSI path (see cpl_vsi.h).\n if scheme and scheme.startswith('http'):\n result = \"/vsicurl/{0}://{1}\".format(scheme, path)\n elif scheme and scheme == 's3':\n result = \"/vsis3/{0}\".format(path)\n elif scheme and scheme != 'file':\n if archive:\n result = '/vsi{0}/{1}/{2}'.format(\n scheme, archive, path.lstrip('/'))\n else:\n result = '/vsi{0}/{1}'.format(scheme, path.lstrip('/'))\n else:\n result = path\n return result\n", "path": "rasterio/vfs.py"}]}
num_tokens: 1,370
num_tokens_diff: 118
problem_id: gh_patches_debug_35266
source: rasdani/github-patches
task_type: git_diff
in_source_id: feast-dev__feast-2430
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing documentation for abstract `pull_all_from_table_or_query` offline store method ## Expected Behavior Since https://github.com/feast-dev/feast/pull/2197, offline store method `pull_all_from_table_or_query` must be overridden by custom offline stores. This is currently not documented. Expectations: - [Docstring](https://github.com/feast-dev/feast/blob/b35e1e84720523cef70cba6d6306af8f193b469f/sdk/python/feast/infra/offline_stores/offline_store.py#L203) of `pull_all_from_table_or_query` contains a meaningful description of the method - [Web doc](https://docs.feast.dev/how-to-guides/adding-a-new-offline-store) mentions that `pull_all_from_table_or_query` must be overriden. ## Current Behavior No documentation for `pull_all_from_table_or_query`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/feast/infra/offline_stores/offline_store.py` Content: ``` 1 # Copyright 2019 The Feast Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import warnings 15 from abc import ABC, abstractmethod 16 from datetime import datetime 17 from typing import TYPE_CHECKING, List, Optional, Union 18 19 import pandas as pd 20 import pyarrow 21 22 from feast.data_source import DataSource 23 from feast.dqm.errors import ValidationFailed 24 from feast.feature_view import FeatureView 25 from feast.on_demand_feature_view import OnDemandFeatureView 26 from feast.registry import Registry 27 from feast.repo_config import RepoConfig 28 from feast.saved_dataset import SavedDatasetStorage 29 30 if TYPE_CHECKING: 31 from feast.saved_dataset import ValidationReference 32 33 34 class RetrievalMetadata: 35 min_event_timestamp: Optional[datetime] 36 max_event_timestamp: Optional[datetime] 37 38 # List of feature references 39 features: List[str] 40 # List of entity keys + ODFV inputs 41 keys: List[str] 42 43 def __init__( 44 self, 45 features: List[str], 46 keys: List[str], 47 min_event_timestamp: Optional[datetime] = None, 48 max_event_timestamp: Optional[datetime] = None, 49 ): 50 self.features = features 51 self.keys = keys 52 self.min_event_timestamp = min_event_timestamp 53 self.max_event_timestamp = max_event_timestamp 54 55 56 class RetrievalJob(ABC): 57 """RetrievalJob is used to manage the execution of a historical feature retrieval""" 58 59 @property 60 @abstractmethod 61 def full_feature_names(self) -> bool: 62 pass 63 64 @property 65 @abstractmethod 66 def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]: 67 pass 68 69 def to_df( 70 self, validation_reference: Optional["ValidationReference"] = None 71 ) -> pd.DataFrame: 72 """ 73 Return dataset as Pandas DataFrame synchronously including on demand transforms 74 Args: 75 validation_reference: If provided resulting dataset will be validated against this reference profile. 
76 """ 77 features_df = self._to_df_internal() 78 79 if self.on_demand_feature_views: 80 # TODO(adchia): Fix requirement to specify dependent feature views in feature_refs 81 for odfv in self.on_demand_feature_views: 82 features_df = features_df.join( 83 odfv.get_transformed_features_df( 84 features_df, self.full_feature_names, 85 ) 86 ) 87 88 if validation_reference: 89 warnings.warn( 90 "Dataset validation is an experimental feature. " 91 "This API is unstable and it could and most probably will be changed in the future. " 92 "We do not guarantee that future changes will maintain backward compatibility.", 93 RuntimeWarning, 94 ) 95 96 validation_result = validation_reference.profile.validate(features_df) 97 if not validation_result.is_success: 98 raise ValidationFailed(validation_result) 99 100 return features_df 101 102 @abstractmethod 103 def _to_df_internal(self) -> pd.DataFrame: 104 """Return dataset as Pandas DataFrame synchronously""" 105 pass 106 107 @abstractmethod 108 def _to_arrow_internal(self) -> pyarrow.Table: 109 """Return dataset as pyarrow Table synchronously""" 110 pass 111 112 def to_arrow( 113 self, validation_reference: Optional["ValidationReference"] = None 114 ) -> pyarrow.Table: 115 """ 116 Return dataset as pyarrow Table synchronously 117 Args: 118 validation_reference: If provided resulting dataset will be validated against this reference profile. 119 """ 120 if not self.on_demand_feature_views and not validation_reference: 121 return self._to_arrow_internal() 122 123 features_df = self._to_df_internal() 124 if self.on_demand_feature_views: 125 for odfv in self.on_demand_feature_views: 126 features_df = features_df.join( 127 odfv.get_transformed_features_df( 128 features_df, self.full_feature_names, 129 ) 130 ) 131 132 if validation_reference: 133 warnings.warn( 134 "Dataset validation is an experimental feature. " 135 "This API is unstable and it could and most probably will be changed in the future. " 136 "We do not guarantee that future changes will maintain backward compatibility.", 137 RuntimeWarning, 138 ) 139 140 validation_result = validation_reference.profile.validate(features_df) 141 if not validation_result.is_success: 142 raise ValidationFailed(validation_result) 143 144 return pyarrow.Table.from_pandas(features_df) 145 146 @abstractmethod 147 def persist(self, storage: SavedDatasetStorage): 148 """ 149 Run the retrieval and persist the results in the same offline store used for read. 150 """ 151 pass 152 153 @property 154 @abstractmethod 155 def metadata(self) -> Optional[RetrievalMetadata]: 156 """ 157 Return metadata information about retrieval. 158 Should be available even before materializing the dataset itself. 159 """ 160 pass 161 162 163 class OfflineStore(ABC): 164 """ 165 OfflineStore is an object used for all interaction between Feast and the service used for offline storage of 166 features. 167 """ 168 169 @staticmethod 170 @abstractmethod 171 def pull_latest_from_table_or_query( 172 config: RepoConfig, 173 data_source: DataSource, 174 join_key_columns: List[str], 175 feature_name_columns: List[str], 176 event_timestamp_column: str, 177 created_timestamp_column: Optional[str], 178 start_date: datetime, 179 end_date: datetime, 180 ) -> RetrievalJob: 181 """ 182 Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column 183 have all already been mapped to column names of the source table and those column names are the values passed 184 into this function. 
185 """ 186 pass 187 188 @staticmethod 189 @abstractmethod 190 def get_historical_features( 191 config: RepoConfig, 192 feature_views: List[FeatureView], 193 feature_refs: List[str], 194 entity_df: Union[pd.DataFrame, str], 195 registry: Registry, 196 project: str, 197 full_feature_names: bool = False, 198 ) -> RetrievalJob: 199 pass 200 201 @staticmethod 202 @abstractmethod 203 def pull_all_from_table_or_query( 204 config: RepoConfig, 205 data_source: DataSource, 206 join_key_columns: List[str], 207 feature_name_columns: List[str], 208 event_timestamp_column: str, 209 start_date: datetime, 210 end_date: datetime, 211 ) -> RetrievalJob: 212 """ 213 Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column 214 have all already been mapped to column names of the source table and those column names are the values passed 215 into this function. 216 """ 217 pass 218 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/feast/infra/offline_stores/offline_store.py b/sdk/python/feast/infra/offline_stores/offline_store.py --- a/sdk/python/feast/infra/offline_stores/offline_store.py +++ b/sdk/python/feast/infra/offline_stores/offline_store.py @@ -179,9 +179,24 @@ end_date: datetime, ) -> RetrievalJob: """ + This method pulls data from the offline store, and the FeatureStore class is used to write + this data into the online store. This method is invoked when running materialization (using + the `feast materialize` or `feast materialize-incremental` commands, or the corresponding + FeatureStore.materialize() method. This method pulls data from the offline store, and the FeatureStore + class is used to write this data into the online store. + Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column have all already been mapped to column names of the source table and those column names are the values passed into this function. + + Args: + config: Repo configuration object + data_source: Data source to pull all of the columns from + join_key_columns: Columns of the join keys + feature_name_columns: Columns of the feature names needed + event_timestamp_column: Timestamp column + start_date: Starting date of query + end_date: Ending date of query """ pass @@ -210,8 +225,19 @@ end_date: datetime, ) -> RetrievalJob: """ + Returns a Retrieval Job for all join key columns, feature name columns, and the event timestamp columns that occur between the start_date and end_date. + Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column have all already been mapped to column names of the source table and those column names are the values passed into this function. + + Args: + config: Repo configuration object + data_source: Data source to pull all of the columns from + join_key_columns: Columns of the join keys + feature_name_columns: Columns of the feature names needed + event_timestamp_column: Timestamp column + start_date: Starting date of query + end_date: Ending date of query """ pass
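Because `pull_all_from_table_or_query` is declared with `@abstractmethod`, a custom offline store that fails to override it cannot even be instantiated; that enforcement, which the issue traces to feast-dev/feast#2197, is what it asks to have documented. Below is a standalone sketch of the behaviour; the class only mirrors the abstract signature and is not the real feast `OfflineStore`:

```python
from abc import ABC, abstractmethod


class OfflineStoreSketch(ABC):
    @staticmethod
    @abstractmethod
    def pull_all_from_table_or_query(
        config, data_source, join_key_columns, feature_name_columns,
        event_timestamp_column, start_date, end_date,
    ):
        """Pull every row of the given columns between start_date and end_date."""
        raise NotImplementedError


class IncompleteStore(OfflineStoreSketch):
    pass  # forgot to override the abstract method


try:
    IncompleteStore()
except TypeError as exc:
    print(exc)  # "Can't instantiate abstract class IncompleteStore ..."
```

The `TypeError` above is the failure a plugin author would likely hit when wiring up an offline store written against the older interface.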
{"golden_diff": "diff --git a/sdk/python/feast/infra/offline_stores/offline_store.py b/sdk/python/feast/infra/offline_stores/offline_store.py\n--- a/sdk/python/feast/infra/offline_stores/offline_store.py\n+++ b/sdk/python/feast/infra/offline_stores/offline_store.py\n@@ -179,9 +179,24 @@\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n+ This method pulls data from the offline store, and the FeatureStore class is used to write\n+ this data into the online store. This method is invoked when running materialization (using\n+ the `feast materialize` or `feast materialize-incremental` commands, or the corresponding\n+ FeatureStore.materialize() method. This method pulls data from the offline store, and the FeatureStore\n+ class is used to write this data into the online store.\n+\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n+\n+ Args:\n+ config: Repo configuration object\n+ data_source: Data source to pull all of the columns from\n+ join_key_columns: Columns of the join keys\n+ feature_name_columns: Columns of the feature names needed\n+ event_timestamp_column: Timestamp column\n+ start_date: Starting date of query\n+ end_date: Ending date of query\n \"\"\"\n pass\n \n@@ -210,8 +225,19 @@\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n+ Returns a Retrieval Job for all join key columns, feature name columns, and the event timestamp columns that occur between the start_date and end_date.\n+\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n+\n+ Args:\n+ config: Repo configuration object\n+ data_source: Data source to pull all of the columns from\n+ join_key_columns: Columns of the join keys\n+ feature_name_columns: Columns of the feature names needed\n+ event_timestamp_column: Timestamp column\n+ start_date: Starting date of query\n+ end_date: Ending date of query\n \"\"\"\n pass\n", "issue": "Missing documentation for abstract `pull_all_from_table_or_query` offline store method\n## Expected Behavior \r\n\r\nSince https://github.com/feast-dev/feast/pull/2197, offline store method `pull_all_from_table_or_query` must be overridden by custom offline stores. 
This is currently not documented.\r\n\r\nExpectations: \r\n- [Docstring](https://github.com/feast-dev/feast/blob/b35e1e84720523cef70cba6d6306af8f193b469f/sdk/python/feast/infra/offline_stores/offline_store.py#L203) of `pull_all_from_table_or_query` contains a meaningful description of the method\r\n\r\n- [Web doc](https://docs.feast.dev/how-to-guides/adding-a-new-offline-store) mentions that `pull_all_from_table_or_query` must be overriden.\r\n\r\n## Current Behavior\r\nNo documentation for `pull_all_from_table_or_query`.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nimport pandas as pd\nimport pyarrow\n\nfrom feast.data_source import DataSource\nfrom feast.dqm.errors import ValidationFailed\nfrom feast.feature_view import FeatureView\nfrom feast.on_demand_feature_view import OnDemandFeatureView\nfrom feast.registry import Registry\nfrom feast.repo_config import RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\n\nif TYPE_CHECKING:\n from feast.saved_dataset import ValidationReference\n\n\nclass RetrievalMetadata:\n min_event_timestamp: Optional[datetime]\n max_event_timestamp: Optional[datetime]\n\n # List of feature references\n features: List[str]\n # List of entity keys + ODFV inputs\n keys: List[str]\n\n def __init__(\n self,\n features: List[str],\n keys: List[str],\n min_event_timestamp: Optional[datetime] = None,\n max_event_timestamp: Optional[datetime] = None,\n ):\n self.features = features\n self.keys = keys\n self.min_event_timestamp = min_event_timestamp\n self.max_event_timestamp = max_event_timestamp\n\n\nclass RetrievalJob(ABC):\n \"\"\"RetrievalJob is used to manage the execution of a historical feature retrieval\"\"\"\n\n @property\n @abstractmethod\n def full_feature_names(self) -> bool:\n pass\n\n @property\n @abstractmethod\n def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:\n pass\n\n def to_df(\n self, validation_reference: Optional[\"ValidationReference\"] = None\n ) -> pd.DataFrame:\n \"\"\"\n Return dataset as Pandas DataFrame synchronously including on demand transforms\n Args:\n validation_reference: If provided resulting dataset will be validated against this reference profile.\n \"\"\"\n features_df = self._to_df_internal()\n\n if self.on_demand_feature_views:\n # TODO(adchia): Fix requirement to specify dependent feature views in feature_refs\n for odfv in self.on_demand_feature_views:\n features_df = features_df.join(\n odfv.get_transformed_features_df(\n features_df, self.full_feature_names,\n )\n )\n\n if validation_reference:\n warnings.warn(\n \"Dataset validation is an experimental feature. \"\n \"This API is unstable and it could and most probably will be changed in the future. 
\"\n \"We do not guarantee that future changes will maintain backward compatibility.\",\n RuntimeWarning,\n )\n\n validation_result = validation_reference.profile.validate(features_df)\n if not validation_result.is_success:\n raise ValidationFailed(validation_result)\n\n return features_df\n\n @abstractmethod\n def _to_df_internal(self) -> pd.DataFrame:\n \"\"\"Return dataset as Pandas DataFrame synchronously\"\"\"\n pass\n\n @abstractmethod\n def _to_arrow_internal(self) -> pyarrow.Table:\n \"\"\"Return dataset as pyarrow Table synchronously\"\"\"\n pass\n\n def to_arrow(\n self, validation_reference: Optional[\"ValidationReference\"] = None\n ) -> pyarrow.Table:\n \"\"\"\n Return dataset as pyarrow Table synchronously\n Args:\n validation_reference: If provided resulting dataset will be validated against this reference profile.\n \"\"\"\n if not self.on_demand_feature_views and not validation_reference:\n return self._to_arrow_internal()\n\n features_df = self._to_df_internal()\n if self.on_demand_feature_views:\n for odfv in self.on_demand_feature_views:\n features_df = features_df.join(\n odfv.get_transformed_features_df(\n features_df, self.full_feature_names,\n )\n )\n\n if validation_reference:\n warnings.warn(\n \"Dataset validation is an experimental feature. \"\n \"This API is unstable and it could and most probably will be changed in the future. \"\n \"We do not guarantee that future changes will maintain backward compatibility.\",\n RuntimeWarning,\n )\n\n validation_result = validation_reference.profile.validate(features_df)\n if not validation_result.is_success:\n raise ValidationFailed(validation_result)\n\n return pyarrow.Table.from_pandas(features_df)\n\n @abstractmethod\n def persist(self, storage: SavedDatasetStorage):\n \"\"\"\n Run the retrieval and persist the results in the same offline store used for read.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def metadata(self) -> Optional[RetrievalMetadata]:\n \"\"\"\n Return metadata information about retrieval.\n Should be available even before materializing the dataset itself.\n \"\"\"\n pass\n\n\nclass OfflineStore(ABC):\n \"\"\"\n OfflineStore is an object used for all interaction between Feast and the service used for offline storage of\n features.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def pull_latest_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n event_timestamp_column: str,\n created_timestamp_column: Optional[str],\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n \"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def get_historical_features(\n config: RepoConfig,\n feature_views: List[FeatureView],\n feature_refs: List[str],\n entity_df: Union[pd.DataFrame, str],\n registry: Registry,\n project: str,\n full_feature_names: bool = False,\n ) -> RetrievalJob:\n pass\n\n @staticmethod\n @abstractmethod\n def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n event_timestamp_column: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and 
created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n \"\"\"\n pass\n", "path": "sdk/python/feast/infra/offline_stores/offline_store.py"}], "after_files": [{"content": "# Copyright 2019 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport warnings\nfrom abc import ABC, abstractmethod\nfrom datetime import datetime\nfrom typing import TYPE_CHECKING, List, Optional, Union\n\nimport pandas as pd\nimport pyarrow\n\nfrom feast.data_source import DataSource\nfrom feast.dqm.errors import ValidationFailed\nfrom feast.feature_view import FeatureView\nfrom feast.on_demand_feature_view import OnDemandFeatureView\nfrom feast.registry import Registry\nfrom feast.repo_config import RepoConfig\nfrom feast.saved_dataset import SavedDatasetStorage\n\nif TYPE_CHECKING:\n from feast.saved_dataset import ValidationReference\n\n\nclass RetrievalMetadata:\n min_event_timestamp: Optional[datetime]\n max_event_timestamp: Optional[datetime]\n\n # List of feature references\n features: List[str]\n # List of entity keys + ODFV inputs\n keys: List[str]\n\n def __init__(\n self,\n features: List[str],\n keys: List[str],\n min_event_timestamp: Optional[datetime] = None,\n max_event_timestamp: Optional[datetime] = None,\n ):\n self.features = features\n self.keys = keys\n self.min_event_timestamp = min_event_timestamp\n self.max_event_timestamp = max_event_timestamp\n\n\nclass RetrievalJob(ABC):\n \"\"\"RetrievalJob is used to manage the execution of a historical feature retrieval\"\"\"\n\n @property\n @abstractmethod\n def full_feature_names(self) -> bool:\n pass\n\n @property\n @abstractmethod\n def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:\n pass\n\n def to_df(\n self, validation_reference: Optional[\"ValidationReference\"] = None\n ) -> pd.DataFrame:\n \"\"\"\n Return dataset as Pandas DataFrame synchronously including on demand transforms\n Args:\n validation_reference: If provided resulting dataset will be validated against this reference profile.\n \"\"\"\n features_df = self._to_df_internal()\n\n if self.on_demand_feature_views:\n # TODO(adchia): Fix requirement to specify dependent feature views in feature_refs\n for odfv in self.on_demand_feature_views:\n features_df = features_df.join(\n odfv.get_transformed_features_df(\n features_df, self.full_feature_names,\n )\n )\n\n if validation_reference:\n warnings.warn(\n \"Dataset validation is an experimental feature. \"\n \"This API is unstable and it could and most probably will be changed in the future. 
\"\n \"We do not guarantee that future changes will maintain backward compatibility.\",\n RuntimeWarning,\n )\n\n validation_result = validation_reference.profile.validate(features_df)\n if not validation_result.is_success:\n raise ValidationFailed(validation_result)\n\n return features_df\n\n @abstractmethod\n def _to_df_internal(self) -> pd.DataFrame:\n \"\"\"Return dataset as Pandas DataFrame synchronously\"\"\"\n pass\n\n @abstractmethod\n def _to_arrow_internal(self) -> pyarrow.Table:\n \"\"\"Return dataset as pyarrow Table synchronously\"\"\"\n pass\n\n def to_arrow(\n self, validation_reference: Optional[\"ValidationReference\"] = None\n ) -> pyarrow.Table:\n \"\"\"\n Return dataset as pyarrow Table synchronously\n Args:\n validation_reference: If provided resulting dataset will be validated against this reference profile.\n \"\"\"\n if not self.on_demand_feature_views and not validation_reference:\n return self._to_arrow_internal()\n\n features_df = self._to_df_internal()\n if self.on_demand_feature_views:\n for odfv in self.on_demand_feature_views:\n features_df = features_df.join(\n odfv.get_transformed_features_df(\n features_df, self.full_feature_names,\n )\n )\n\n if validation_reference:\n warnings.warn(\n \"Dataset validation is an experimental feature. \"\n \"This API is unstable and it could and most probably will be changed in the future. \"\n \"We do not guarantee that future changes will maintain backward compatibility.\",\n RuntimeWarning,\n )\n\n validation_result = validation_reference.profile.validate(features_df)\n if not validation_result.is_success:\n raise ValidationFailed(validation_result)\n\n return pyarrow.Table.from_pandas(features_df)\n\n @abstractmethod\n def persist(self, storage: SavedDatasetStorage):\n \"\"\"\n Run the retrieval and persist the results in the same offline store used for read.\n \"\"\"\n pass\n\n @property\n @abstractmethod\n def metadata(self) -> Optional[RetrievalMetadata]:\n \"\"\"\n Return metadata information about retrieval.\n Should be available even before materializing the dataset itself.\n \"\"\"\n pass\n\n\nclass OfflineStore(ABC):\n \"\"\"\n OfflineStore is an object used for all interaction between Feast and the service used for offline storage of\n features.\n \"\"\"\n\n @staticmethod\n @abstractmethod\n def pull_latest_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n event_timestamp_column: str,\n created_timestamp_column: Optional[str],\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n This method pulls data from the offline store, and the FeatureStore class is used to write\n this data into the online store. This method is invoked when running materialization (using\n the `feast materialize` or `feast materialize-incremental` commands, or the corresponding\n FeatureStore.materialize() method. 
This method pulls data from the offline store, and the FeatureStore\n class is used to write this data into the online store.\n\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n\n Args:\n config: Repo configuration object\n data_source: Data source to pull all of the columns from\n join_key_columns: Columns of the join keys\n feature_name_columns: Columns of the feature names needed\n event_timestamp_column: Timestamp column\n start_date: Starting date of query\n end_date: Ending date of query\n \"\"\"\n pass\n\n @staticmethod\n @abstractmethod\n def get_historical_features(\n config: RepoConfig,\n feature_views: List[FeatureView],\n feature_refs: List[str],\n entity_df: Union[pd.DataFrame, str],\n registry: Registry,\n project: str,\n full_feature_names: bool = False,\n ) -> RetrievalJob:\n pass\n\n @staticmethod\n @abstractmethod\n def pull_all_from_table_or_query(\n config: RepoConfig,\n data_source: DataSource,\n join_key_columns: List[str],\n feature_name_columns: List[str],\n event_timestamp_column: str,\n start_date: datetime,\n end_date: datetime,\n ) -> RetrievalJob:\n \"\"\"\n Returns a Retrieval Job for all join key columns, feature name columns, and the event timestamp columns that occur between the start_date and end_date.\n\n Note that join_key_columns, feature_name_columns, event_timestamp_column, and created_timestamp_column\n have all already been mapped to column names of the source table and those column names are the values passed\n into this function.\n\n Args:\n config: Repo configuration object\n data_source: Data source to pull all of the columns from\n join_key_columns: Columns of the join keys\n feature_name_columns: Columns of the feature names needed\n event_timestamp_column: Timestamp column\n start_date: Starting date of query\n end_date: Ending date of query\n \"\"\"\n pass\n", "path": "sdk/python/feast/infra/offline_stores/offline_store.py"}]}
2,492
537
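Illustrative sketch for the Feast record above: after this change, any custom offline store plugin must also override `pull_all_from_table_or_query`. The skeleton below copies that method's signature verbatim from the abstract `OfflineStore` base class shown in the record; the class name `MyOfflineStore` is a placeholder, the body is a stub rather than a real implementation, and the other abstract methods (`pull_latest_from_table_or_query`, `get_historical_features`) are omitted, so the class is not instantiable as written.

```python
# Sketch only: a custom offline store now has to override pull_all_from_table_or_query.
# Signature copied from feast.infra.offline_stores.offline_store.OfflineStore; the class
# name is a placeholder and the remaining abstract methods are intentionally omitted.
from datetime import datetime
from typing import List

from feast.data_source import DataSource
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.repo_config import RepoConfig


class MyOfflineStore(OfflineStore):
    @staticmethod
    def pull_all_from_table_or_query(
        config: RepoConfig,
        data_source: DataSource,
        join_key_columns: List[str],
        feature_name_columns: List[str],
        event_timestamp_column: str,
        start_date: datetime,
        end_date: datetime,
    ) -> RetrievalJob:
        # A real store would build and return a backend-specific RetrievalJob here.
        raise NotImplementedError
```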
gh_patches_debug_2624
rasdani/github-patches
git_diff
zestedesavoir__zds-site-6488
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Possible erreur 500 à la résolution d'une alerte sur un contenu qui n'est plus public Rapporté par Sentry. J'ai eu du mal à comprendre comment le bug a pu se produire, mais j'ai réussi à le reproduire (d'une façon peut-être un peu tirée par les cheveux...). **Comment reproduire ?** 1. Se connecter en tant que `user1` 2. Signaler un billet 3. Se connecter en tant que `staff` 4. Ouvrir la page du billet signalé dans deux onglets différents 5. Sur un des onglets, dépublier le billet 6. Sur l'autre onglet, résoudre l'alerte (ne pas recharger la page juste avant, le billet n'est en fait plus publié, c'est là qu'est l'astuce) Une erreur 500 va alors apparaître. Elle provient d'ici : https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88 Le contenu n'a plus de version publique, donc plus d'URL publique, et `content.get_absolute_url_online()` renvoie alors `''`. La correction de ce bug passe sans doute par la vérification si l'alerte est déjà résolue ou si le contenu signalé a bien une version publique : si l'une de ces conditions n'est pas remplie, une erreur 404 devrait être levée. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zds/tutorialv2/views/alerts.py` Content: ``` 1 from datetime import datetime 2 3 from django.contrib import messages 4 from django.core.exceptions import PermissionDenied 5 from django.contrib.auth.mixins import LoginRequiredMixin 6 from django.db import transaction 7 from django.http import Http404 8 from django.shortcuts import get_object_or_404, redirect 9 from django.template.loader import render_to_string 10 from django.utils.decorators import method_decorator 11 from django.utils.translation import gettext_lazy as _ 12 from django.views.generic import FormView 13 14 from zds.tutorialv2.models import TYPE_CHOICES_DICT 15 from zds.tutorialv2.models.database import PublishableContent 16 from zds.utils.models import Alert 17 18 19 class SendContentAlert(LoginRequiredMixin, FormView): 20 http_method_names = ["post"] 21 22 @method_decorator(transaction.atomic) 23 def dispatch(self, *args, **kwargs): 24 return super().dispatch(*args, **kwargs) 25 26 def post(self, request, *args, **kwargs): 27 try: 28 content_pk = int(self.kwargs["pk"]) 29 except (KeyError, ValueError): 30 raise Http404("Identifiant manquant ou conversion en entier impossible.") 31 content = get_object_or_404(PublishableContent, pk=content_pk) 32 33 if len(request.POST["signal_text"].strip()) == 0: 34 messages.error(request, _("La raison du signalement ne peut pas être vide.")) 35 else: 36 alert = Alert( 37 author=request.user, 38 content=content, 39 scope="CONTENT", 40 text=request.POST["signal_text"], 41 pubdate=datetime.now(), 42 ) 43 alert.save() 44 45 human_content_type = TYPE_CHOICES_DICT[content.type].lower() 46 messages.success(self.request, _("Ce {} a bien été signalé aux modérateurs.").format(human_content_type)) 47 48 return redirect(content.get_absolute_url_online()) 49 50 51 class SolveContentAlert(LoginRequiredMixin, FormView): 52 @method_decorator(transaction.atomic) 53 def dispatch(self, *args, **kwargs): 54 return super().dispatch(*args, **kwargs) 55 56 def post(self, request, *args, **kwargs): 57 if not request.user.has_perm("tutorialv2.change_contentreaction"): 58 raise PermissionDenied 59 try: 60 alert = 
get_object_or_404(Alert, pk=int(request.POST["alert_pk"])) 61 content = PublishableContent.objects.get(pk=alert.content.id) 62 except (KeyError, ValueError): 63 raise Http404("L'alerte n'existe pas.") 64 65 resolve_reason = "" 66 msg_title = "" 67 msg_content = "" 68 if "text" in request.POST and request.POST["text"]: 69 resolve_reason = request.POST["text"] 70 authors = alert.content.authors.values_list("username", flat=True) 71 authors = ", ".join(authors) 72 msg_title = _("Résolution d'alerte : {0}").format(content.title) 73 msg_content = render_to_string( 74 "tutorialv2/messages/resolve_alert.md", 75 { 76 "content": content, 77 "url": content.get_absolute_url_online(), 78 "name": alert.author.username, 79 "target_name": authors, 80 "modo_name": request.user.username, 81 "message": "\n".join(["> " + line for line in resolve_reason.split("\n")]), 82 "alert_text": "\n".join(["> " + line for line in alert.text.split("\n")]), 83 }, 84 ) 85 alert.solve(request.user, resolve_reason, msg_title, msg_content) 86 87 messages.success(self.request, _("L'alerte a bien été résolue.")) 88 return redirect(content.get_absolute_url_online()) 89 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py --- a/zds/tutorialv2/views/alerts.py +++ b/zds/tutorialv2/views/alerts.py @@ -62,6 +62,9 @@ except (KeyError, ValueError): raise Http404("L'alerte n'existe pas.") + if alert.solved: + raise Http404("L'alerte a déjà été résolue.") + resolve_reason = "" msg_title = "" msg_content = ""
{"golden_diff": "diff --git a/zds/tutorialv2/views/alerts.py b/zds/tutorialv2/views/alerts.py\n--- a/zds/tutorialv2/views/alerts.py\n+++ b/zds/tutorialv2/views/alerts.py\n@@ -62,6 +62,9 @@\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n \n+ if alert.solved:\n+ raise Http404(\"L'alerte a d\u00e9j\u00e0 \u00e9t\u00e9 r\u00e9solue.\")\n+\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n", "issue": "Possible erreur 500 \u00e0 la r\u00e9solution d'une alerte sur un contenu qui n'est plus public\nRapport\u00e9 par Sentry. J'ai eu du mal \u00e0 comprendre comment le bug a pu se produire, mais j'ai r\u00e9ussi \u00e0 le reproduire (d'une fa\u00e7on peut-\u00eatre un peu tir\u00e9e par les cheveux...).\r\n\r\n**Comment reproduire ?**\r\n\r\n1. Se connecter en tant que `user1`\r\n2. Signaler un billet\r\n3. Se connecter en tant que `staff`\r\n4. Ouvrir la page du billet signal\u00e9 dans deux onglets diff\u00e9rents\r\n5. Sur un des onglets, d\u00e9publier le billet\r\n6. Sur l'autre onglet, r\u00e9soudre l'alerte (ne pas recharger la page juste avant, le billet n'est en fait plus publi\u00e9, c'est l\u00e0 qu'est l'astuce)\r\n\r\nUne erreur 500 va alors appara\u00eetre. Elle provient d'ici : https://github.com/zestedesavoir/zds-site/blob/c06671c4901a95c30f31067c09d5e4526fd86575/zds/tutorialv2/views/alerts.py#L88\r\nLe contenu n'a plus de version publique, donc plus d'URL publique, et `content.get_absolute_url_online()` renvoie alors `''`.\r\n\r\nLa correction de ce bug passe sans doute par la v\u00e9rification si l'alerte est d\u00e9j\u00e0 r\u00e9solue ou si le contenu signal\u00e9 a bien une version publique : si l'une de ces conditions n'est pas remplie, une erreur 404 devrait \u00eatre lev\u00e9e.\r\n\n", "before_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\n\nfrom zds.tutorialv2.models import TYPE_CHOICES_DICT\nfrom zds.tutorialv2.models.database import PublishableContent\nfrom zds.utils.models import Alert\n\n\nclass SendContentAlert(LoginRequiredMixin, FormView):\n http_method_names = [\"post\"]\n\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n content_pk = int(self.kwargs[\"pk\"])\n except (KeyError, ValueError):\n raise Http404(\"Identifiant manquant ou conversion en entier impossible.\")\n content = get_object_or_404(PublishableContent, pk=content_pk)\n\n if len(request.POST[\"signal_text\"].strip()) == 0:\n messages.error(request, _(\"La raison du signalement ne peut pas \u00eatre vide.\"))\n else:\n alert = Alert(\n author=request.user,\n content=content,\n scope=\"CONTENT\",\n text=request.POST[\"signal_text\"],\n pubdate=datetime.now(),\n )\n alert.save()\n\n human_content_type = TYPE_CHOICES_DICT[content.type].lower()\n messages.success(self.request, _(\"Ce {} a bien \u00e9t\u00e9 signal\u00e9 aux mod\u00e9rateurs.\").format(human_content_type))\n\n return redirect(content.get_absolute_url_online())\n\n\nclass 
SolveContentAlert(LoginRequiredMixin, FormView):\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if not request.user.has_perm(\"tutorialv2.change_contentreaction\"):\n raise PermissionDenied\n try:\n alert = get_object_or_404(Alert, pk=int(request.POST[\"alert_pk\"]))\n content = PublishableContent.objects.get(pk=alert.content.id)\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n if \"text\" in request.POST and request.POST[\"text\"]:\n resolve_reason = request.POST[\"text\"]\n authors = alert.content.authors.values_list(\"username\", flat=True)\n authors = \", \".join(authors)\n msg_title = _(\"R\u00e9solution d'alerte : {0}\").format(content.title)\n msg_content = render_to_string(\n \"tutorialv2/messages/resolve_alert.md\",\n {\n \"content\": content,\n \"url\": content.get_absolute_url_online(),\n \"name\": alert.author.username,\n \"target_name\": authors,\n \"modo_name\": request.user.username,\n \"message\": \"\\n\".join([\"> \" + line for line in resolve_reason.split(\"\\n\")]),\n \"alert_text\": \"\\n\".join([\"> \" + line for line in alert.text.split(\"\\n\")]),\n },\n )\n alert.solve(request.user, resolve_reason, msg_title, msg_content)\n\n messages.success(self.request, _(\"L'alerte a bien \u00e9t\u00e9 r\u00e9solue.\"))\n return redirect(content.get_absolute_url_online())\n", "path": "zds/tutorialv2/views/alerts.py"}], "after_files": [{"content": "from datetime import datetime\n\nfrom django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.db import transaction\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.loader import render_to_string\nfrom django.utils.decorators import method_decorator\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import FormView\n\nfrom zds.tutorialv2.models import TYPE_CHOICES_DICT\nfrom zds.tutorialv2.models.database import PublishableContent\nfrom zds.utils.models import Alert\n\n\nclass SendContentAlert(LoginRequiredMixin, FormView):\n http_method_names = [\"post\"]\n\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n content_pk = int(self.kwargs[\"pk\"])\n except (KeyError, ValueError):\n raise Http404(\"Identifiant manquant ou conversion en entier impossible.\")\n content = get_object_or_404(PublishableContent, pk=content_pk)\n\n if len(request.POST[\"signal_text\"].strip()) == 0:\n messages.error(request, _(\"La raison du signalement ne peut pas \u00eatre vide.\"))\n else:\n alert = Alert(\n author=request.user,\n content=content,\n scope=\"CONTENT\",\n text=request.POST[\"signal_text\"],\n pubdate=datetime.now(),\n )\n alert.save()\n\n human_content_type = TYPE_CHOICES_DICT[content.type].lower()\n messages.success(self.request, _(\"Ce {} a bien \u00e9t\u00e9 signal\u00e9 aux mod\u00e9rateurs.\").format(human_content_type))\n\n return redirect(content.get_absolute_url_online())\n\n\nclass SolveContentAlert(LoginRequiredMixin, FormView):\n @method_decorator(transaction.atomic)\n def dispatch(self, *args, **kwargs):\n return super().dispatch(*args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n if not 
request.user.has_perm(\"tutorialv2.change_contentreaction\"):\n raise PermissionDenied\n try:\n alert = get_object_or_404(Alert, pk=int(request.POST[\"alert_pk\"]))\n content = PublishableContent.objects.get(pk=alert.content.id)\n except (KeyError, ValueError):\n raise Http404(\"L'alerte n'existe pas.\")\n\n if alert.solved:\n raise Http404(\"L'alerte a d\u00e9j\u00e0 \u00e9t\u00e9 r\u00e9solue.\")\n\n resolve_reason = \"\"\n msg_title = \"\"\n msg_content = \"\"\n if \"text\" in request.POST and request.POST[\"text\"]:\n resolve_reason = request.POST[\"text\"]\n authors = alert.content.authors.values_list(\"username\", flat=True)\n authors = \", \".join(authors)\n msg_title = _(\"R\u00e9solution d'alerte : {0}\").format(content.title)\n msg_content = render_to_string(\n \"tutorialv2/messages/resolve_alert.md\",\n {\n \"content\": content,\n \"url\": content.get_absolute_url_online(),\n \"name\": alert.author.username,\n \"target_name\": authors,\n \"modo_name\": request.user.username,\n \"message\": \"\\n\".join([\"> \" + line for line in resolve_reason.split(\"\\n\")]),\n \"alert_text\": \"\\n\".join([\"> \" + line for line in alert.text.split(\"\\n\")]),\n },\n )\n alert.solve(request.user, resolve_reason, msg_title, msg_content)\n\n messages.success(self.request, _(\"L'alerte a bien \u00e9t\u00e9 r\u00e9solue.\"))\n return redirect(content.get_absolute_url_online())\n", "path": "zds/tutorialv2/views/alerts.py"}]}
1,562
127
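A possible regression test outline for the zds-site record above, checking that resolving an already-solved alert now returns 404 instead of a 500. The route name `"content:resolve-alert"` and the fixture setup are hypothetical placeholders — the real project has its own URL names and factories — so this is a sketch of the intent, not project code.

```python
# Outline of a regression test for SolveContentAlert; the URL name and fixture setup
# below are placeholders, not actual zds-site identifiers.
from django.test import TestCase
from django.urls import reverse


class SolveContentAlertRegressionTest(TestCase):
    def setUp(self):
        # Placeholder: the real suite would create a logged-in staff user, a published
        # content, and an Alert on it that is already marked as solved.
        self.solved_alert_pk = 1

    def test_already_solved_alert_returns_404(self):
        response = self.client.post(
            reverse("content:resolve-alert"),  # placeholder route name
            {"alert_pk": self.solved_alert_pk, "text": "already handled"},
        )
        self.assertEqual(response.status_code, 404)
```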
gh_patches_debug_30624
rasdani/github-patches
git_diff
pyro-ppl__pyro-1702
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Get error when running lda.py with “pyro.enable_validation(True)” As title described, I add "pyro.enable_validation(True)" at the head of script (examples/lda.py), and run with "python lda.py". I get an error, saying "ValueError: Error while computing log_prob at site 'topic_words':". However, the original script without "pyro.enable_validation(True)" can run smoothly. Thanks for suggestion. ### Environment - Centos 7,python 3.6. - PyTorch: 1.0.0 - Pyro version: 0.3.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/lda.py` Content: ``` 1 """ 2 This example demonstrates how to marginalize out discrete assignment variables 3 in a Pyro model. 4 5 Our example model is Latent Dirichlet Allocation. While the model in this 6 example does work, it is not the recommended way of coding up LDA in Pyro. 7 Whereas the model in this example treats documents as vectors of categorical 8 variables (vectors of word ids), it is usually more efficient to treat 9 documents as bags of words (histograms of word counts). 10 """ 11 from __future__ import absolute_import, division, print_function 12 13 import argparse 14 import functools 15 import logging 16 17 import torch 18 from torch import nn 19 from torch.distributions import constraints 20 21 import pyro 22 import pyro.distributions as dist 23 from pyro.infer import SVI, JitTraceEnum_ELBO, TraceEnum_ELBO 24 from pyro.optim import Adam 25 26 logging.basicConfig(format='%(relativeCreated) 9d %(message)s', level=logging.INFO) 27 28 29 # This is a fully generative model of a batch of documents. 30 # data is a [num_words_per_doc, num_documents] shaped array of word ids 31 # (specifically it is not a histogram). We assume in this simple example 32 # that all documents have the same number of words. 33 def model(data=None, args=None, batch_size=None): 34 # Globals. 35 with pyro.plate("topics", args.num_topics): 36 topic_weights = pyro.sample("topic_weights", dist.Gamma(1. / args.num_topics, 1.)) 37 topic_words = pyro.sample("topic_words", 38 dist.Dirichlet(torch.ones(args.num_words) / args.num_words)) 39 40 # Locals. 41 with pyro.plate("documents", args.num_docs) as ind: 42 if data is not None: 43 with pyro.util.ignore_jit_warnings(): 44 assert data.shape == (args.num_words_per_doc, args.num_docs) 45 data = data[:, ind] 46 doc_topics = pyro.sample("doc_topics", dist.Dirichlet(topic_weights)) 47 with pyro.plate("words", args.num_words_per_doc): 48 # The word_topics variable is marginalized out during inference, 49 # achieved by specifying infer={"enumerate": "parallel"} and using 50 # TraceEnum_ELBO for inference. Thus we can ignore this variable in 51 # the guide. 52 word_topics = pyro.sample("word_topics", dist.Categorical(doc_topics), 53 infer={"enumerate": "parallel"}) 54 data = pyro.sample("doc_words", dist.Categorical(topic_words[word_topics]), 55 obs=data) 56 57 return topic_weights, topic_words, data 58 59 60 # We will use amortized inference of the local topic variables, achieved by a 61 # multi-layer perceptron. We'll wrap the guide in an nn.Module. 
62 def make_predictor(args): 63 layer_sizes = ([args.num_words] + 64 [int(s) for s in args.layer_sizes.split('-')] + 65 [args.num_topics]) 66 logging.info('Creating MLP with sizes {}'.format(layer_sizes)) 67 layers = [] 68 for in_size, out_size in zip(layer_sizes, layer_sizes[1:]): 69 layer = nn.Linear(in_size, out_size) 70 layer.weight.data.normal_(0, 0.001) 71 layer.bias.data.normal_(0, 0.001) 72 layers.append(layer) 73 layers.append(nn.Sigmoid()) 74 return nn.Sequential(*layers) 75 76 77 def parametrized_guide(predictor, data, args, batch_size=None): 78 # Use a conjugate guide for global variables. 79 topic_weights_posterior = pyro.param( 80 "topic_weights_posterior", 81 lambda: torch.ones(args.num_topics) / args.num_topics, 82 constraint=constraints.positive) 83 topic_words_posterior = pyro.param( 84 "topic_words_posterior", 85 lambda: torch.ones(args.num_topics, args.num_words) / args.num_words, 86 constraint=constraints.positive) 87 with pyro.plate("topics", args.num_topics): 88 pyro.sample("topic_weights", dist.Gamma(topic_weights_posterior, 1.)) 89 pyro.sample("topic_words", dist.Dirichlet(topic_words_posterior)) 90 91 # Use an amortized guide for local variables. 92 pyro.module("predictor", predictor) 93 with pyro.plate("documents", args.num_docs, batch_size) as ind: 94 # The neural network will operate on histograms rather than word 95 # index vectors, so we'll convert the raw data to a histogram. 96 if torch._C._get_tracing_state(): 97 counts = torch.eye(1024)[data[:, ind]].sum(0).t() 98 else: 99 counts = torch.zeros(args.num_words, ind.size(0)) 100 counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape)) 101 doc_topics = predictor(counts.transpose(0, 1)) 102 pyro.sample("doc_topics", dist.Delta(doc_topics, event_dim=1)) 103 104 105 def main(args): 106 logging.info('Generating data') 107 pyro.set_rng_seed(0) 108 # We can generate synthetic data directly by calling the model. 109 true_topic_weights, true_topic_words, data = model(args=args) 110 111 # We'll train using SVI. 
112 logging.info('-' * 40) 113 logging.info('Training on {} documents'.format(args.num_docs)) 114 predictor = make_predictor(args) 115 guide = functools.partial(parametrized_guide, predictor) 116 Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO 117 elbo = Elbo(max_plate_nesting=2) 118 optim = Adam({'lr': args.learning_rate}) 119 svi = SVI(model, guide, optim, elbo) 120 logging.info('Step\tLoss') 121 for step in range(args.num_steps): 122 loss = svi.step(data, args=args, batch_size=args.batch_size) 123 if step % 10 == 0: 124 logging.info('{: >5d}\t{}'.format(step, loss)) 125 loss = elbo.loss(model, guide, data, args=args) 126 logging.info('final loss = {}'.format(loss)) 127 128 129 if __name__ == '__main__': 130 assert pyro.__version__.startswith('0.3.0') 131 parser = argparse.ArgumentParser(description="Amortized Latent Dirichlet Allocation") 132 parser.add_argument("-t", "--num-topics", default=8, type=int) 133 parser.add_argument("-w", "--num-words", default=1024, type=int) 134 parser.add_argument("-d", "--num-docs", default=1000, type=int) 135 parser.add_argument("-wd", "--num-words-per-doc", default=64, type=int) 136 parser.add_argument("-n", "--num-steps", default=1000, type=int) 137 parser.add_argument("-l", "--layer-sizes", default="100-100") 138 parser.add_argument("-lr", "--learning-rate", default=0.001, type=float) 139 parser.add_argument("-b", "--batch-size", default=32, type=int) 140 parser.add_argument('--jit', action='store_true') 141 args = parser.parse_args() 142 main(args) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/lda.py b/examples/lda.py --- a/examples/lda.py +++ b/examples/lda.py @@ -71,6 +71,7 @@ layer.bias.data.normal_(0, 0.001) layers.append(layer) layers.append(nn.Sigmoid()) + layers.append(nn.Softmax(dim=-1)) return nn.Sequential(*layers) @@ -78,12 +79,12 @@ # Use a conjugate guide for global variables. topic_weights_posterior = pyro.param( "topic_weights_posterior", - lambda: torch.ones(args.num_topics) / args.num_topics, + lambda: torch.ones(args.num_topics), constraint=constraints.positive) topic_words_posterior = pyro.param( "topic_words_posterior", - lambda: torch.ones(args.num_topics, args.num_words) / args.num_words, - constraint=constraints.positive) + lambda: torch.ones(args.num_topics, args.num_words), + constraint=constraints.greater_than(0.5)) with pyro.plate("topics", args.num_topics): pyro.sample("topic_weights", dist.Gamma(topic_weights_posterior, 1.)) pyro.sample("topic_words", dist.Dirichlet(topic_words_posterior)) @@ -105,6 +106,9 @@ def main(args): logging.info('Generating data') pyro.set_rng_seed(0) + pyro.clear_param_store() + pyro.enable_validation(True) + # We can generate synthetic data directly by calling the model. true_topic_weights, true_topic_words, data = model(args=args)
{"golden_diff": "diff --git a/examples/lda.py b/examples/lda.py\n--- a/examples/lda.py\n+++ b/examples/lda.py\n@@ -71,6 +71,7 @@\n layer.bias.data.normal_(0, 0.001)\n layers.append(layer)\n layers.append(nn.Sigmoid())\n+ layers.append(nn.Softmax(dim=-1))\n return nn.Sequential(*layers)\n \n \n@@ -78,12 +79,12 @@\n # Use a conjugate guide for global variables.\n topic_weights_posterior = pyro.param(\n \"topic_weights_posterior\",\n- lambda: torch.ones(args.num_topics) / args.num_topics,\n+ lambda: torch.ones(args.num_topics),\n constraint=constraints.positive)\n topic_words_posterior = pyro.param(\n \"topic_words_posterior\",\n- lambda: torch.ones(args.num_topics, args.num_words) / args.num_words,\n- constraint=constraints.positive)\n+ lambda: torch.ones(args.num_topics, args.num_words),\n+ constraint=constraints.greater_than(0.5))\n with pyro.plate(\"topics\", args.num_topics):\n pyro.sample(\"topic_weights\", dist.Gamma(topic_weights_posterior, 1.))\n pyro.sample(\"topic_words\", dist.Dirichlet(topic_words_posterior))\n@@ -105,6 +106,9 @@\n def main(args):\n logging.info('Generating data')\n pyro.set_rng_seed(0)\n+ pyro.clear_param_store()\n+ pyro.enable_validation(True)\n+\n # We can generate synthetic data directly by calling the model.\n true_topic_weights, true_topic_words, data = model(args=args)\n", "issue": "Get error when running lda.py with \u201cpyro.enable_validation(True)\u201d \n\r\nAs title described, I add \"pyro.enable_validation(True)\" at the head of script (examples/lda.py), and run with \"python lda.py\".\r\n\r\nI get an error, saying \"ValueError: Error while computing log_prob at site 'topic_words':\".\r\n\r\nHowever, the original script without \"pyro.enable_validation(True)\" can run smoothly.\r\n\r\nThanks for suggestion. \r\n\r\n### Environment\r\n - Centos 7,python 3.6.\r\n - PyTorch: 1.0.0\r\n - Pyro version: 0.3.0\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis example demonstrates how to marginalize out discrete assignment variables\nin a Pyro model.\n\nOur example model is Latent Dirichlet Allocation. While the model in this\nexample does work, it is not the recommended way of coding up LDA in Pyro.\nWhereas the model in this example treats documents as vectors of categorical\nvariables (vectors of word ids), it is usually more efficient to treat\ndocuments as bags of words (histograms of word counts).\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport functools\nimport logging\n\nimport torch\nfrom torch import nn\nfrom torch.distributions import constraints\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.infer import SVI, JitTraceEnum_ELBO, TraceEnum_ELBO\nfrom pyro.optim import Adam\n\nlogging.basicConfig(format='%(relativeCreated) 9d %(message)s', level=logging.INFO)\n\n\n# This is a fully generative model of a batch of documents.\n# data is a [num_words_per_doc, num_documents] shaped array of word ids\n# (specifically it is not a histogram). We assume in this simple example\n# that all documents have the same number of words.\ndef model(data=None, args=None, batch_size=None):\n # Globals.\n with pyro.plate(\"topics\", args.num_topics):\n topic_weights = pyro.sample(\"topic_weights\", dist.Gamma(1. 
/ args.num_topics, 1.))\n topic_words = pyro.sample(\"topic_words\",\n dist.Dirichlet(torch.ones(args.num_words) / args.num_words))\n\n # Locals.\n with pyro.plate(\"documents\", args.num_docs) as ind:\n if data is not None:\n with pyro.util.ignore_jit_warnings():\n assert data.shape == (args.num_words_per_doc, args.num_docs)\n data = data[:, ind]\n doc_topics = pyro.sample(\"doc_topics\", dist.Dirichlet(topic_weights))\n with pyro.plate(\"words\", args.num_words_per_doc):\n # The word_topics variable is marginalized out during inference,\n # achieved by specifying infer={\"enumerate\": \"parallel\"} and using\n # TraceEnum_ELBO for inference. Thus we can ignore this variable in\n # the guide.\n word_topics = pyro.sample(\"word_topics\", dist.Categorical(doc_topics),\n infer={\"enumerate\": \"parallel\"})\n data = pyro.sample(\"doc_words\", dist.Categorical(topic_words[word_topics]),\n obs=data)\n\n return topic_weights, topic_words, data\n\n\n# We will use amortized inference of the local topic variables, achieved by a\n# multi-layer perceptron. We'll wrap the guide in an nn.Module.\ndef make_predictor(args):\n layer_sizes = ([args.num_words] +\n [int(s) for s in args.layer_sizes.split('-')] +\n [args.num_topics])\n logging.info('Creating MLP with sizes {}'.format(layer_sizes))\n layers = []\n for in_size, out_size in zip(layer_sizes, layer_sizes[1:]):\n layer = nn.Linear(in_size, out_size)\n layer.weight.data.normal_(0, 0.001)\n layer.bias.data.normal_(0, 0.001)\n layers.append(layer)\n layers.append(nn.Sigmoid())\n return nn.Sequential(*layers)\n\n\ndef parametrized_guide(predictor, data, args, batch_size=None):\n # Use a conjugate guide for global variables.\n topic_weights_posterior = pyro.param(\n \"topic_weights_posterior\",\n lambda: torch.ones(args.num_topics) / args.num_topics,\n constraint=constraints.positive)\n topic_words_posterior = pyro.param(\n \"topic_words_posterior\",\n lambda: torch.ones(args.num_topics, args.num_words) / args.num_words,\n constraint=constraints.positive)\n with pyro.plate(\"topics\", args.num_topics):\n pyro.sample(\"topic_weights\", dist.Gamma(topic_weights_posterior, 1.))\n pyro.sample(\"topic_words\", dist.Dirichlet(topic_words_posterior))\n\n # Use an amortized guide for local variables.\n pyro.module(\"predictor\", predictor)\n with pyro.plate(\"documents\", args.num_docs, batch_size) as ind:\n # The neural network will operate on histograms rather than word\n # index vectors, so we'll convert the raw data to a histogram.\n if torch._C._get_tracing_state():\n counts = torch.eye(1024)[data[:, ind]].sum(0).t()\n else:\n counts = torch.zeros(args.num_words, ind.size(0))\n counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape))\n doc_topics = predictor(counts.transpose(0, 1))\n pyro.sample(\"doc_topics\", dist.Delta(doc_topics, event_dim=1))\n\n\ndef main(args):\n logging.info('Generating data')\n pyro.set_rng_seed(0)\n # We can generate synthetic data directly by calling the model.\n true_topic_weights, true_topic_words, data = model(args=args)\n\n # We'll train using SVI.\n logging.info('-' * 40)\n logging.info('Training on {} documents'.format(args.num_docs))\n predictor = make_predictor(args)\n guide = functools.partial(parametrized_guide, predictor)\n Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO\n elbo = Elbo(max_plate_nesting=2)\n optim = Adam({'lr': args.learning_rate})\n svi = SVI(model, guide, optim, elbo)\n logging.info('Step\\tLoss')\n for step in range(args.num_steps):\n loss = svi.step(data, args=args, 
batch_size=args.batch_size)\n if step % 10 == 0:\n logging.info('{: >5d}\\t{}'.format(step, loss))\n loss = elbo.loss(model, guide, data, args=args)\n logging.info('final loss = {}'.format(loss))\n\n\nif __name__ == '__main__':\n assert pyro.__version__.startswith('0.3.0')\n parser = argparse.ArgumentParser(description=\"Amortized Latent Dirichlet Allocation\")\n parser.add_argument(\"-t\", \"--num-topics\", default=8, type=int)\n parser.add_argument(\"-w\", \"--num-words\", default=1024, type=int)\n parser.add_argument(\"-d\", \"--num-docs\", default=1000, type=int)\n parser.add_argument(\"-wd\", \"--num-words-per-doc\", default=64, type=int)\n parser.add_argument(\"-n\", \"--num-steps\", default=1000, type=int)\n parser.add_argument(\"-l\", \"--layer-sizes\", default=\"100-100\")\n parser.add_argument(\"-lr\", \"--learning-rate\", default=0.001, type=float)\n parser.add_argument(\"-b\", \"--batch-size\", default=32, type=int)\n parser.add_argument('--jit', action='store_true')\n args = parser.parse_args()\n main(args)\n", "path": "examples/lda.py"}], "after_files": [{"content": "\"\"\"\nThis example demonstrates how to marginalize out discrete assignment variables\nin a Pyro model.\n\nOur example model is Latent Dirichlet Allocation. While the model in this\nexample does work, it is not the recommended way of coding up LDA in Pyro.\nWhereas the model in this example treats documents as vectors of categorical\nvariables (vectors of word ids), it is usually more efficient to treat\ndocuments as bags of words (histograms of word counts).\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport argparse\nimport functools\nimport logging\n\nimport torch\nfrom torch import nn\nfrom torch.distributions import constraints\n\nimport pyro\nimport pyro.distributions as dist\nfrom pyro.infer import SVI, JitTraceEnum_ELBO, TraceEnum_ELBO\nfrom pyro.optim import Adam\n\nlogging.basicConfig(format='%(relativeCreated) 9d %(message)s', level=logging.INFO)\n\n\n# This is a fully generative model of a batch of documents.\n# data is a [num_words_per_doc, num_documents] shaped array of word ids\n# (specifically it is not a histogram). We assume in this simple example\n# that all documents have the same number of words.\ndef model(data=None, args=None, batch_size=None):\n # Globals.\n with pyro.plate(\"topics\", args.num_topics):\n topic_weights = pyro.sample(\"topic_weights\", dist.Gamma(1. / args.num_topics, 1.))\n topic_words = pyro.sample(\"topic_words\",\n dist.Dirichlet(torch.ones(args.num_words) / args.num_words))\n\n # Locals.\n with pyro.plate(\"documents\", args.num_docs) as ind:\n if data is not None:\n with pyro.util.ignore_jit_warnings():\n assert data.shape == (args.num_words_per_doc, args.num_docs)\n data = data[:, ind]\n doc_topics = pyro.sample(\"doc_topics\", dist.Dirichlet(topic_weights))\n with pyro.plate(\"words\", args.num_words_per_doc):\n # The word_topics variable is marginalized out during inference,\n # achieved by specifying infer={\"enumerate\": \"parallel\"} and using\n # TraceEnum_ELBO for inference. Thus we can ignore this variable in\n # the guide.\n word_topics = pyro.sample(\"word_topics\", dist.Categorical(doc_topics),\n infer={\"enumerate\": \"parallel\"})\n data = pyro.sample(\"doc_words\", dist.Categorical(topic_words[word_topics]),\n obs=data)\n\n return topic_weights, topic_words, data\n\n\n# We will use amortized inference of the local topic variables, achieved by a\n# multi-layer perceptron. 
We'll wrap the guide in an nn.Module.\ndef make_predictor(args):\n layer_sizes = ([args.num_words] +\n [int(s) for s in args.layer_sizes.split('-')] +\n [args.num_topics])\n logging.info('Creating MLP with sizes {}'.format(layer_sizes))\n layers = []\n for in_size, out_size in zip(layer_sizes, layer_sizes[1:]):\n layer = nn.Linear(in_size, out_size)\n layer.weight.data.normal_(0, 0.001)\n layer.bias.data.normal_(0, 0.001)\n layers.append(layer)\n layers.append(nn.Sigmoid())\n layers.append(nn.Softmax(dim=-1))\n return nn.Sequential(*layers)\n\n\ndef parametrized_guide(predictor, data, args, batch_size=None):\n # Use a conjugate guide for global variables.\n topic_weights_posterior = pyro.param(\n \"topic_weights_posterior\",\n lambda: torch.ones(args.num_topics),\n constraint=constraints.positive)\n topic_words_posterior = pyro.param(\n \"topic_words_posterior\",\n lambda: torch.ones(args.num_topics, args.num_words),\n constraint=constraints.greater_than(0.5))\n with pyro.plate(\"topics\", args.num_topics):\n pyro.sample(\"topic_weights\", dist.Gamma(topic_weights_posterior, 1.))\n pyro.sample(\"topic_words\", dist.Dirichlet(topic_words_posterior))\n\n # Use an amortized guide for local variables.\n pyro.module(\"predictor\", predictor)\n with pyro.plate(\"documents\", args.num_docs, batch_size) as ind:\n # The neural network will operate on histograms rather than word\n # index vectors, so we'll convert the raw data to a histogram.\n if torch._C._get_tracing_state():\n counts = torch.eye(1024)[data[:, ind]].sum(0).t()\n else:\n counts = torch.zeros(args.num_words, ind.size(0))\n counts.scatter_add_(0, data[:, ind], torch.tensor(1.).expand(counts.shape))\n doc_topics = predictor(counts.transpose(0, 1))\n pyro.sample(\"doc_topics\", dist.Delta(doc_topics, event_dim=1))\n\n\ndef main(args):\n logging.info('Generating data')\n pyro.set_rng_seed(0)\n pyro.clear_param_store()\n pyro.enable_validation(True)\n\n # We can generate synthetic data directly by calling the model.\n true_topic_weights, true_topic_words, data = model(args=args)\n\n # We'll train using SVI.\n logging.info('-' * 40)\n logging.info('Training on {} documents'.format(args.num_docs))\n predictor = make_predictor(args)\n guide = functools.partial(parametrized_guide, predictor)\n Elbo = JitTraceEnum_ELBO if args.jit else TraceEnum_ELBO\n elbo = Elbo(max_plate_nesting=2)\n optim = Adam({'lr': args.learning_rate})\n svi = SVI(model, guide, optim, elbo)\n logging.info('Step\\tLoss')\n for step in range(args.num_steps):\n loss = svi.step(data, args=args, batch_size=args.batch_size)\n if step % 10 == 0:\n logging.info('{: >5d}\\t{}'.format(step, loss))\n loss = elbo.loss(model, guide, data, args=args)\n logging.info('final loss = {}'.format(loss))\n\n\nif __name__ == '__main__':\n assert pyro.__version__.startswith('0.3.0')\n parser = argparse.ArgumentParser(description=\"Amortized Latent Dirichlet Allocation\")\n parser.add_argument(\"-t\", \"--num-topics\", default=8, type=int)\n parser.add_argument(\"-w\", \"--num-words\", default=1024, type=int)\n parser.add_argument(\"-d\", \"--num-docs\", default=1000, type=int)\n parser.add_argument(\"-wd\", \"--num-words-per-doc\", default=64, type=int)\n parser.add_argument(\"-n\", \"--num-steps\", default=1000, type=int)\n parser.add_argument(\"-l\", \"--layer-sizes\", default=\"100-100\")\n parser.add_argument(\"-lr\", \"--learning-rate\", default=0.001, type=float)\n parser.add_argument(\"-b\", \"--batch-size\", default=32, type=int)\n parser.add_argument('--jit', action='store_true')\n 
args = parser.parse_args()\n main(args)\n", "path": "examples/lda.py"}]}
2,243
361
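A small sketch of the numerical behaviour that the `greater_than(0.5)` constraint in the Pyro record above appears to guard against, under the assumption that sparse Dirichlet draws are what trip `pyro.enable_validation(True)`: with the guide's original concentration of `ones(num_words) / num_words` (about 0.001 per component), many sampled components underflow to exactly 0, i.e. the boundary of the simplex, whereas a concentration above 0.5 keeps every component strictly positive. Plain `torch.distributions` is used here, so the exact error text Pyro reports is not reproduced.

```python
# Compare how many components of a Dirichlet draw are exactly 0 for the original tiny
# concentration (1/1024) versus a concentration above 0.5. Assumption: these boundary
# values are what validation rejects when computing log_prob at site "topic_words".
import torch
from torch.distributions import Dirichlet

torch.manual_seed(0)
num_words = 1024

tiny = Dirichlet(torch.ones(num_words) / num_words).sample()
safe = Dirichlet(torch.full((num_words,), 0.51)).sample()

print("zeros with concentration 1/1024:", int((tiny == 0).sum()))  # typically many
print("zeros with concentration 0.51  :", int((safe == 0).sum()))  # expected: 0
```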
gh_patches_debug_27718
rasdani/github-patches
git_diff
DDMAL__CantusDB-839
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Admin Area: Cannot delete Source that contains chants When trying to delete a source that contains chants on the source admin page, we get a source DoesNotExist error. The source delete function works fine for sources that do not contain any chants, so I believe this has to do with deleting the associated chants after the source has already been deleted. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django/cantusdb_project/main_app/signals.py` Content: ``` 1 import operator 2 from functools import reduce 3 4 from django.contrib.postgres.search import SearchVector 5 from django.db import models 6 from django.db.models import Value 7 from django.db.models.signals import post_save, post_delete 8 from django.dispatch import receiver 9 10 import re 11 12 from main_app.models import Chant 13 from main_app.models import Sequence 14 from main_app.models import Feast 15 16 17 @receiver(post_save, sender=Chant) 18 def on_chant_save(instance, **kwargs): 19 update_source_chant_count(instance) 20 update_source_melody_count(instance) 21 22 update_chant_search_vector(instance) 23 update_volpiano_fields(instance) 24 25 26 @receiver(post_delete, sender=Chant) 27 def on_chant_delete(instance, **kwargs): 28 update_source_chant_count(instance) 29 update_source_melody_count(instance) 30 31 32 @receiver(post_save, sender=Sequence) 33 def on_sequence_save(instance, **kwargs): 34 update_source_chant_count(instance) 35 36 37 @receiver(post_delete, sender=Sequence) 38 def on_sequence_delete(instance, **kwargs): 39 update_source_chant_count(instance) 40 41 42 @receiver(post_save, sender=Feast) 43 def on_feast_save(instance, **kwargs): 44 update_prefix_field(instance) 45 46 47 def update_chant_search_vector(instance): 48 """When saving an instance of Chant, update its search vector field. 
49 50 Called in on_chant_save() 51 """ 52 index_components = instance.index_components() 53 pk = instance.pk 54 search_vectors = [] 55 56 for weight, data in index_components.items(): 57 search_vectors.append( 58 SearchVector(Value(data, output_field=models.TextField()), weight=weight) 59 ) 60 instance.__class__.objects.filter(pk=pk).update( 61 search_vector=reduce(operator.add, search_vectors) 62 ) 63 64 65 def update_source_chant_count(instance): 66 """When saving or deleting a Chant or Sequence, update its Source's number_of_chants field 67 68 Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete() 69 """ 70 71 source = instance.source 72 if source is not None: 73 source.number_of_chants = source.chant_set.count() + source.sequence_set.count() 74 source.save() 75 76 77 def update_source_melody_count(instance): 78 """When saving or deleting a Chant, update its Source's number_of_melodies field 79 80 Called in on_chant_save() and on_chant_delete() 81 """ 82 source = instance.source 83 if source is not None: 84 source.number_of_melodies = source.chant_set.filter( 85 volpiano__isnull=False 86 ).count() 87 source.save() 88 89 90 def update_volpiano_fields(instance): 91 """When saving a Chant, make sure the chant's volpiano_notes and volpiano_intervals are up-to-date 92 93 Called in on_chant_save() 94 """ 95 96 def generate_volpiano_notes(volpiano): 97 """ 98 Populate the ``volpiano_notes`` field of the ``Chant`` model 99 100 This field is used for melody search 101 102 Args: 103 volpiano (str): The content of ``chant.volpiano`` 104 105 Returns: 106 str: Volpiano str with non-note chars and duplicate consecutive notes removed 107 """ 108 # unwanted_chars are non-note chars, including the clefs, barlines, and accidentals etc. 
109 # the `searchMelody.js` on old cantus makes no reference to the b-flat accidentals ("y", "i", "z") 110 # so put them in unwanted chars for now 111 unwanted_chars = [ 112 "-", 113 "1", 114 "2", 115 "3", 116 "4", 117 "5", 118 "6", 119 "7", 120 "?", 121 ".", 122 " ", 123 "y", 124 "i", 125 "z", 126 ] 127 # convert all charactors to lower-case, upper-case letters stand for liquescent of the same pitch 128 volpiano_lower = volpiano.lower() 129 # `)` stands for the lowest `g` note liquescent in volpiano, its 'lower case' is `9` 130 volpiano_notes = volpiano_lower.replace(")", "9") 131 # remove none-note charactors 132 for unwanted_char in unwanted_chars: 133 volpiano_notes = volpiano_notes.replace(unwanted_char, "") 134 # remove duplicate consecutive chars 135 volpiano_notes = re.sub(r"(.)\1+", r"\1", volpiano_notes) 136 return volpiano_notes 137 138 def generate_volpiano_intervals(volpiano_notes): 139 """ 140 Populate the ``volpiano_intervals`` field of the ``Chant`` model 141 142 This field is used for melody search when searching for transpositions 143 144 Args: 145 volpiano_notes (str): The content of ``chant.volpiano_notes``, 146 populated by the ``generate_volpiano_notes`` function 147 148 Returns: 149 str: A str of digits, recording the intervals between adjacent notes 150 """ 151 # replace '9' (the note G) with the char corresponding to (ASCII(a) - 1), because 'a' denotes the note A 152 volpiano_notes = volpiano_notes.replace("9", chr(ord("a") - 1)) 153 # we model the interval between notes using the difference between the ASCII codes of corresponding letters 154 # the letter for the note B is "j" (106), note A is "h" (104), the letter "i" (105) is skipped 155 # move all notes above A down by one letter 156 volpiano_notes = list(volpiano_notes) 157 for j, note in enumerate(volpiano_notes): 158 if ord(note) >= 106: 159 volpiano_notes[j] = chr(ord(note) - 1) 160 161 # `intervals` records the difference between two adjacent notes. 162 # Note that intervals are encoded by counting the number of scale 163 # steps between adjacent notes: an ascending second is thus encoded 164 # as "1"; a descending third is encoded "-2", and so on. 165 intervals = [] 166 for j in range(1, len(volpiano_notes)): 167 intervals.append(ord(volpiano_notes[j]) - ord(volpiano_notes[j - 1])) 168 # convert `intervals` to str 169 volpiano_intervals = "".join([str(interval) for interval in intervals]) 170 return volpiano_intervals 171 172 if instance.volpiano is None: 173 return 174 175 volpiano_notes = generate_volpiano_notes(instance.volpiano) 176 volpiano_intervals = generate_volpiano_intervals(volpiano_notes) 177 178 Chant.objects.filter(id=instance.id).update( 179 volpiano_notes=volpiano_notes, 180 volpiano_intervals=volpiano_intervals, 181 ) 182 183 184 def update_prefix_field(instance): 185 pk = instance.pk 186 187 if instance.feast_code: 188 prefix = str(instance.feast_code)[0:2] 189 instance.__class__.objects.filter(pk=pk).update(prefix=prefix) 190 else: # feast_code is None, "" 191 instance.__class__.objects.filter(pk=pk).update(prefix="") 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django/cantusdb_project/main_app/signals.py b/django/cantusdb_project/main_app/signals.py --- a/django/cantusdb_project/main_app/signals.py +++ b/django/cantusdb_project/main_app/signals.py @@ -12,6 +12,7 @@ from main_app.models import Chant from main_app.models import Sequence from main_app.models import Feast +from main_app.models import Source @receiver(post_save, sender=Chant) @@ -68,7 +69,11 @@ Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete() """ - source = instance.source + # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist + try: + source = instance.source + except Source.DoesNotExist: + source = None if source is not None: source.number_of_chants = source.chant_set.count() + source.sequence_set.count() source.save() @@ -79,7 +84,12 @@ Called in on_chant_save() and on_chant_delete() """ - source = instance.source + + # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist + try: + source = instance.source + except Source.DoesNotExist: + source = None if source is not None: source.number_of_melodies = source.chant_set.filter( volpiano__isnull=False
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/signals.py b/django/cantusdb_project/main_app/signals.py\n--- a/django/cantusdb_project/main_app/signals.py\n+++ b/django/cantusdb_project/main_app/signals.py\n@@ -12,6 +12,7 @@\n from main_app.models import Chant\n from main_app.models import Sequence\n from main_app.models import Feast\n+from main_app.models import Source\n \n \n @receiver(post_save, sender=Chant)\n@@ -68,7 +69,11 @@\n Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete()\n \"\"\"\n \n- source = instance.source\n+ # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist\n+ try:\n+ source = instance.source\n+ except Source.DoesNotExist:\n+ source = None\n if source is not None:\n source.number_of_chants = source.chant_set.count() + source.sequence_set.count()\n source.save()\n@@ -79,7 +84,12 @@\n \n Called in on_chant_save() and on_chant_delete()\n \"\"\"\n- source = instance.source\n+\n+ # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist\n+ try:\n+ source = instance.source\n+ except Source.DoesNotExist:\n+ source = None\n if source is not None:\n source.number_of_melodies = source.chant_set.filter(\n volpiano__isnull=False\n", "issue": "Admin Area: Cannot delete Source that contains chants\nWhen trying to delete a source that contains chants on the source admin page, we get a source DoesNotExist error. The source delete function works fine for sources that do not contain any chants, so I believe this has to do with deleting the associated chants after the source has already been deleted.\n", "before_files": [{"content": "import operator\nfrom functools import reduce\n\nfrom django.contrib.postgres.search import SearchVector\nfrom django.db import models\nfrom django.db.models import Value\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\n\nimport re\n\nfrom main_app.models import Chant\nfrom main_app.models import Sequence\nfrom main_app.models import Feast\n\n\n@receiver(post_save, sender=Chant)\ndef on_chant_save(instance, **kwargs):\n update_source_chant_count(instance)\n update_source_melody_count(instance)\n\n update_chant_search_vector(instance)\n update_volpiano_fields(instance)\n\n\n@receiver(post_delete, sender=Chant)\ndef on_chant_delete(instance, **kwargs):\n update_source_chant_count(instance)\n update_source_melody_count(instance)\n\n\n@receiver(post_save, sender=Sequence)\ndef on_sequence_save(instance, **kwargs):\n update_source_chant_count(instance)\n\n\n@receiver(post_delete, sender=Sequence)\ndef on_sequence_delete(instance, **kwargs):\n update_source_chant_count(instance)\n\n\n@receiver(post_save, sender=Feast)\ndef on_feast_save(instance, **kwargs):\n update_prefix_field(instance)\n\n\ndef update_chant_search_vector(instance):\n \"\"\"When saving an instance of Chant, update its search vector field.\n\n Called in on_chant_save()\n \"\"\"\n index_components = instance.index_components()\n pk = instance.pk\n search_vectors = []\n\n for weight, data in index_components.items():\n search_vectors.append(\n SearchVector(Value(data, output_field=models.TextField()), weight=weight)\n )\n instance.__class__.objects.filter(pk=pk).update(\n search_vector=reduce(operator.add, search_vectors)\n )\n\n\ndef update_source_chant_count(instance):\n \"\"\"When saving or deleting a Chant or Sequence, update its Source's number_of_chants field\n\n 
Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete()\n \"\"\"\n\n source = instance.source\n if source is not None:\n source.number_of_chants = source.chant_set.count() + source.sequence_set.count()\n source.save()\n\n\ndef update_source_melody_count(instance):\n \"\"\"When saving or deleting a Chant, update its Source's number_of_melodies field\n\n Called in on_chant_save() and on_chant_delete()\n \"\"\"\n source = instance.source\n if source is not None:\n source.number_of_melodies = source.chant_set.filter(\n volpiano__isnull=False\n ).count()\n source.save()\n\n\ndef update_volpiano_fields(instance):\n \"\"\"When saving a Chant, make sure the chant's volpiano_notes and volpiano_intervals are up-to-date\n\n Called in on_chant_save()\n \"\"\"\n\n def generate_volpiano_notes(volpiano):\n \"\"\"\n Populate the ``volpiano_notes`` field of the ``Chant`` model\n\n This field is used for melody search\n\n Args:\n volpiano (str): The content of ``chant.volpiano``\n\n Returns:\n str: Volpiano str with non-note chars and duplicate consecutive notes removed\n \"\"\"\n # unwanted_chars are non-note chars, including the clefs, barlines, and accidentals etc.\n # the `searchMelody.js` on old cantus makes no reference to the b-flat accidentals (\"y\", \"i\", \"z\")\n # so put them in unwanted chars for now\n unwanted_chars = [\n \"-\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"?\",\n \".\",\n \" \",\n \"y\",\n \"i\",\n \"z\",\n ]\n # convert all charactors to lower-case, upper-case letters stand for liquescent of the same pitch\n volpiano_lower = volpiano.lower()\n # `)` stands for the lowest `g` note liquescent in volpiano, its 'lower case' is `9`\n volpiano_notes = volpiano_lower.replace(\")\", \"9\")\n # remove none-note charactors\n for unwanted_char in unwanted_chars:\n volpiano_notes = volpiano_notes.replace(unwanted_char, \"\")\n # remove duplicate consecutive chars\n volpiano_notes = re.sub(r\"(.)\\1+\", r\"\\1\", volpiano_notes)\n return volpiano_notes\n\n def generate_volpiano_intervals(volpiano_notes):\n \"\"\"\n Populate the ``volpiano_intervals`` field of the ``Chant`` model\n\n This field is used for melody search when searching for transpositions\n\n Args:\n volpiano_notes (str): The content of ``chant.volpiano_notes``,\n populated by the ``generate_volpiano_notes`` function\n\n Returns:\n str: A str of digits, recording the intervals between adjacent notes\n \"\"\"\n # replace '9' (the note G) with the char corresponding to (ASCII(a) - 1), because 'a' denotes the note A\n volpiano_notes = volpiano_notes.replace(\"9\", chr(ord(\"a\") - 1))\n # we model the interval between notes using the difference between the ASCII codes of corresponding letters\n # the letter for the note B is \"j\" (106), note A is \"h\" (104), the letter \"i\" (105) is skipped\n # move all notes above A down by one letter\n volpiano_notes = list(volpiano_notes)\n for j, note in enumerate(volpiano_notes):\n if ord(note) >= 106:\n volpiano_notes[j] = chr(ord(note) - 1)\n\n # `intervals` records the difference between two adjacent notes.\n # Note that intervals are encoded by counting the number of scale\n # steps between adjacent notes: an ascending second is thus encoded\n # as \"1\"; a descending third is encoded \"-2\", and so on.\n intervals = []\n for j in range(1, len(volpiano_notes)):\n intervals.append(ord(volpiano_notes[j]) - ord(volpiano_notes[j - 1]))\n # convert `intervals` to str\n volpiano_intervals = \"\".join([str(interval) for interval in 
intervals])\n return volpiano_intervals\n\n if instance.volpiano is None:\n return\n\n volpiano_notes = generate_volpiano_notes(instance.volpiano)\n volpiano_intervals = generate_volpiano_intervals(volpiano_notes)\n\n Chant.objects.filter(id=instance.id).update(\n volpiano_notes=volpiano_notes,\n volpiano_intervals=volpiano_intervals,\n )\n\n\ndef update_prefix_field(instance):\n pk = instance.pk\n\n if instance.feast_code:\n prefix = str(instance.feast_code)[0:2]\n instance.__class__.objects.filter(pk=pk).update(prefix=prefix)\n else: # feast_code is None, \"\"\n instance.__class__.objects.filter(pk=pk).update(prefix=\"\")\n", "path": "django/cantusdb_project/main_app/signals.py"}], "after_files": [{"content": "import operator\nfrom functools import reduce\n\nfrom django.contrib.postgres.search import SearchVector\nfrom django.db import models\nfrom django.db.models import Value\nfrom django.db.models.signals import post_save, post_delete\nfrom django.dispatch import receiver\n\nimport re\n\nfrom main_app.models import Chant\nfrom main_app.models import Sequence\nfrom main_app.models import Feast\nfrom main_app.models import Source\n\n\n@receiver(post_save, sender=Chant)\ndef on_chant_save(instance, **kwargs):\n update_source_chant_count(instance)\n update_source_melody_count(instance)\n\n update_chant_search_vector(instance)\n update_volpiano_fields(instance)\n\n\n@receiver(post_delete, sender=Chant)\ndef on_chant_delete(instance, **kwargs):\n update_source_chant_count(instance)\n update_source_melody_count(instance)\n\n\n@receiver(post_save, sender=Sequence)\ndef on_sequence_save(instance, **kwargs):\n update_source_chant_count(instance)\n\n\n@receiver(post_delete, sender=Sequence)\ndef on_sequence_delete(instance, **kwargs):\n update_source_chant_count(instance)\n\n\n@receiver(post_save, sender=Feast)\ndef on_feast_save(instance, **kwargs):\n update_prefix_field(instance)\n\n\ndef update_chant_search_vector(instance):\n \"\"\"When saving an instance of Chant, update its search vector field.\n\n Called in on_chant_save()\n \"\"\"\n index_components = instance.index_components()\n pk = instance.pk\n search_vectors = []\n\n for weight, data in index_components.items():\n search_vectors.append(\n SearchVector(Value(data, output_field=models.TextField()), weight=weight)\n )\n instance.__class__.objects.filter(pk=pk).update(\n search_vector=reduce(operator.add, search_vectors)\n )\n\n\ndef update_source_chant_count(instance):\n \"\"\"When saving or deleting a Chant or Sequence, update its Source's number_of_chants field\n\n Called in on_chant_save(), on_chant_delete(), on_sequence_save() and on_sequence_delete()\n \"\"\"\n\n # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist\n try:\n source = instance.source\n except Source.DoesNotExist:\n source = None\n if source is not None:\n source.number_of_chants = source.chant_set.count() + source.sequence_set.count()\n source.save()\n\n\ndef update_source_melody_count(instance):\n \"\"\"When saving or deleting a Chant, update its Source's number_of_melodies field\n\n Called in on_chant_save() and on_chant_delete()\n \"\"\"\n\n # When a source is deleted (which in turn calls on_chant_delete() on all of its chants) instance.source does not exist\n try:\n source = instance.source\n except Source.DoesNotExist:\n source = None\n if source is not None:\n source.number_of_melodies = source.chant_set.filter(\n volpiano__isnull=False\n ).count()\n source.save()\n\n\ndef 
update_volpiano_fields(instance):\n \"\"\"When saving a Chant, make sure the chant's volpiano_notes and volpiano_intervals are up-to-date\n\n Called in on_chant_save()\n \"\"\"\n\n def generate_volpiano_notes(volpiano):\n \"\"\"\n Populate the ``volpiano_notes`` field of the ``Chant`` model\n\n This field is used for melody search\n\n Args:\n volpiano (str): The content of ``chant.volpiano``\n\n Returns:\n str: Volpiano str with non-note chars and duplicate consecutive notes removed\n \"\"\"\n # unwanted_chars are non-note chars, including the clefs, barlines, and accidentals etc.\n # the `searchMelody.js` on old cantus makes no reference to the b-flat accidentals (\"y\", \"i\", \"z\")\n # so put them in unwanted chars for now\n unwanted_chars = [\n \"-\",\n \"1\",\n \"2\",\n \"3\",\n \"4\",\n \"5\",\n \"6\",\n \"7\",\n \"?\",\n \".\",\n \" \",\n \"y\",\n \"i\",\n \"z\",\n ]\n # convert all charactors to lower-case, upper-case letters stand for liquescent of the same pitch\n volpiano_lower = volpiano.lower()\n # `)` stands for the lowest `g` note liquescent in volpiano, its 'lower case' is `9`\n volpiano_notes = volpiano_lower.replace(\")\", \"9\")\n # remove none-note charactors\n for unwanted_char in unwanted_chars:\n volpiano_notes = volpiano_notes.replace(unwanted_char, \"\")\n # remove duplicate consecutive chars\n volpiano_notes = re.sub(r\"(.)\\1+\", r\"\\1\", volpiano_notes)\n return volpiano_notes\n\n def generate_volpiano_intervals(volpiano_notes):\n \"\"\"\n Populate the ``volpiano_intervals`` field of the ``Chant`` model\n\n This field is used for melody search when searching for transpositions\n\n Args:\n volpiano_notes (str): The content of ``chant.volpiano_notes``,\n populated by the ``generate_volpiano_notes`` function\n\n Returns:\n str: A str of digits, recording the intervals between adjacent notes\n \"\"\"\n # replace '9' (the note G) with the char corresponding to (ASCII(a) - 1), because 'a' denotes the note A\n volpiano_notes = volpiano_notes.replace(\"9\", chr(ord(\"a\") - 1))\n # we model the interval between notes using the difference between the ASCII codes of corresponding letters\n # the letter for the note B is \"j\" (106), note A is \"h\" (104), the letter \"i\" (105) is skipped\n # move all notes above A down by one letter\n volpiano_notes = list(volpiano_notes)\n for j, note in enumerate(volpiano_notes):\n if ord(note) >= 106:\n volpiano_notes[j] = chr(ord(note) - 1)\n\n # `intervals` records the difference between two adjacent notes.\n # Note that intervals are encoded by counting the number of scale\n # steps between adjacent notes: an ascending second is thus encoded\n # as \"1\"; a descending third is encoded \"-2\", and so on.\n intervals = []\n for j in range(1, len(volpiano_notes)):\n intervals.append(ord(volpiano_notes[j]) - ord(volpiano_notes[j - 1]))\n # convert `intervals` to str\n volpiano_intervals = \"\".join([str(interval) for interval in intervals])\n return volpiano_intervals\n\n if instance.volpiano is None:\n return\n\n volpiano_notes = generate_volpiano_notes(instance.volpiano)\n volpiano_intervals = generate_volpiano_intervals(volpiano_notes)\n\n Chant.objects.filter(id=instance.id).update(\n volpiano_notes=volpiano_notes,\n volpiano_intervals=volpiano_intervals,\n )\n\n\ndef update_prefix_field(instance):\n pk = instance.pk\n\n if instance.feast_code:\n prefix = str(instance.feast_code)[0:2]\n instance.__class__.objects.filter(pk=pk).update(prefix=prefix)\n else: # feast_code is None, \"\"\n 
instance.__class__.objects.filter(pk=pk).update(prefix=\"\")\n", "path": "django/cantusdb_project/main_app/signals.py"}]}
2,337
362
gh_patches_debug_16991
rasdani/github-patches
git_diff
pypi__warehouse-3989
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Search reindex task leaves empty index. The 'Search projects' function does not work for me on [https://pypi.org](https://pypi.org). Irrespective of the query, the search does not return any results. (Example: [https://pypi.org/search/?q=numpy](https://pypi.org/search/?q=numpy)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `warehouse/search/tasks.py` Content: ``` 1 # Licensed under the Apache License, Version 2.0 (the "License"); 2 # you may not use this file except in compliance with the License. 3 # You may obtain a copy of the License at 4 # 5 # http://www.apache.org/licenses/LICENSE-2.0 6 # 7 # Unless required by applicable law or agreed to in writing, software 8 # distributed under the License is distributed on an "AS IS" BASIS, 9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 10 # See the License for the specific language governing permissions and 11 # limitations under the License. 12 13 import binascii 14 import urllib 15 import os 16 17 from elasticsearch.helpers import parallel_bulk 18 from elasticsearch_dsl import serializer 19 from sqlalchemy import and_, func 20 from sqlalchemy.orm import aliased 21 import certifi 22 import elasticsearch 23 24 from warehouse.packaging.models import ( 25 Classifier, Project, Release, release_classifiers) 26 from warehouse.packaging.search import Project as ProjectDocType 27 from warehouse.search.utils import get_index 28 from warehouse import tasks 29 from warehouse.utils.db import windowed_query 30 31 32 def _project_docs(db): 33 34 releases_list = ( 35 db.query(Release.name, Release.version) 36 .order_by( 37 Release.name, 38 Release.is_prerelease.nullslast(), 39 Release._pypi_ordering.desc(), 40 ) 41 .distinct(Release.name) 42 .subquery("release_list") 43 ) 44 45 r = aliased(Release, name="r") 46 47 all_versions = ( 48 db.query(func.array_agg(r.version)) 49 .filter(r.name == Release.name) 50 .correlate(Release) 51 .as_scalar() 52 .label("all_versions") 53 ) 54 55 classifiers = ( 56 db.query(func.array_agg(Classifier.classifier)) 57 .select_from(release_classifiers) 58 .join(Classifier, Classifier.id == release_classifiers.c.trove_id) 59 .filter(Release.name == release_classifiers.c.name) 60 .filter(Release.version == release_classifiers.c.version) 61 .correlate(Release) 62 .as_scalar() 63 .label("classifiers") 64 ) 65 66 release_data = ( 67 db.query( 68 Release.description, 69 Release.name, 70 Release.version.label("latest_version"), 71 all_versions, 72 Release.author, 73 Release.author_email, 74 Release.maintainer, 75 Release.maintainer_email, 76 Release.home_page, 77 Release.summary, 78 Release.keywords, 79 Release.platform, 80 Release.download_url, 81 Release.created, 82 classifiers, 83 Project.normalized_name, 84 Project.name, 85 ) 86 .select_from(releases_list) 87 .join(Release, and_( 88 Release.name == releases_list.c.name, 89 Release.version == releases_list.c.version)) 90 .outerjoin(Release.project) 91 .order_by(Release.name) 92 ) 93 94 for release in windowed_query(release_data, Release.name, 50000): 95 p = ProjectDocType.from_db(release) 96 p.full_clean() 97 yield p.to_dict(include_meta=True) 98 99 100 @tasks.task(ignore_result=True, acks_late=True) 101 def reindex(request): 102 """ 103 Recreate the Search Index. 
104 """ 105 p = urllib.parse.urlparse(request.registry.settings["elasticsearch.url"]) 106 client = elasticsearch.Elasticsearch( 107 [urllib.parse.urlunparse(p[:2] + ("",) * 4)], 108 verify_certs=True, 109 ca_certs=certifi.where(), 110 timeout=30, 111 retry_on_timeout=True, 112 serializer=serializer.serializer, 113 ) 114 number_of_replicas = request.registry.get("elasticsearch.replicas", 0) 115 refresh_interval = request.registry.get("elasticsearch.interval", "1s") 116 117 # We use a randomly named index so that we can do a zero downtime reindex. 118 # Essentially we'll use a randomly named index which we will use until all 119 # of the data has been reindexed, at which point we'll point an alias at 120 # our randomly named index, and then delete the old randomly named index. 121 122 # Create the new index and associate all of our doc types with it. 123 index_base = request.registry["elasticsearch.index"] 124 random_token = binascii.hexlify(os.urandom(5)).decode("ascii") 125 new_index_name = "{}-{}".format(index_base, random_token) 126 doc_types = request.registry.get("search.doc_types", set()) 127 shards = request.registry.get("elasticsearch.shards", 1) 128 129 # Create the new index with zero replicas and index refreshes disabled 130 # while we are bulk indexing. 131 new_index = get_index( 132 new_index_name, 133 doc_types, 134 using=client, 135 shards=shards, 136 replicas=0, 137 interval="-1", 138 ) 139 new_index.create(wait_for_active_shards=shards) 140 141 # From this point on, if any error occurs, we want to be able to delete our 142 # in progress index. 143 try: 144 request.db.execute("SET statement_timeout = '600s'") 145 146 for _ in parallel_bulk(client, _project_docs(request.db)): 147 pass 148 except: # noqa 149 new_index.delete() 150 raise 151 finally: 152 request.db.rollback() 153 request.db.close() 154 155 # Now that we've finished indexing all of our data we can update the 156 # replicas and refresh intervals. 157 client.indices.put_settings( 158 index=new_index_name, 159 body={ 160 "index": { 161 "number_of_replicas": number_of_replicas, 162 "refresh_interval": refresh_interval, 163 } 164 } 165 ) 166 167 # Point the alias at our new randomly named index and delete the old index. 168 if client.indices.exists_alias(name=index_base): 169 to_delete = set() 170 actions = [] 171 for name in client.indices.get_alias(name=index_base): 172 to_delete.add(name) 173 actions.append({"remove": {"index": name, "alias": index_base}}) 174 actions.append({"add": {"index": new_index_name, "alias": index_base}}) 175 client.indices.update_aliases({"actions": actions}) 176 client.indices.delete(",".join(to_delete)) 177 else: 178 client.indices.put_alias(name=index_base, index=new_index_name) 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/warehouse/search/tasks.py b/warehouse/search/tasks.py --- a/warehouse/search/tasks.py +++ b/warehouse/search/tasks.py @@ -94,7 +94,9 @@ for release in windowed_query(release_data, Release.name, 50000): p = ProjectDocType.from_db(release) p.full_clean() - yield p.to_dict(include_meta=True) + doc = p.to_dict(include_meta=True) + doc.pop('_index', None) + yield doc @tasks.task(ignore_result=True, acks_late=True) @@ -143,7 +145,8 @@ try: request.db.execute("SET statement_timeout = '600s'") - for _ in parallel_bulk(client, _project_docs(request.db)): + for _ in parallel_bulk(client, _project_docs(request.db), + index=new_index_name): pass except: # noqa new_index.delete()
{"golden_diff": "diff --git a/warehouse/search/tasks.py b/warehouse/search/tasks.py\n--- a/warehouse/search/tasks.py\n+++ b/warehouse/search/tasks.py\n@@ -94,7 +94,9 @@\n for release in windowed_query(release_data, Release.name, 50000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n- yield p.to_dict(include_meta=True)\n+ doc = p.to_dict(include_meta=True)\n+ doc.pop('_index', None)\n+ yield doc\n \n \n @tasks.task(ignore_result=True, acks_late=True)\n@@ -143,7 +145,8 @@\n try:\n request.db.execute(\"SET statement_timeout = '600s'\")\n \n- for _ in parallel_bulk(client, _project_docs(request.db)):\n+ for _ in parallel_bulk(client, _project_docs(request.db),\n+ index=new_index_name):\n pass\n except: # noqa\n new_index.delete()\n", "issue": "Search reindex task leaves empty index.\nThe 'Search projects' function does not work for me on [https://pypi.org](https://pypi.org). Irrespective of the query, the search does not return any results. (Example: [https://pypi.org/search/?q=numpy](https://pypi.org/search/?q=numpy))\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport binascii\nimport urllib\nimport os\n\nfrom elasticsearch.helpers import parallel_bulk\nfrom elasticsearch_dsl import serializer\nfrom sqlalchemy import and_, func\nfrom sqlalchemy.orm import aliased\nimport certifi\nimport elasticsearch\n\nfrom warehouse.packaging.models import (\n Classifier, Project, Release, release_classifiers)\nfrom warehouse.packaging.search import Project as ProjectDocType\nfrom warehouse.search.utils import get_index\nfrom warehouse import tasks\nfrom warehouse.utils.db import windowed_query\n\n\ndef _project_docs(db):\n\n releases_list = (\n db.query(Release.name, Release.version)\n .order_by(\n Release.name,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .distinct(Release.name)\n .subquery(\"release_list\")\n )\n\n r = aliased(Release, name=\"r\")\n\n all_versions = (\n db.query(func.array_agg(r.version))\n .filter(r.name == Release.name)\n .correlate(Release)\n .as_scalar()\n .label(\"all_versions\")\n )\n\n classifiers = (\n db.query(func.array_agg(Classifier.classifier))\n .select_from(release_classifiers)\n .join(Classifier, Classifier.id == release_classifiers.c.trove_id)\n .filter(Release.name == release_classifiers.c.name)\n .filter(Release.version == release_classifiers.c.version)\n .correlate(Release)\n .as_scalar()\n .label(\"classifiers\")\n )\n\n release_data = (\n db.query(\n Release.description,\n Release.name,\n Release.version.label(\"latest_version\"),\n all_versions,\n Release.author,\n Release.author_email,\n Release.maintainer,\n Release.maintainer_email,\n Release.home_page,\n Release.summary,\n Release.keywords,\n Release.platform,\n Release.download_url,\n Release.created,\n classifiers,\n Project.normalized_name,\n Project.name,\n )\n .select_from(releases_list)\n .join(Release, and_(\n Release.name == releases_list.c.name,\n Release.version == releases_list.c.version))\n .outerjoin(Release.project)\n 
.order_by(Release.name)\n )\n\n for release in windowed_query(release_data, Release.name, 50000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n yield p.to_dict(include_meta=True)\n\n\[email protected](ignore_result=True, acks_late=True)\ndef reindex(request):\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n p = urllib.parse.urlparse(request.registry.settings[\"elasticsearch.url\"])\n client = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n serializer=serializer.serializer,\n )\n number_of_replicas = request.registry.get(\"elasticsearch.replicas\", 0)\n refresh_interval = request.registry.get(\"elasticsearch.interval\", \"1s\")\n\n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n # of the data has been reindexed, at which point we'll point an alias at\n # our randomly named index, and then delete the old randomly named index.\n\n # Create the new index and associate all of our doc types with it.\n index_base = request.registry[\"elasticsearch.index\"]\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = request.registry.get(\"search.doc_types\", set())\n shards = request.registry.get(\"elasticsearch.shards\", 1)\n\n # Create the new index with zero replicas and index refreshes disabled\n # while we are bulk indexing.\n new_index = get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=shards,\n replicas=0,\n interval=\"-1\",\n )\n new_index.create(wait_for_active_shards=shards)\n\n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n try:\n request.db.execute(\"SET statement_timeout = '600s'\")\n\n for _ in parallel_bulk(client, _project_docs(request.db)):\n pass\n except: # noqa\n new_index.delete()\n raise\n finally:\n request.db.rollback()\n request.db.close()\n\n # Now that we've finished indexing all of our data we can update the\n # replicas and refresh intervals.\n client.indices.put_settings(\n index=new_index_name,\n body={\n \"index\": {\n \"number_of_replicas\": number_of_replicas,\n \"refresh_interval\": refresh_interval,\n }\n }\n )\n\n # Point the alias at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\n for name in client.indices.get_alias(name=index_base):\n to_delete.add(name)\n actions.append({\"remove\": {\"index\": name, \"alias\": index_base}})\n actions.append({\"add\": {\"index\": new_index_name, \"alias\": index_base}})\n client.indices.update_aliases({\"actions\": actions})\n client.indices.delete(\",\".join(to_delete))\n else:\n client.indices.put_alias(name=index_base, index=new_index_name)\n", "path": "warehouse/search/tasks.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under 
the License.\n\nimport binascii\nimport urllib\nimport os\n\nfrom elasticsearch.helpers import parallel_bulk\nfrom elasticsearch_dsl import serializer\nfrom sqlalchemy import and_, func\nfrom sqlalchemy.orm import aliased\nimport certifi\nimport elasticsearch\n\nfrom warehouse.packaging.models import (\n Classifier, Project, Release, release_classifiers)\nfrom warehouse.packaging.search import Project as ProjectDocType\nfrom warehouse.search.utils import get_index\nfrom warehouse import tasks\nfrom warehouse.utils.db import windowed_query\n\n\ndef _project_docs(db):\n\n releases_list = (\n db.query(Release.name, Release.version)\n .order_by(\n Release.name,\n Release.is_prerelease.nullslast(),\n Release._pypi_ordering.desc(),\n )\n .distinct(Release.name)\n .subquery(\"release_list\")\n )\n\n r = aliased(Release, name=\"r\")\n\n all_versions = (\n db.query(func.array_agg(r.version))\n .filter(r.name == Release.name)\n .correlate(Release)\n .as_scalar()\n .label(\"all_versions\")\n )\n\n classifiers = (\n db.query(func.array_agg(Classifier.classifier))\n .select_from(release_classifiers)\n .join(Classifier, Classifier.id == release_classifiers.c.trove_id)\n .filter(Release.name == release_classifiers.c.name)\n .filter(Release.version == release_classifiers.c.version)\n .correlate(Release)\n .as_scalar()\n .label(\"classifiers\")\n )\n\n release_data = (\n db.query(\n Release.description,\n Release.name,\n Release.version.label(\"latest_version\"),\n all_versions,\n Release.author,\n Release.author_email,\n Release.maintainer,\n Release.maintainer_email,\n Release.home_page,\n Release.summary,\n Release.keywords,\n Release.platform,\n Release.download_url,\n Release.created,\n classifiers,\n Project.normalized_name,\n Project.name,\n )\n .select_from(releases_list)\n .join(Release, and_(\n Release.name == releases_list.c.name,\n Release.version == releases_list.c.version))\n .outerjoin(Release.project)\n .order_by(Release.name)\n )\n\n for release in windowed_query(release_data, Release.name, 50000):\n p = ProjectDocType.from_db(release)\n p.full_clean()\n doc = p.to_dict(include_meta=True)\n doc.pop('_index', None)\n yield doc\n\n\[email protected](ignore_result=True, acks_late=True)\ndef reindex(request):\n \"\"\"\n Recreate the Search Index.\n \"\"\"\n p = urllib.parse.urlparse(request.registry.settings[\"elasticsearch.url\"])\n client = elasticsearch.Elasticsearch(\n [urllib.parse.urlunparse(p[:2] + (\"\",) * 4)],\n verify_certs=True,\n ca_certs=certifi.where(),\n timeout=30,\n retry_on_timeout=True,\n serializer=serializer.serializer,\n )\n number_of_replicas = request.registry.get(\"elasticsearch.replicas\", 0)\n refresh_interval = request.registry.get(\"elasticsearch.interval\", \"1s\")\n\n # We use a randomly named index so that we can do a zero downtime reindex.\n # Essentially we'll use a randomly named index which we will use until all\n # of the data has been reindexed, at which point we'll point an alias at\n # our randomly named index, and then delete the old randomly named index.\n\n # Create the new index and associate all of our doc types with it.\n index_base = request.registry[\"elasticsearch.index\"]\n random_token = binascii.hexlify(os.urandom(5)).decode(\"ascii\")\n new_index_name = \"{}-{}\".format(index_base, random_token)\n doc_types = request.registry.get(\"search.doc_types\", set())\n shards = request.registry.get(\"elasticsearch.shards\", 1)\n\n # Create the new index with zero replicas and index refreshes disabled\n # while we are bulk indexing.\n new_index = 
get_index(\n new_index_name,\n doc_types,\n using=client,\n shards=shards,\n replicas=0,\n interval=\"-1\",\n )\n new_index.create(wait_for_active_shards=shards)\n\n # From this point on, if any error occurs, we want to be able to delete our\n # in progress index.\n try:\n request.db.execute(\"SET statement_timeout = '600s'\")\n\n for _ in parallel_bulk(client, _project_docs(request.db),\n index=new_index_name):\n pass\n except: # noqa\n new_index.delete()\n raise\n finally:\n request.db.rollback()\n request.db.close()\n\n # Now that we've finished indexing all of our data we can update the\n # replicas and refresh intervals.\n client.indices.put_settings(\n index=new_index_name,\n body={\n \"index\": {\n \"number_of_replicas\": number_of_replicas,\n \"refresh_interval\": refresh_interval,\n }\n }\n )\n\n # Point the alias at our new randomly named index and delete the old index.\n if client.indices.exists_alias(name=index_base):\n to_delete = set()\n actions = []\n for name in client.indices.get_alias(name=index_base):\n to_delete.add(name)\n actions.append({\"remove\": {\"index\": name, \"alias\": index_base}})\n actions.append({\"add\": {\"index\": new_index_name, \"alias\": index_base}})\n client.indices.update_aliases({\"actions\": actions})\n client.indices.delete(\",\".join(to_delete))\n else:\n client.indices.put_alias(name=index_base, index=new_index_name)\n", "path": "warehouse/search/tasks.py"}]}
2,069
215
gh_patches_debug_1631
rasdani/github-patches
git_diff
vyperlang__vyper-3340
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7 ### Version Information * vyper Version (output of `vyper --version`): 0.3.8 * OS: osx * Python Version (output of `python --version`): 3.10.4 ### What's your issue about? <img width="705" alt="image" src="https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vyper/semantics/environment.py` Content: ``` 1 from typing import Dict 2 3 from vyper.semantics.analysis.base import VarInfo 4 from vyper.semantics.types import AddressT, BytesT, VyperType 5 from vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T 6 7 8 # common properties for environment variables 9 class _EnvType(VyperType): 10 def __eq__(self, other): 11 return self is other 12 13 def __hash__(self): 14 return hash(id(self)) 15 16 17 class _Block(_EnvType): 18 _id = "block" 19 _type_members = { 20 "coinbase": AddressT(), 21 "difficulty": UINT256_T, 22 "prevrandao": UINT256_T, 23 "number": UINT256_T, 24 "gaslimit": UINT256_T, 25 "basefee": UINT256_T, 26 "prevhash": BYTES32_T, 27 "timestamp": UINT256_T, 28 } 29 30 31 class _Chain(_EnvType): 32 _id = "chain" 33 _type_members = {"id": UINT256_T} 34 35 36 class _Msg(_EnvType): 37 _id = "msg" 38 _type_members = {"data": BytesT(), "gas": UINT256_T, "sender": AddressT(), "value": UINT256_T} 39 40 41 class _Tx(_EnvType): 42 _id = "tx" 43 _type_members = {"origin": AddressT(), "gasprice": UINT256_T} 44 45 46 CONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())} 47 48 49 def get_constant_vars() -> Dict: 50 """ 51 Get a dictionary of constant environment variables. 52 """ 53 result = {} 54 for k, v in CONSTANT_ENVIRONMENT_VARS.items(): 55 result[k] = VarInfo(v, is_constant=True) 56 57 return result 58 59 60 # Not sure this is necessary, but add an ad-hoc type for `self` for clarity 61 class _SelfT(AddressT): 62 pass 63 64 65 MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT} 66 67 68 def get_mutable_vars() -> Dict: 69 """ 70 Get a dictionary of mutable environment variables (those that are 71 modified during the course of contract execution, such as `self`). 72 """ 73 return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()} 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py --- a/vyper/semantics/environment.py +++ b/vyper/semantics/environment.py @@ -57,12 +57,7 @@ return result -# Not sure this is necessary, but add an ad-hoc type for `self` for clarity -class _SelfT(AddressT): - pass - - -MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": _SelfT} +MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {"self": AddressT} def get_mutable_vars() -> Dict:
{"golden_diff": "diff --git a/vyper/semantics/environment.py b/vyper/semantics/environment.py\n--- a/vyper/semantics/environment.py\n+++ b/vyper/semantics/environment.py\n@@ -57,12 +57,7 @@\n return result\n \n \n-# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\n-class _SelfT(AddressT):\n- pass\n-\n-\n-MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n+MUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": AddressT}\n \n \n def get_mutable_vars() -> Dict:\n", "issue": "Bug: compiler dislikes `x not in [a, b]` in 0.3.8, whereas it was fine in 0.3.7\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): 0.3.8\r\n* OS: osx\r\n* Python Version (output of `python --version`): 3.10.4\r\n\r\n### What's your issue about?\r\n\r\n<img width=\"705\" alt=\"image\" src=\"https://user-images.githubusercontent.com/11488427/230437774-c3b68030-9319-4169-b344-dbb470002102.png\">\r\n\r\n\n", "before_files": [{"content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass _Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\n# Not sure this is necessary, but add an ad-hoc type for `self` for clarity\nclass _SelfT(AddressT):\n pass\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": _SelfT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py"}], "after_files": [{"content": "from typing import Dict\n\nfrom vyper.semantics.analysis.base import VarInfo\nfrom vyper.semantics.types import AddressT, BytesT, VyperType\nfrom vyper.semantics.types.shortcuts import BYTES32_T, UINT256_T\n\n\n# common properties for environment variables\nclass _EnvType(VyperType):\n def __eq__(self, other):\n return self is other\n\n def __hash__(self):\n return hash(id(self))\n\n\nclass _Block(_EnvType):\n _id = \"block\"\n _type_members = {\n \"coinbase\": AddressT(),\n \"difficulty\": UINT256_T,\n \"prevrandao\": UINT256_T,\n \"number\": UINT256_T,\n \"gaslimit\": UINT256_T,\n \"basefee\": UINT256_T,\n \"prevhash\": BYTES32_T,\n \"timestamp\": UINT256_T,\n }\n\n\nclass 
_Chain(_EnvType):\n _id = \"chain\"\n _type_members = {\"id\": UINT256_T}\n\n\nclass _Msg(_EnvType):\n _id = \"msg\"\n _type_members = {\"data\": BytesT(), \"gas\": UINT256_T, \"sender\": AddressT(), \"value\": UINT256_T}\n\n\nclass _Tx(_EnvType):\n _id = \"tx\"\n _type_members = {\"origin\": AddressT(), \"gasprice\": UINT256_T}\n\n\nCONSTANT_ENVIRONMENT_VARS = {t._id: t for t in (_Block(), _Chain(), _Tx(), _Msg())}\n\n\ndef get_constant_vars() -> Dict:\n \"\"\"\n Get a dictionary of constant environment variables.\n \"\"\"\n result = {}\n for k, v in CONSTANT_ENVIRONMENT_VARS.items():\n result[k] = VarInfo(v, is_constant=True)\n\n return result\n\n\nMUTABLE_ENVIRONMENT_VARS: Dict[str, type] = {\"self\": AddressT}\n\n\ndef get_mutable_vars() -> Dict:\n \"\"\"\n Get a dictionary of mutable environment variables (those that are\n modified during the course of contract execution, such as `self`).\n \"\"\"\n return {name: VarInfo(type_()) for name, type_ in MUTABLE_ENVIRONMENT_VARS.items()}\n", "path": "vyper/semantics/environment.py"}]}
1,099
143
gh_patches_debug_41243
rasdani/github-patches
git_diff
biopython__biopython-3285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Version switching on the Sphinx API docs We now have: - https://biopython.org/docs/1.74/api/ - https://biopython.org/docs/1.75/api/ - https://biopython.org/docs/1.76/api/ - https://biopython.org/docs/1.77/api/ - ... - https://biopython.org/docs/dev/api/ The webpage source for these exists here: https://github.com/biopython/docs The Sphinx configuration to build them is here: https://github.com/biopython/biopython/tree/master/Doc/api Can we add some JavaScript or otherwise like on ReadTheDocs to make it easy to toggle between version from within the browse? This likely means some changes to our Sphinx configuration (e.g. the common templates), and therefore would require backporting to regenerate the old pages - which would be OK. This could also resolve #2904. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Doc/api/conf.py` Content: ``` 1 #!/usr/bin/env python3 2 """Biopython Sphinx documentation build configuration file. 3 4 After generating ``*.rst`` files from the source code, this 5 file controls running ``sphinx-build`` to turn these into 6 human readable documentation. 7 """ 8 9 import os 10 import shutil 11 import sys 12 import tempfile 13 14 from sphinx.ext import autodoc 15 16 from Bio import __version__, Application 17 18 # -- General configuration ------------------------------------------------ 19 20 # If your documentation needs a minimal Sphinx version, state it here. 21 # 22 # needs_sphinx = '1.0' 23 needs_sphinx = "1.8" 24 25 # Add any Sphinx extension module names here, as strings. They can be 26 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 27 # ones. 28 extensions = [ 29 "sphinx.ext.autodoc", 30 "sphinx.ext.todo", 31 # Don't want to include source code in the API docs 32 # 'sphinx.ext.viewcode', 33 "sphinx.ext.autosummary", 34 "numpydoc", 35 ] 36 37 # Add any paths that contain templates here, relative to this directory. 38 templates_path = ["_templates"] 39 40 # The suffix(es) of source filenames. 41 # You can specify multiple suffix as a list of string: 42 # 43 # source_suffix = ['.rst', '.md'] 44 source_suffix = ".rst" 45 46 # The master toctree document. 47 master_doc = "index" 48 49 # General information about the project. 50 project = "Biopython" 51 copyright = "1999-2020, The Biopython Contributors" 52 author = "The Biopython Contributors" 53 document = "Biopython API Documentation" 54 55 # The version info for the project you're documenting, acts as replacement for 56 # |version| and |release|, also used in various other places throughout the 57 # built documents. 58 # 59 # The short X.Y version. 60 version = __version__ # TODO: Shorten this 61 # The full version, including alpha/beta/rc tags. 62 release = __version__ 63 64 # The language for content autogenerated by Sphinx. Refer to documentation 65 # for a list of supported languages. 66 # 67 # This is also used if you do content translation via gettext catalogs. 68 # Usually you set "language" from the command line for these cases. 69 language = "en" 70 71 # List of patterns, relative to source directory, that match files and 72 # directories to ignore when looking for source files. 73 # This patterns also effect to html_static_path and html_extra_path 74 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 75 76 # The name of the Pygments (syntax highlighting) style to use. 
77 pygments_style = "sphinx" 78 79 # If true, `todo` and `todoList` produce output, else they produce nothing. 80 todo_include_todos = True 81 82 # -- Options for autodoc -------------------------------------------------- 83 84 # This requires Sphinx 1.8 or later: 85 autodoc_default_options = { 86 "members": None, 87 "undoc-members": None, 88 "special-members": None, 89 "show-inheritance": None, 90 "member-order": "bysource", 91 "exclude-members": "__dict__,__weakref__,__module__", 92 } 93 94 # To avoid import errors. 95 autodoc_mock_imports = ["MySQLdb", "Bio.Restriction.Restriction", "Bio.Alphabet"] 96 97 # -- Options for HTML output ---------------------------------------------- 98 99 # Sphinx default was html_theme = "alabaster" 100 html_theme = "sphinx_rtd_theme" 101 102 # Sphinx Read The Docs theme settings, see 103 # https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html 104 html_theme_options = { 105 "prev_next_buttons_location": "both", 106 # Same a Hyde theme sidebar on biopython.org: 107 "style_nav_header_background": "#10100F", 108 # Since we have the Biopython logo via html_logo, 109 "logo_only": True, 110 } 111 112 # Based on: 113 # https://github.com/readthedocs/sphinx_rtd_theme/issues/231#issuecomment-126447493 114 html_context = { 115 "display_github": True, # Add 'Edit on Github' link instead of 'View page source' 116 "github_user": "biopython", 117 "github_repo": "biopython", 118 "github_version": "master", 119 "conf_py_path": "/Doc/api/", 120 # "source_suffix": source_suffix, 121 } 122 123 html_logo = "../images/biopython_logo.svg" 124 125 # The RST source is transient, don't need/want to include it 126 html_show_sourcelink = False 127 html_copy_source = False 128 129 # Add any paths that contain custom static files (such as style sheets) here, 130 # relative to this directory. They are copied after the builtin static files, 131 # so a file named "default.css" will overwrite the builtin "default.css". 132 html_static_path = ["_static"] 133 134 # Custom sidebar templates, must be a dictionary that maps document names 135 # to template names. 136 # 137 # This is required for the alabaster theme 138 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars 139 html_sidebars = { 140 "**": [ 141 "about.html", 142 "navigation.html", 143 "relations.html", # needs 'show_related': True theme option to display 144 "searchbox.html", 145 "donate.html", 146 ] 147 } 148 149 150 # -- Options for HTMLHelp output ------------------------------------------ 151 152 # Output file base name for HTML help builder. 153 htmlhelp_basename = "Biopython_doc" 154 155 156 # -- Options for LaTeX output --------------------------------------------- 157 158 latex_elements = { 159 # The paper size ('letterpaper' or 'a4paper'). 160 # 161 # 'papersize': 'letterpaper', 162 # The font size ('10pt', '11pt' or '12pt'). 163 # 164 # 'pointsize': '10pt', 165 # Additional stuff for the LaTeX preamble. 166 # 167 # 'preamble': '', 168 # Latex figure (float) alignment 169 # 170 # 'figure_align': 'htbp', 171 } 172 173 # Grouping the document tree into LaTeX files. List of tuples 174 # (source start file, target name, title, 175 # author, documentclass [howto, manual, or own class]). 176 latex_documents = [(master_doc, "Biopython_API.tex", document, author, "manual")] 177 178 179 # -- Options for manual page output --------------------------------------- 180 181 # One entry per manual page. List of tuples 182 # (source start file, name, description, authors, manual section). 
183 man_pages = [(master_doc, "biopython", document, [author], 1)] 184 185 186 # -- Options for Texinfo output ------------------------------------------- 187 188 # Grouping the document tree into Texinfo files. List of tuples 189 # (source start file, target name, title, author, 190 # dir menu entry, description, category) 191 texinfo_documents = [ 192 ( 193 master_doc, 194 "Biopython", 195 document, 196 author, 197 "Biopython", 198 "Collection of modules for dealing with biological data in Python.", 199 "Miscellaneous", 200 ) 201 ] 202 203 204 # -- Options for Epub output ---------------------------------------------- 205 206 # Bibliographic Dublin Core info. 207 epub_title = document # project 208 epub_author = author 209 epub_publisher = author 210 epub_copyright = copyright 211 212 # The unique identifier of the text. This can be a ISBN number 213 # or the project homepage. 214 # 215 # epub_identifier = '' 216 217 # A unique identification for the text. 218 # 219 # epub_uid = '' 220 221 # A list of files that should not be packed into the epub file. 222 epub_exclude_files = ["search.html"] 223 224 # -- Options for numpydoc ------------------------------------------------- 225 226 numpydoc_class_members_toctree = False 227 # Prevents the attributes and methods from being shown twice 228 numpydoc_show_class_members = False 229 230 # -- Magic to run sphinx-apidoc automatically ----------------------------- 231 232 # See https://github.com/rtfd/readthedocs.org/issues/1139 233 # on which this is based. 234 235 236 def insert_github_link(filename): 237 """Insert file specific :github_url: metadata for theme breadcrumbs.""" 238 assert "/" not in filename and filename.endswith(".rst") 239 with open(filename) as handle: 240 text = handle.read() 241 if ":github_url:" in text: 242 return 243 244 python = filename[:-4].replace(".", "/") + "/__init__.py" 245 if not os.path.isfile(os.path.join("../../", python)): 246 python = filename[:-4].replace(".", "/") + ".py" 247 if not os.path.isfile(os.path.join("../../", python)): 248 sys.stderr.write( 249 "WARNING: Could not map %s to a Python file, e.g. 
%s\n" % (filename, python) 250 ) 251 return 252 253 text = ":github_url: https://github.com/%s/%s/blob/%s/%s\n\n%s" % ( 254 html_context["github_user"], 255 html_context["github_repo"], 256 html_context["github_version"], 257 python, 258 text, 259 ) 260 with open(filename, "w") as handle: 261 handle.write(text) 262 263 264 def run_apidoc(_): 265 """Call sphinx-apidoc on Bio and BioSQL modules.""" 266 from sphinx.ext.apidoc import main as apidoc_main 267 268 cur_dir = os.path.abspath(os.path.dirname(__file__)) 269 # Can't see a better way than running apidoc twice, for Bio & BioSQL 270 # We don't care about the index.rst / conf.py (we have our own) 271 # or the Makefile / make.bat (effectively same) clashing, 272 # $ sphinx-apidoc -e -F -o /tmp/api/BioSQL BioSQL 273 # $ sphinx-apidoc -e -F -o /tmp/api/Bio Bio 274 tmp_path = tempfile.mkdtemp() 275 apidoc_main(["-e", "-F", "-o", tmp_path, "../../BioSQL"]) 276 apidoc_main(["-e", "-F", "-o", tmp_path, "../../Bio"]) 277 os.remove(os.path.join(tmp_path, "index.rst")) # Using our own 278 for filename in os.listdir(tmp_path): 279 if filename.endswith(".rst"): 280 shutil.move( 281 os.path.join(tmp_path, filename), os.path.join(cur_dir, filename) 282 ) 283 shutil.rmtree(tmp_path) 284 285 for f in os.listdir(cur_dir): 286 if f.startswith("Bio") and f.endswith(".rst"): 287 insert_github_link(f) 288 289 290 class BioPythonAPI(autodoc.ClassDocumenter): 291 """Custom Class Documenter for AbstractCommandline classes.""" 292 293 def import_object(self): 294 """Import the class.""" 295 ret = super().import_object() 296 297 if not issubclass(self.object, Application.AbstractCommandline): 298 return ret 299 300 try: 301 # If the object is an AbstractCommandline we instantiate it. 302 self.object() 303 except TypeError: 304 # Throws if the object is the base AbstractCommandline class 305 pass 306 return ret 307 308 309 def setup(app): 310 """Over-ride Sphinx setup to trigger sphinx-apidoc.""" 311 app.connect("builder-inited", run_apidoc) 312 313 def add_documenter(app, env, docnames): 314 app.add_autodocumenter(BioPythonAPI, True) 315 316 # Over-ride autodoc documenter 317 app.connect("env-before-read-docs", add_documenter) 318 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Doc/api/conf.py b/Doc/api/conf.py --- a/Doc/api/conf.py +++ b/Doc/api/conf.py @@ -61,6 +61,25 @@ # The full version, including alpha/beta/rc tags. release = __version__ +# Versions for versions.html: +# (this will break if we have version gaps) +try: + main_version, minor_version, _ = version.split(".") # e.g. 1.79.dev0 + dev_version = True +except ValueError: + main_version, minor_version = version.split(".") # e.g. 1.78 + dev_version = False +prev_minor_version = int(minor_version) - (2 if dev_version else 1) +previous_version = f"{main_version}.{prev_minor_version}" +versions = [ + ("Previous", f"../../{previous_version}/api/"), + ("Latest", "../../latest/api/"), + ("Develop", "../../dev/api/"), +] + +if version < "1.75": # 1.74 is the earliest Sphinx-generated api documentation + del versions[0] + # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # @@ -92,7 +111,9 @@ } # To avoid import errors. -autodoc_mock_imports = ["MySQLdb", "Bio.Restriction.Restriction", "Bio.Alphabet"] +autodoc_mock_imports = ["MySQLdb", "Bio.Restriction.Restriction"] +if version > "1.77": + autodoc_mock_imports.append("Bio.Alphabet") # -- Options for HTML output ---------------------------------------------- @@ -118,9 +139,16 @@ "github_version": "master", "conf_py_path": "/Doc/api/", # "source_suffix": source_suffix, + "theme_display_version": False, + # Biopython-specific values for version-footer (versions.html): + "display_version_footer": True, + "current_version": version, + "versions": versions, + "project_home_url": "https://biopython.org", + "project_github_url": "https://github.com/biopython/biopython", } -html_logo = "../images/biopython_logo.svg" +html_logo = "../images/biopython_logo_white.png" # The RST source is transient, don't need/want to include it html_show_sourcelink = False @@ -131,20 +159,21 @@ # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] +# The following is not applicable to the Read-the-docs theme: # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -html_sidebars = { - "**": [ - "about.html", - "navigation.html", - "relations.html", # needs 'show_related': True theme option to display - "searchbox.html", - "donate.html", - ] -} +# html_sidebars = { +# "**": [ +# "about.html", +# "navigation.html", +# "relations.html", # needs 'show_related': True theme option to display +# "searchbox.html", +# "donate.html", +# ] +# } # -- Options for HTMLHelp output ------------------------------------------ @@ -310,6 +339,8 @@ """Over-ride Sphinx setup to trigger sphinx-apidoc.""" app.connect("builder-inited", run_apidoc) + app.add_css_file("biopython.css") + def add_documenter(app, env, docnames): app.add_autodocumenter(BioPythonAPI, True)
{"golden_diff": "diff --git a/Doc/api/conf.py b/Doc/api/conf.py\n--- a/Doc/api/conf.py\n+++ b/Doc/api/conf.py\n@@ -61,6 +61,25 @@\n # The full version, including alpha/beta/rc tags.\n release = __version__\n \n+# Versions for versions.html:\n+# (this will break if we have version gaps)\n+try:\n+ main_version, minor_version, _ = version.split(\".\") # e.g. 1.79.dev0\n+ dev_version = True\n+except ValueError:\n+ main_version, minor_version = version.split(\".\") # e.g. 1.78\n+ dev_version = False\n+prev_minor_version = int(minor_version) - (2 if dev_version else 1)\n+previous_version = f\"{main_version}.{prev_minor_version}\"\n+versions = [\n+ (\"Previous\", f\"../../{previous_version}/api/\"),\n+ (\"Latest\", \"../../latest/api/\"),\n+ (\"Develop\", \"../../dev/api/\"),\n+]\n+\n+if version < \"1.75\": # 1.74 is the earliest Sphinx-generated api documentation\n+ del versions[0]\n+\n # The language for content autogenerated by Sphinx. Refer to documentation\n # for a list of supported languages.\n #\n@@ -92,7 +111,9 @@\n }\n \n # To avoid import errors.\n-autodoc_mock_imports = [\"MySQLdb\", \"Bio.Restriction.Restriction\", \"Bio.Alphabet\"]\n+autodoc_mock_imports = [\"MySQLdb\", \"Bio.Restriction.Restriction\"]\n+if version > \"1.77\":\n+ autodoc_mock_imports.append(\"Bio.Alphabet\")\n \n # -- Options for HTML output ----------------------------------------------\n \n@@ -118,9 +139,16 @@\n \"github_version\": \"master\",\n \"conf_py_path\": \"/Doc/api/\",\n # \"source_suffix\": source_suffix,\n+ \"theme_display_version\": False,\n+ # Biopython-specific values for version-footer (versions.html):\n+ \"display_version_footer\": True,\n+ \"current_version\": version,\n+ \"versions\": versions,\n+ \"project_home_url\": \"https://biopython.org\",\n+ \"project_github_url\": \"https://github.com/biopython/biopython\",\n }\n \n-html_logo = \"../images/biopython_logo.svg\"\n+html_logo = \"../images/biopython_logo_white.png\"\n \n # The RST source is transient, don't need/want to include it\n html_show_sourcelink = False\n@@ -131,20 +159,21 @@\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n html_static_path = [\"_static\"]\n \n+# The following is not applicable to the Read-the-docs theme:\n # Custom sidebar templates, must be a dictionary that maps document names\n # to template names.\n #\n # This is required for the alabaster theme\n # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\n-html_sidebars = {\n- \"**\": [\n- \"about.html\",\n- \"navigation.html\",\n- \"relations.html\", # needs 'show_related': True theme option to display\n- \"searchbox.html\",\n- \"donate.html\",\n- ]\n-}\n+# html_sidebars = {\n+# \"**\": [\n+# \"about.html\",\n+# \"navigation.html\",\n+# \"relations.html\", # needs 'show_related': True theme option to display\n+# \"searchbox.html\",\n+# \"donate.html\",\n+# ]\n+# }\n \n \n # -- Options for HTMLHelp output ------------------------------------------\n@@ -310,6 +339,8 @@\n \"\"\"Over-ride Sphinx setup to trigger sphinx-apidoc.\"\"\"\n app.connect(\"builder-inited\", run_apidoc)\n \n+ app.add_css_file(\"biopython.css\")\n+\n def add_documenter(app, env, docnames):\n app.add_autodocumenter(BioPythonAPI, True)\n", "issue": "Version switching on the Sphinx API docs\nWe now have:\r\n\r\n- https://biopython.org/docs/1.74/api/\r\n- https://biopython.org/docs/1.75/api/\r\n- https://biopython.org/docs/1.76/api/\r\n- https://biopython.org/docs/1.77/api/\r\n- ...\r\n- https://biopython.org/docs/dev/api/\r\n\r\nThe webpage 
source for these exists here:\r\n\r\nhttps://github.com/biopython/docs\r\n\r\nThe Sphinx configuration to build them is here:\r\n\r\nhttps://github.com/biopython/biopython/tree/master/Doc/api\r\n\r\nCan we add some JavaScript or otherwise like on ReadTheDocs to make it easy to toggle between version from within the browse?\r\n\r\nThis likely means some changes to our Sphinx configuration (e.g. the common templates), and therefore would require backporting to regenerate the old pages - which would be OK.\r\n\r\nThis could also resolve #2904.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Biopython Sphinx documentation build configuration file.\n\nAfter generating ``*.rst`` files from the source code, this\nfile controls running ``sphinx-build`` to turn these into\nhuman readable documentation.\n\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nfrom sphinx.ext import autodoc\n\nfrom Bio import __version__, Application\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\nneeds_sphinx = \"1.8\"\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n # Don't want to include source code in the API docs\n # 'sphinx.ext.viewcode',\n \"sphinx.ext.autosummary\",\n \"numpydoc\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Biopython\"\ncopyright = \"1999-2020, The Biopython Contributors\"\nauthor = \"The Biopython Contributors\"\ndocument = \"Biopython API Documentation\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__ # TODO: Shorten this\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for autodoc --------------------------------------------------\n\n# This requires Sphinx 1.8 or later:\nautodoc_default_options = {\n \"members\": None,\n \"undoc-members\": None,\n \"special-members\": None,\n \"show-inheritance\": None,\n \"member-order\": \"bysource\",\n \"exclude-members\": \"__dict__,__weakref__,__module__\",\n}\n\n# To avoid import errors.\nautodoc_mock_imports = [\"MySQLdb\", \"Bio.Restriction.Restriction\", \"Bio.Alphabet\"]\n\n# -- Options for HTML output ----------------------------------------------\n\n# Sphinx default was html_theme = \"alabaster\"\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Sphinx Read The Docs theme settings, see\n# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html\nhtml_theme_options = {\n \"prev_next_buttons_location\": \"both\",\n # Same a Hyde theme sidebar on biopython.org:\n \"style_nav_header_background\": \"#10100F\",\n # Since we have the Biopython logo via html_logo,\n \"logo_only\": True,\n}\n\n# Based on:\n# https://github.com/readthedocs/sphinx_rtd_theme/issues/231#issuecomment-126447493\nhtml_context = {\n \"display_github\": True, # Add 'Edit on Github' link instead of 'View page source'\n \"github_user\": \"biopython\",\n \"github_repo\": \"biopython\",\n \"github_version\": \"master\",\n \"conf_py_path\": \"/Doc/api/\",\n # \"source_suffix\": source_suffix,\n}\n\nhtml_logo = \"../images/biopython_logo.svg\"\n\n# The RST source is transient, don't need/want to include it\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n \"**\": [\n \"about.html\",\n \"navigation.html\",\n \"relations.html\", # needs 'show_related': True theme option to display\n \"searchbox.html\",\n \"donate.html\",\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Biopython_doc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [(master_doc, \"Biopython_API.tex\", document, author, \"manual\")]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"biopython\", document, [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Biopython\",\n document,\n author,\n \"Biopython\",\n \"Collection of modules for dealing with biological data in Python.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = document # project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n# -- Options for numpydoc -------------------------------------------------\n\nnumpydoc_class_members_toctree = False\n# Prevents the attributes and methods from being shown twice\nnumpydoc_show_class_members = False\n\n# -- Magic to run sphinx-apidoc automatically -----------------------------\n\n# See https://github.com/rtfd/readthedocs.org/issues/1139\n# on which this is based.\n\n\ndef insert_github_link(filename):\n \"\"\"Insert file specific :github_url: metadata for theme breadcrumbs.\"\"\"\n assert \"/\" not in filename and filename.endswith(\".rst\")\n with open(filename) as handle:\n text = handle.read()\n if \":github_url:\" in text:\n return\n\n python = filename[:-4].replace(\".\", \"/\") + \"/__init__.py\"\n if not os.path.isfile(os.path.join(\"../../\", python)):\n python = filename[:-4].replace(\".\", \"/\") + \".py\"\n if not os.path.isfile(os.path.join(\"../../\", python)):\n sys.stderr.write(\n \"WARNING: Could not map %s to a Python file, e.g. 
%s\\n\" % (filename, python)\n )\n return\n\n text = \":github_url: https://github.com/%s/%s/blob/%s/%s\\n\\n%s\" % (\n html_context[\"github_user\"],\n html_context[\"github_repo\"],\n html_context[\"github_version\"],\n python,\n text,\n )\n with open(filename, \"w\") as handle:\n handle.write(text)\n\n\ndef run_apidoc(_):\n \"\"\"Call sphinx-apidoc on Bio and BioSQL modules.\"\"\"\n from sphinx.ext.apidoc import main as apidoc_main\n\n cur_dir = os.path.abspath(os.path.dirname(__file__))\n # Can't see a better way than running apidoc twice, for Bio & BioSQL\n # We don't care about the index.rst / conf.py (we have our own)\n # or the Makefile / make.bat (effectively same) clashing,\n # $ sphinx-apidoc -e -F -o /tmp/api/BioSQL BioSQL\n # $ sphinx-apidoc -e -F -o /tmp/api/Bio Bio\n tmp_path = tempfile.mkdtemp()\n apidoc_main([\"-e\", \"-F\", \"-o\", tmp_path, \"../../BioSQL\"])\n apidoc_main([\"-e\", \"-F\", \"-o\", tmp_path, \"../../Bio\"])\n os.remove(os.path.join(tmp_path, \"index.rst\")) # Using our own\n for filename in os.listdir(tmp_path):\n if filename.endswith(\".rst\"):\n shutil.move(\n os.path.join(tmp_path, filename), os.path.join(cur_dir, filename)\n )\n shutil.rmtree(tmp_path)\n\n for f in os.listdir(cur_dir):\n if f.startswith(\"Bio\") and f.endswith(\".rst\"):\n insert_github_link(f)\n\n\nclass BioPythonAPI(autodoc.ClassDocumenter):\n \"\"\"Custom Class Documenter for AbstractCommandline classes.\"\"\"\n\n def import_object(self):\n \"\"\"Import the class.\"\"\"\n ret = super().import_object()\n\n if not issubclass(self.object, Application.AbstractCommandline):\n return ret\n\n try:\n # If the object is an AbstractCommandline we instantiate it.\n self.object()\n except TypeError:\n # Throws if the object is the base AbstractCommandline class\n pass\n return ret\n\n\ndef setup(app):\n \"\"\"Over-ride Sphinx setup to trigger sphinx-apidoc.\"\"\"\n app.connect(\"builder-inited\", run_apidoc)\n\n def add_documenter(app, env, docnames):\n app.add_autodocumenter(BioPythonAPI, True)\n\n # Over-ride autodoc documenter\n app.connect(\"env-before-read-docs\", add_documenter)\n", "path": "Doc/api/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\"\"\"Biopython Sphinx documentation build configuration file.\n\nAfter generating ``*.rst`` files from the source code, this\nfile controls running ``sphinx-build`` to turn these into\nhuman readable documentation.\n\"\"\"\n\nimport os\nimport shutil\nimport sys\nimport tempfile\n\nfrom sphinx.ext import autodoc\n\nfrom Bio import __version__, Application\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\nneeds_sphinx = \"1.8\"\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.todo\",\n # Don't want to include source code in the API docs\n # 'sphinx.ext.viewcode',\n \"sphinx.ext.autosummary\",\n \"numpydoc\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"Biopython\"\ncopyright = \"1999-2020, The Biopython Contributors\"\nauthor = \"The Biopython Contributors\"\ndocument = \"Biopython API Documentation\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = __version__ # TODO: Shorten this\n# The full version, including alpha/beta/rc tags.\nrelease = __version__\n\n# Versions for versions.html:\n# (this will break if we have version gaps)\ntry:\n main_version, minor_version, _ = version.split(\".\") # e.g. 1.79.dev0\n dev_version = True\nexcept ValueError:\n main_version, minor_version = version.split(\".\") # e.g. 1.78\n dev_version = False\nprev_minor_version = int(minor_version) - (2 if dev_version else 1)\nprevious_version = f\"{main_version}.{prev_minor_version}\"\nversions = [\n (\"Previous\", f\"../../{previous_version}/api/\"),\n (\"Latest\", \"../../latest/api/\"),\n (\"Develop\", \"../../dev/api/\"),\n]\n\nif version < \"1.75\": # 1.74 is the earliest Sphinx-generated api documentation\n del versions[0]\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# -- Options for autodoc --------------------------------------------------\n\n# This requires Sphinx 1.8 or later:\nautodoc_default_options = {\n \"members\": None,\n \"undoc-members\": None,\n \"special-members\": None,\n \"show-inheritance\": None,\n \"member-order\": \"bysource\",\n \"exclude-members\": \"__dict__,__weakref__,__module__\",\n}\n\n# To avoid import errors.\nautodoc_mock_imports = [\"MySQLdb\", \"Bio.Restriction.Restriction\"]\nif version > \"1.77\":\n autodoc_mock_imports.append(\"Bio.Alphabet\")\n\n# -- Options for HTML output ----------------------------------------------\n\n# Sphinx default was html_theme = \"alabaster\"\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Sphinx Read The Docs theme settings, see\n# https://sphinx-rtd-theme.readthedocs.io/en/latest/configuring.html\nhtml_theme_options = {\n \"prev_next_buttons_location\": \"both\",\n # Same a Hyde theme sidebar on biopython.org:\n \"style_nav_header_background\": \"#10100F\",\n # Since we have the Biopython logo via html_logo,\n \"logo_only\": True,\n}\n\n# Based on:\n# https://github.com/readthedocs/sphinx_rtd_theme/issues/231#issuecomment-126447493\nhtml_context = {\n \"display_github\": True, # Add 'Edit on Github' link instead of 'View page source'\n \"github_user\": \"biopython\",\n \"github_repo\": \"biopython\",\n \"github_version\": \"master\",\n \"conf_py_path\": \"/Doc/api/\",\n # \"source_suffix\": source_suffix,\n \"theme_display_version\": False,\n # Biopython-specific values for version-footer (versions.html):\n \"display_version_footer\": True,\n \"current_version\": version,\n \"versions\": versions,\n \"project_home_url\": \"https://biopython.org\",\n \"project_github_url\": \"https://github.com/biopython/biopython\",\n}\n\nhtml_logo = \"../images/biopython_logo_white.png\"\n\n# The RST source is transient, don't need/want to include it\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# The following is not applicable to the Read-the-docs theme:\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\n# html_sidebars = {\n# \"**\": [\n# \"about.html\",\n# \"navigation.html\",\n# \"relations.html\", # needs 'show_related': True theme option to display\n# \"searchbox.html\",\n# \"donate.html\",\n# ]\n# }\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Biopython_doc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [(master_doc, \"Biopython_API.tex\", document, author, \"manual\")]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"biopython\", document, [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Biopython\",\n document,\n author,\n \"Biopython\",\n \"Collection of modules for dealing with biological data in Python.\",\n \"Miscellaneous\",\n )\n]\n\n\n# -- Options for Epub output ----------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = document # project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = [\"search.html\"]\n\n# -- Options for numpydoc -------------------------------------------------\n\nnumpydoc_class_members_toctree = False\n# Prevents the attributes and methods from being shown twice\nnumpydoc_show_class_members = False\n\n# -- Magic to run sphinx-apidoc automatically -----------------------------\n\n# See https://github.com/rtfd/readthedocs.org/issues/1139\n# on which this is based.\n\n\ndef insert_github_link(filename):\n \"\"\"Insert file specific :github_url: metadata for theme breadcrumbs.\"\"\"\n assert \"/\" not in filename and filename.endswith(\".rst\")\n with open(filename) as handle:\n text = handle.read()\n if \":github_url:\" in text:\n return\n\n python = filename[:-4].replace(\".\", \"/\") + \"/__init__.py\"\n if not os.path.isfile(os.path.join(\"../../\", python)):\n python = filename[:-4].replace(\".\", \"/\") + \".py\"\n if not os.path.isfile(os.path.join(\"../../\", python)):\n sys.stderr.write(\n \"WARNING: Could not map %s to a Python file, e.g. %s\\n\" % (filename, python)\n )\n return\n\n text = \":github_url: https://github.com/%s/%s/blob/%s/%s\\n\\n%s\" % (\n html_context[\"github_user\"],\n html_context[\"github_repo\"],\n html_context[\"github_version\"],\n python,\n text,\n )\n with open(filename, \"w\") as handle:\n handle.write(text)\n\n\ndef run_apidoc(_):\n \"\"\"Call sphinx-apidoc on Bio and BioSQL modules.\"\"\"\n from sphinx.ext.apidoc import main as apidoc_main\n\n cur_dir = os.path.abspath(os.path.dirname(__file__))\n # Can't see a better way than running apidoc twice, for Bio & BioSQL\n # We don't care about the index.rst / conf.py (we have our own)\n # or the Makefile / make.bat (effectively same) clashing,\n # $ sphinx-apidoc -e -F -o /tmp/api/BioSQL BioSQL\n # $ sphinx-apidoc -e -F -o /tmp/api/Bio Bio\n tmp_path = tempfile.mkdtemp()\n apidoc_main([\"-e\", \"-F\", \"-o\", tmp_path, \"../../BioSQL\"])\n apidoc_main([\"-e\", \"-F\", \"-o\", tmp_path, \"../../Bio\"])\n os.remove(os.path.join(tmp_path, \"index.rst\")) # Using our own\n for filename in os.listdir(tmp_path):\n if filename.endswith(\".rst\"):\n shutil.move(\n os.path.join(tmp_path, filename), os.path.join(cur_dir, filename)\n )\n shutil.rmtree(tmp_path)\n\n for f in os.listdir(cur_dir):\n if f.startswith(\"Bio\") and f.endswith(\".rst\"):\n insert_github_link(f)\n\n\nclass BioPythonAPI(autodoc.ClassDocumenter):\n \"\"\"Custom Class Documenter for AbstractCommandline classes.\"\"\"\n\n def import_object(self):\n \"\"\"Import the class.\"\"\"\n ret = super().import_object()\n\n if not issubclass(self.object, Application.AbstractCommandline):\n return ret\n\n try:\n # If the object is an AbstractCommandline we instantiate it.\n self.object()\n except TypeError:\n # Throws if the object is the base AbstractCommandline class\n pass\n return ret\n\n\ndef setup(app):\n \"\"\"Over-ride Sphinx setup to trigger sphinx-apidoc.\"\"\"\n app.connect(\"builder-inited\", run_apidoc)\n\n app.add_css_file(\"biopython.css\")\n\n def add_documenter(app, env, docnames):\n app.add_autodocumenter(BioPythonAPI, True)\n\n # Over-ride autodoc documenter\n app.connect(\"env-before-read-docs\", add_documenter)\n", "path": "Doc/api/conf.py"}]}
3,750
879
gh_patches_debug_12382
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-1748
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Checkpoint adding "version_" at the start of the logger name **To reproduce :** ```python logger = pl.loggers.TensorBoardLogger( save_dir='.', version='my_name' name='lightning_logs' ) trainer = pl.Trainer(logger=logger, log_gpu_memory='all', max_epochs=10) ``` **Giving as a result:** - /lightning_logs/my_name: Where is saved the logs - /lightning_logs/version_my_name : Where is saved the checkpoints **Possible Explanation:** It seems like the checkpoint saving add "version_" to the start of the name even if the name have been given as a parameter : https://github.com/PyTorchLightning/pytorch-lightning/blob/3e8f2d99a9951bfb5fc67a98614128317913be1d/pytorch_lightning/trainer/callback_config.py#L52-L57 Even if in the Tensorboard Logger if the name is provided there is no "version_" prefix : https://github.com/PyTorchLightning/pytorch-lightning/blob/8b82ce09039e75f3fcb77a987c964249e38def3b/pytorch_lightning/loggers/tensorboard.py#L81 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pytorch_lightning/trainer/callback_config.py` Content: ``` 1 import os 2 from abc import ABC, abstractmethod 3 from typing import Union, List 4 5 6 from pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar 7 from pytorch_lightning.loggers import LightningLoggerBase 8 from pytorch_lightning.utilities.exceptions import MisconfigurationException 9 10 11 class TrainerCallbackConfigMixin(ABC): 12 13 # this is just a summary on variables used in this abstract class, 14 # the proper values/initialisation should be done in child class 15 callbacks: List[Callback] 16 default_root_dir: str 17 logger: Union[LightningLoggerBase, bool] 18 weights_save_path: str 19 ckpt_path: str 20 checkpoint_callback: ModelCheckpoint 21 progress_bar_refresh_rate: int 22 process_position: int 23 24 @property 25 @abstractmethod 26 def slurm_job_id(self) -> int: 27 """Warning: this is just empty shell for code implemented in other class.""" 28 29 @abstractmethod 30 def save_checkpoint(self, *args): 31 """Warning: this is just empty shell for code implemented in other class.""" 32 33 def configure_checkpoint_callback(self): 34 """ 35 Weight path set in this priority: 36 Checkpoint_callback's path (if passed in). 
37 User provided weights_saved_path 38 Otherwise use os.getcwd() 39 """ 40 ckpt_path = self.default_root_dir 41 if self.checkpoint_callback: 42 # init a default one 43 if self.logger is not None: 44 save_dir = (getattr(self.logger, 'save_dir', None) or 45 getattr(self.logger, '_save_dir', None) or 46 self.default_root_dir) 47 48 # weights_save_path overrides anything 49 if self.weights_save_path is not None: 50 save_dir = self.weights_save_path 51 52 ckpt_path = os.path.join( 53 save_dir, 54 self.logger.name, 55 f'version_{self.logger.version}', 56 "checkpoints" 57 ) 58 else: 59 ckpt_path = os.path.join(self.default_root_dir, "checkpoints") 60 61 # when no val step is defined, use 'loss' otherwise 'val_loss' 62 train_step_only = not self.is_overriden('validation_step') 63 monitor_key = 'loss' if train_step_only else 'val_loss' 64 65 if self.checkpoint_callback is True: 66 os.makedirs(ckpt_path, exist_ok=True) 67 self.checkpoint_callback = ModelCheckpoint( 68 filepath=ckpt_path, 69 monitor=monitor_key 70 ) 71 # If user specified None in filepath, override with runtime default 72 elif isinstance(self.checkpoint_callback, ModelCheckpoint) \ 73 and self.checkpoint_callback.dirpath is None: 74 self.checkpoint_callback.dirpath = ckpt_path 75 self.checkpoint_callback.filename = '{epoch}' 76 os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True) 77 elif self.checkpoint_callback is False: 78 self.checkpoint_callback = None 79 80 self.ckpt_path = ckpt_path 81 82 if self.checkpoint_callback: 83 # set the path for the callbacks 84 self.checkpoint_callback.save_function = self.save_checkpoint 85 86 # if checkpoint callback used, then override the weights path 87 self.weights_save_path = self.checkpoint_callback.dirpath 88 89 # if weights_save_path is still none here, set to current working dir 90 if self.weights_save_path is None: 91 self.weights_save_path = self.default_root_dir 92 93 def configure_early_stopping(self, early_stop_callback): 94 if early_stop_callback is True or None: 95 self.early_stop_callback = EarlyStopping( 96 monitor='val_loss', 97 patience=3, 98 strict=True, 99 verbose=True, 100 mode='min' 101 ) 102 self.enable_early_stop = True 103 elif not early_stop_callback: 104 self.early_stop_callback = None 105 self.enable_early_stop = False 106 else: 107 self.early_stop_callback = early_stop_callback 108 self.enable_early_stop = True 109 110 def configure_progress_bar(self): 111 progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)] 112 if len(progress_bars) > 1: 113 raise MisconfigurationException( 114 'You added multiple progress bar callbacks to the Trainer, but currently only one' 115 ' progress bar is supported.' 116 ) 117 elif len(progress_bars) == 1: 118 self.progress_bar_callback = progress_bars[0] 119 elif self.progress_bar_refresh_rate > 0: 120 self.progress_bar_callback = ProgressBar( 121 refresh_rate=self.progress_bar_refresh_rate, 122 process_position=self.process_position, 123 ) 124 self.callbacks.append(self.progress_bar_callback) 125 else: 126 self.progress_bar_callback = None 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pytorch_lightning/trainer/callback_config.py b/pytorch_lightning/trainer/callback_config.py --- a/pytorch_lightning/trainer/callback_config.py +++ b/pytorch_lightning/trainer/callback_config.py @@ -49,10 +49,12 @@ if self.weights_save_path is not None: save_dir = self.weights_save_path + version = self.logger.version if isinstance( + self.logger.version, str) else f'version_{self.logger.version}' ckpt_path = os.path.join( save_dir, self.logger.name, - f'version_{self.logger.version}', + version, "checkpoints" ) else:
{"golden_diff": "diff --git a/pytorch_lightning/trainer/callback_config.py b/pytorch_lightning/trainer/callback_config.py\n--- a/pytorch_lightning/trainer/callback_config.py\n+++ b/pytorch_lightning/trainer/callback_config.py\n@@ -49,10 +49,12 @@\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n \n+ version = self.logger.version if isinstance(\n+ self.logger.version, str) else f'version_{self.logger.version}'\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n- f'version_{self.logger.version}',\n+ version,\n \"checkpoints\"\n )\n else:\n", "issue": "Checkpoint adding \"version_\" at the start of the logger name\n**To reproduce :** \r\n```python\r\nlogger = pl.loggers.TensorBoardLogger(\r\n save_dir='.',\r\n version='my_name'\r\n name='lightning_logs'\r\n )\r\n\r\ntrainer = pl.Trainer(logger=logger, log_gpu_memory='all', max_epochs=10)\r\n```\r\n\r\n**Giving as a result:**\r\n\r\n- /lightning_logs/my_name: Where is saved the logs\r\n- /lightning_logs/version_my_name : Where is saved the checkpoints\r\n\r\n\r\n\r\n\r\n**Possible Explanation:** \r\nIt seems like the checkpoint saving add \"version_\" to the start of the name even if the name have been given as a parameter : \r\n\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/3e8f2d99a9951bfb5fc67a98614128317913be1d/pytorch_lightning/trainer/callback_config.py#L52-L57\r\n\r\nEven if in the Tensorboard Logger if the name is provided there is no \"version_\" prefix :\r\n\r\nhttps://github.com/PyTorchLightning/pytorch-lightning/blob/8b82ce09039e75f3fcb77a987c964249e38def3b/pytorch_lightning/loggers/tensorboard.py#L81\r\n\r\n\r\n\n", "before_files": [{"content": "import os\nfrom abc import ABC, abstractmethod\nfrom typing import Union, List\n\n\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\nclass TrainerCallbackConfigMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n callbacks: List[Callback]\n default_root_dir: str\n logger: Union[LightningLoggerBase, bool]\n weights_save_path: str\n ckpt_path: str\n checkpoint_callback: ModelCheckpoint\n progress_bar_refresh_rate: int\n process_position: int\n\n @property\n @abstractmethod\n def slurm_job_id(self) -> int:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def save_checkpoint(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def configure_checkpoint_callback(self):\n \"\"\"\n Weight path set in this priority:\n Checkpoint_callback's path (if passed in).\n User provided weights_saved_path\n Otherwise use os.getcwd()\n \"\"\"\n ckpt_path = self.default_root_dir\n if self.checkpoint_callback:\n # init a default one\n if self.logger is not None:\n save_dir = (getattr(self.logger, 'save_dir', None) or\n getattr(self.logger, '_save_dir', None) or\n self.default_root_dir)\n\n # weights_save_path overrides anything\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n f'version_{self.logger.version}',\n \"checkpoints\"\n )\n else:\n ckpt_path = os.path.join(self.default_root_dir, \"checkpoints\")\n\n # when no val step is defined, use 'loss' otherwise 
'val_loss'\n train_step_only = not self.is_overriden('validation_step')\n monitor_key = 'loss' if train_step_only else 'val_loss'\n\n if self.checkpoint_callback is True:\n os.makedirs(ckpt_path, exist_ok=True)\n self.checkpoint_callback = ModelCheckpoint(\n filepath=ckpt_path,\n monitor=monitor_key\n )\n # If user specified None in filepath, override with runtime default\n elif isinstance(self.checkpoint_callback, ModelCheckpoint) \\\n and self.checkpoint_callback.dirpath is None:\n self.checkpoint_callback.dirpath = ckpt_path\n self.checkpoint_callback.filename = '{epoch}'\n os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True)\n elif self.checkpoint_callback is False:\n self.checkpoint_callback = None\n\n self.ckpt_path = ckpt_path\n\n if self.checkpoint_callback:\n # set the path for the callbacks\n self.checkpoint_callback.save_function = self.save_checkpoint\n\n # if checkpoint callback used, then override the weights path\n self.weights_save_path = self.checkpoint_callback.dirpath\n\n # if weights_save_path is still none here, set to current working dir\n if self.weights_save_path is None:\n self.weights_save_path = self.default_root_dir\n\n def configure_early_stopping(self, early_stop_callback):\n if early_stop_callback is True or None:\n self.early_stop_callback = EarlyStopping(\n monitor='val_loss',\n patience=3,\n strict=True,\n verbose=True,\n mode='min'\n )\n self.enable_early_stop = True\n elif not early_stop_callback:\n self.early_stop_callback = None\n self.enable_early_stop = False\n else:\n self.early_stop_callback = early_stop_callback\n self.enable_early_stop = True\n\n def configure_progress_bar(self):\n progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)]\n if len(progress_bars) > 1:\n raise MisconfigurationException(\n 'You added multiple progress bar callbacks to the Trainer, but currently only one'\n ' progress bar is supported.'\n )\n elif len(progress_bars) == 1:\n self.progress_bar_callback = progress_bars[0]\n elif self.progress_bar_refresh_rate > 0:\n self.progress_bar_callback = ProgressBar(\n refresh_rate=self.progress_bar_refresh_rate,\n process_position=self.process_position,\n )\n self.callbacks.append(self.progress_bar_callback)\n else:\n self.progress_bar_callback = None\n", "path": "pytorch_lightning/trainer/callback_config.py"}], "after_files": [{"content": "import os\nfrom abc import ABC, abstractmethod\nfrom typing import Union, List\n\n\nfrom pytorch_lightning.callbacks import Callback, ModelCheckpoint, EarlyStopping, ProgressBarBase, ProgressBar\nfrom pytorch_lightning.loggers import LightningLoggerBase\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n\nclass TrainerCallbackConfigMixin(ABC):\n\n # this is just a summary on variables used in this abstract class,\n # the proper values/initialisation should be done in child class\n callbacks: List[Callback]\n default_root_dir: str\n logger: Union[LightningLoggerBase, bool]\n weights_save_path: str\n ckpt_path: str\n checkpoint_callback: ModelCheckpoint\n progress_bar_refresh_rate: int\n process_position: int\n\n @property\n @abstractmethod\n def slurm_job_id(self) -> int:\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n @abstractmethod\n def save_checkpoint(self, *args):\n \"\"\"Warning: this is just empty shell for code implemented in other class.\"\"\"\n\n def configure_checkpoint_callback(self):\n \"\"\"\n Weight path set in this priority:\n Checkpoint_callback's path (if passed in).\n User 
provided weights_saved_path\n Otherwise use os.getcwd()\n \"\"\"\n ckpt_path = self.default_root_dir\n if self.checkpoint_callback:\n # init a default one\n if self.logger is not None:\n save_dir = (getattr(self.logger, 'save_dir', None) or\n getattr(self.logger, '_save_dir', None) or\n self.default_root_dir)\n\n # weights_save_path overrides anything\n if self.weights_save_path is not None:\n save_dir = self.weights_save_path\n\n version = self.logger.version if isinstance(\n self.logger.version, str) else f'version_{self.logger.version}'\n ckpt_path = os.path.join(\n save_dir,\n self.logger.name,\n version,\n \"checkpoints\"\n )\n else:\n ckpt_path = os.path.join(self.default_root_dir, \"checkpoints\")\n\n # when no val step is defined, use 'loss' otherwise 'val_loss'\n train_step_only = not self.is_overriden('validation_step')\n monitor_key = 'loss' if train_step_only else 'val_loss'\n\n if self.checkpoint_callback is True:\n os.makedirs(ckpt_path, exist_ok=True)\n self.checkpoint_callback = ModelCheckpoint(\n filepath=ckpt_path,\n monitor=monitor_key\n )\n # If user specified None in filepath, override with runtime default\n elif isinstance(self.checkpoint_callback, ModelCheckpoint) \\\n and self.checkpoint_callback.dirpath is None:\n self.checkpoint_callback.dirpath = ckpt_path\n self.checkpoint_callback.filename = '{epoch}'\n os.makedirs(self.checkpoint_callback.dirpath, exist_ok=True)\n elif self.checkpoint_callback is False:\n self.checkpoint_callback = None\n\n self.ckpt_path = ckpt_path\n\n if self.checkpoint_callback:\n # set the path for the callbacks\n self.checkpoint_callback.save_function = self.save_checkpoint\n\n # if checkpoint callback used, then override the weights path\n self.weights_save_path = self.checkpoint_callback.dirpath\n\n # if weights_save_path is still none here, set to current working dir\n if self.weights_save_path is None:\n self.weights_save_path = self.default_root_dir\n\n def configure_early_stopping(self, early_stop_callback):\n if early_stop_callback is True or None:\n self.early_stop_callback = EarlyStopping(\n monitor='val_loss',\n patience=3,\n strict=True,\n verbose=True,\n mode='min'\n )\n self.enable_early_stop = True\n elif not early_stop_callback:\n self.early_stop_callback = None\n self.enable_early_stop = False\n else:\n self.early_stop_callback = early_stop_callback\n self.enable_early_stop = True\n\n def configure_progress_bar(self):\n progress_bars = [c for c in self.callbacks if isinstance(c, ProgressBarBase)]\n if len(progress_bars) > 1:\n raise MisconfigurationException(\n 'You added multiple progress bar callbacks to the Trainer, but currently only one'\n ' progress bar is supported.'\n )\n elif len(progress_bars) == 1:\n self.progress_bar_callback = progress_bars[0]\n elif self.progress_bar_refresh_rate > 0:\n self.progress_bar_callback = ProgressBar(\n refresh_rate=self.progress_bar_refresh_rate,\n process_position=self.process_position,\n )\n self.callbacks.append(self.progress_bar_callback)\n else:\n self.progress_bar_callback = None\n", "path": "pytorch_lightning/trainer/callback_config.py"}]}
1,827
152
gh_patches_debug_20094
rasdani/github-patches
git_diff
Flexget__Flexget-2495
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk' <!--- Before opening an issue, verify: - Is this a feature request? Post it on https://feathub.com/Flexget/Flexget - Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions - Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon - Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues - Did you fill out the issue template as completely as possible? The issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this! ---> ### Expected behaviour: After daemon startup, tasks are run without errors <!--- Please don't just say "it doesn't crash" or "it works". Explain what the expected result is. ---> ### Actual behaviour: Tasks are aborted ### Steps to reproduce: - Step 1: Configure exists plugin - Step 2: Run flexget #### Config: AFAIR, any config using exists plugin #### Log: <details> <summary>(click to expand)</summary> ``` 2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk' Traceback (most recent call last): File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py", line 520, in __run_plugin result = method(*args, **kwargs) File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py", line 20, in __call__ return self.func(*args, **kwargs) File "/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py", line 41, in on_task_filter for p in folder.walk(errors='ignore'): AttributeError: 'PosixPath' object has no attribute 'walk' ``` </details> ### Additional information: - FlexGet version: 3.0.1 - Python version: 3.7.2 - Installation method: pip - Using daemon (yes/no): yes - OS and version: debian unstable - Link to crash log: N/A <!--- In config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on. Please verify that the following data is present before submitting your issue: - Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible. - Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`). - FlexGet version (use `flexget -V` to get it). - Full Python version, for example `2.7.11` (use `python -V` to get it). - Installation method (pip, git install, etc). - Whether or not you're running FlexGet as a daemon. - OS and version. - Attach crash log if one was generated, in addition to the debug-level log. It can be found in the directory with your config file. ---> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `flexget/plugins/filter/exists.py` Content: ``` 1 import logging 2 import platform 3 from pathlib import Path 4 5 from flexget import plugin 6 from flexget.config_schema import one_or_more 7 from flexget.event import event 8 9 log = logging.getLogger('exists') 10 11 12 class FilterExists: 13 """ 14 Reject entries that already exist in given path. 15 16 Example:: 17 18 exists: /storage/movies/ 19 """ 20 21 schema = one_or_more({'type': 'string', 'format': 'path'}) 22 23 def prepare_config(self, config): 24 # If only a single path is passed turn it into a 1 element list 25 if isinstance(config, str): 26 config = [config] 27 return config 28 29 @plugin.priority(-1) 30 def on_task_filter(self, task, config): 31 if not task.accepted: 32 log.debug('No accepted entries, not scanning for existing.') 33 return 34 log.verbose('Scanning path(s) for existing files.') 35 config = self.prepare_config(config) 36 filenames = {} 37 for folder in config: 38 folder = Path(folder).expanduser() 39 if not folder.exists(): 40 raise plugin.PluginWarning('Path %s does not exist' % folder, log) 41 for p in folder.walk(errors='ignore'): 42 key = p.name 43 # windows file system is not case sensitive 44 if platform.system() == 'Windows': 45 key = key.lower() 46 filenames[key] = p 47 for entry in task.accepted: 48 # priority is: filename, location (filename only), title 49 name = Path(entry.get('filename', entry.get('location', entry['title']))).name 50 if platform.system() == 'Windows': 51 name = name.lower() 52 if name in filenames: 53 log.debug('Found %s in %s' % (name, filenames[name])) 54 entry.reject('exists in %s' % filenames[name]) 55 56 57 @event('plugin.register') 58 def register_plugin(): 59 plugin.register(FilterExists, 'exists', api_ver=2) 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py --- a/flexget/plugins/filter/exists.py +++ b/flexget/plugins/filter/exists.py @@ -38,12 +38,13 @@ folder = Path(folder).expanduser() if not folder.exists(): raise plugin.PluginWarning('Path %s does not exist' % folder, log) - for p in folder.walk(errors='ignore'): - key = p.name - # windows file system is not case sensitive - if platform.system() == 'Windows': - key = key.lower() - filenames[key] = p + for p in folder.rglob('*'): + if p.is_file(): + key = p.name + # windows file system is not case sensitive + if platform.system() == 'Windows': + key = key.lower() + filenames[key] = p for entry in task.accepted: # priority is: filename, location (filename only), title name = Path(entry.get('filename', entry.get('location', entry['title']))).name
{"golden_diff": "diff --git a/flexget/plugins/filter/exists.py b/flexget/plugins/filter/exists.py\n--- a/flexget/plugins/filter/exists.py\n+++ b/flexget/plugins/filter/exists.py\n@@ -38,12 +38,13 @@\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n- for p in folder.walk(errors='ignore'):\n- key = p.name\n- # windows file system is not case sensitive\n- if platform.system() == 'Windows':\n- key = key.lower()\n- filenames[key] = p\n+ for p in folder.rglob('*'):\n+ if p.is_file():\n+ key = p.name\n+ # windows file system is not case sensitive\n+ if platform.system() == 'Windows':\n+ key = key.lower()\n+ filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n", "issue": "Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\n<!---\r\nBefore opening an issue, verify:\r\n\r\n- Is this a feature request? Post it on https://feathub.com/Flexget/Flexget\r\n- Did you recently upgrade? Look at the Change Log and Upgrade Actions to make sure that you don't need to make any changes to your config https://flexget.com/ChangeLog https://flexget.com/UpgradeActions\r\n- Are you running FlexGet as a daemon? Stop it completely and then start it again https://flexget.com/CLI/daemon\r\n- Did you search to see if the issue already exists? https://github.com/Flexget/Flexget/issues\r\n- Did you fill out the issue template as completely as possible?\r\n\r\nThe issue template is here because it helps to ensure you submitted all the necessary information the first time, and allows us to more quickly review issues. Please fill it out correctly and do not ignore it, no matter how irrelevant you think it may be. Thanks in advance for your help with this!\r\n--->\r\n\r\n### Expected behaviour:\r\nAfter daemon startup, tasks are run without errors\r\n<!---\r\nPlease don't just say \"it doesn't crash\" or \"it works\". 
Explain what the expected result is.\r\n--->\r\n\r\n### Actual behaviour:\r\nTasks are aborted\r\n### Steps to reproduce:\r\n- Step 1: Configure exists plugin\r\n- Step 2: Run flexget\r\n\r\n#### Config:\r\nAFAIR, any config using exists plugin\r\n \r\n#### Log:\r\n<details>\r\n <summary>(click to expand)</summary>\r\n\r\n```\r\n2019-11-13 20:32 CRITICAL task tv_rarbg BUG: Unhandled error in plugin exists: 'PosixPath' object has no attribute 'walk'\r\nTraceback (most recent call last):\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/task.py\", line 520, in __run_plugin\r\n result = method(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/event.py\", line 20, in __call__\r\n return self.func(*args, **kwargs)\r\n File \"/home/carno/envs/flexget3/lib/python3.7/site-packages/flexget/plugins/filter/exists.py\", line 41, in on_task_filter\r\n for p in folder.walk(errors='ignore'):\r\nAttributeError: 'PosixPath' object has no attribute 'walk'\r\n\r\n```\r\n</details>\r\n\r\n### Additional information:\r\n\r\n- FlexGet version: 3.0.1\r\n- Python version: 3.7.2\r\n- Installation method: pip\r\n- Using daemon (yes/no): yes\r\n- OS and version: debian unstable\r\n- Link to crash log: N/A\r\n\r\n<!---\r\nIn config and debug/crash logs, remember to redact any personal or sensitive information such as passwords, API keys, private URLs and so on.\r\n\r\nPlease verify that the following data is present before submitting your issue:\r\n\r\n- Link to a paste service or paste above the relevant config (preferably full config, including templates if present). Please make sure the paste does not expire, if possible.\r\n- Link to a paste service or paste above debug-level logs of the relevant task/s (use `flexget -L debug execute --tasks <Task_name>`).\r\n- FlexGet version (use `flexget -V` to get it).\r\n- Full Python version, for example `2.7.11` (use `python -V` to get it). \r\n- Installation method (pip, git install, etc).\r\n- Whether or not you're running FlexGet as a daemon.\r\n- OS and version.\r\n- Attach crash log if one was generated, in addition to the debug-level log. 
It can be found in the directory with your config file.\r\n--->\r\n\n", "before_files": [{"content": "import logging\nimport platform\nfrom pathlib import Path\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\n\nlog = logging.getLogger('exists')\n\n\nclass FilterExists:\n \"\"\"\n Reject entries that already exist in given path.\n\n Example::\n\n exists: /storage/movies/\n \"\"\"\n\n schema = one_or_more({'type': 'string', 'format': 'path'})\n\n def prepare_config(self, config):\n # If only a single path is passed turn it into a 1 element list\n if isinstance(config, str):\n config = [config]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('No accepted entries, not scanning for existing.')\n return\n log.verbose('Scanning path(s) for existing files.')\n config = self.prepare_config(config)\n filenames = {}\n for folder in config:\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n for p in folder.walk(errors='ignore'):\n key = p.name\n # windows file system is not case sensitive\n if platform.system() == 'Windows':\n key = key.lower()\n filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n if platform.system() == 'Windows':\n name = name.lower()\n if name in filenames:\n log.debug('Found %s in %s' % (name, filenames[name]))\n entry.reject('exists in %s' % filenames[name])\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExists, 'exists', api_ver=2)\n", "path": "flexget/plugins/filter/exists.py"}], "after_files": [{"content": "import logging\nimport platform\nfrom pathlib import Path\n\nfrom flexget import plugin\nfrom flexget.config_schema import one_or_more\nfrom flexget.event import event\n\nlog = logging.getLogger('exists')\n\n\nclass FilterExists:\n \"\"\"\n Reject entries that already exist in given path.\n\n Example::\n\n exists: /storage/movies/\n \"\"\"\n\n schema = one_or_more({'type': 'string', 'format': 'path'})\n\n def prepare_config(self, config):\n # If only a single path is passed turn it into a 1 element list\n if isinstance(config, str):\n config = [config]\n return config\n\n @plugin.priority(-1)\n def on_task_filter(self, task, config):\n if not task.accepted:\n log.debug('No accepted entries, not scanning for existing.')\n return\n log.verbose('Scanning path(s) for existing files.')\n config = self.prepare_config(config)\n filenames = {}\n for folder in config:\n folder = Path(folder).expanduser()\n if not folder.exists():\n raise plugin.PluginWarning('Path %s does not exist' % folder, log)\n for p in folder.rglob('*'):\n if p.is_file():\n key = p.name\n # windows file system is not case sensitive\n if platform.system() == 'Windows':\n key = key.lower()\n filenames[key] = p\n for entry in task.accepted:\n # priority is: filename, location (filename only), title\n name = Path(entry.get('filename', entry.get('location', entry['title']))).name\n if platform.system() == 'Windows':\n name = name.lower()\n if name in filenames:\n log.debug('Found %s in %s' % (name, filenames[name]))\n entry.reject('exists in %s' % filenames[name])\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(FilterExists, 'exists', api_ver=2)\n", "path": "flexget/plugins/filter/exists.py"}]}
1,607
248
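Illustrative sketch (not part of the dataset row above): the golden diff for this row swaps the removed `path.py`-style `folder.walk(errors='ignore')` call for `pathlib.Path.rglob('*')` with an `is_file()` check, since `pathlib.Path` objects on Python 3.7 have no `walk()` method. A minimal standalone version of that pattern, using a hypothetical `collect_filenames` helper and a plain `FileNotFoundError` in place of the plugin's `PluginWarning`, could look like:

```python
import platform
from pathlib import Path


def collect_filenames(folder: str) -> dict:
    """Map file names (lower-cased on Windows) to their paths, mirroring the patched plugin."""
    filenames = {}
    root = Path(folder).expanduser()
    if not root.exists():
        raise FileNotFoundError(f"Path {root} does not exist")
    # rglob('*') plus is_file() is the pathlib equivalent of the old recursive walk().
    for p in root.rglob("*"):
        if p.is_file():
            key = p.name.lower() if platform.system() == "Windows" else p.name
            filenames[key] = p
    return filenames
```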
gh_patches_debug_2309
rasdani/github-patches
git_diff
chaoss__augur-759
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Need Model and worker info for releases worker The new releases worker needs to have sections added for the model and the worker in the augur.config.json in order to run. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `augur/cli/configure.py` Content: ``` 1 #SPDX-License-Identifier: MIT 2 """ 3 Augur library script for generating a config file 4 """ 5 6 import os 7 import click 8 import json 9 10 from augur import logger 11 12 ENVVAR_PREFIX = "AUGUR_" 13 14 default_config = { 15 "Database": { 16 "name": "augur", 17 "host": "localhost", 18 "key": "key", 19 "password": "augur", 20 "port": 5432, 21 "user": "augur", 22 "gitlab_api_key":"gitlab_api_key" 23 }, 24 "Housekeeper": { 25 "jobs": [ 26 { 27 "all_focused": 1, 28 "delay": 150000, 29 "given": [ 30 "github_url" 31 ], 32 "model": "issues", 33 "repo_group_id": 0 34 }, 35 { 36 "delay": 150000, 37 "given": [ 38 "github_url" 39 ], 40 "model": "pull_request_commits", 41 "repo_group_id": 0 42 }, 43 { 44 "delay": 150000, 45 "given": [ 46 "github_url" 47 ], 48 "model": "repo_info", 49 "repo_group_id": 0 50 }, 51 { 52 "delay": 150000, 53 "given": [ 54 "repo_group" 55 ], 56 "model": "commits", 57 "repo_group_id": 0 58 }, 59 { 60 "delay": 1000000, 61 "given": [ 62 "github_url" 63 ], 64 "model": "pull_requests", 65 "repo_group_id": 0 66 }, 67 { 68 "delay": 1000000, 69 "given": [ 70 "git_url" 71 ], 72 "model": "contributors", 73 "repo_group_id": 0 74 }, 75 { 76 "delay": 1000000, 77 "given": [ 78 "git_url" 79 ], 80 "model": "insights", 81 "repo_group_id": 0 82 }, 83 { 84 "delay": 1000000, 85 "given": [ 86 "git_url" 87 ], 88 "model": "badges", 89 "repo_group_id": 0 90 }, 91 { 92 "delay": 1000000, 93 "given": [ 94 "git_url" 95 ], 96 "model": "value", 97 "repo_group_id": 0 98 }, 99 { 100 "delay": 100000, 101 "given": [ 102 "github_url" 103 ], 104 "model": "pull_request_files", 105 "repo_group_id": 0 106 } 107 ] 108 }, 109 "Workers": { 110 "facade_worker": { 111 "port": 50100, 112 "repo_directory": "repos/", 113 "switch": 1, 114 "workers": 1 115 }, 116 "github_worker": { 117 "port": 50200, 118 "switch": 1, 119 "workers": 1 120 }, 121 "insight_worker": { 122 "port": 50300, 123 "metrics": {"issues-new": "issues", "code-changes": "commit_count", "code-changes-lines": "added", 124 "reviews": "pull_requests", "contributors-new": "new_contributors"}, 125 "contamination": 0.041, 126 "switch": 0, 127 "workers": 1, 128 "training_days": 365, 129 "anomaly_days": 2 130 }, 131 "linux_badge_worker": { 132 "port": 50400, 133 "switch": 1, 134 "workers": 1 135 }, 136 "metric_status_worker": { 137 "port": 50500, 138 "switch": 0, 139 "workers": 1 140 }, 141 "pull_request_worker": { 142 "port": 50600, 143 "switch": 1, 144 "workers": 1 145 }, 146 "repo_info_worker": { 147 "port": 50700, 148 "switch": 1, 149 "workers": 1 150 }, 151 "value_worker": { 152 "port": 50800, 153 "scc_bin": "scc", 154 "switch": 0, 155 "workers": 1 156 }, 157 "contributor_worker": { 158 "port": 50900, 159 "switch": 1, 160 "workers": 1 161 }, 162 "gitlab_issues_worker": { 163 "port": 51000, 164 "switch": 1, 165 "workers": 1 166 }, 167 }, 168 "Facade": { 169 "check_updates": 1, 170 "clone_repos": 1, 171 "create_xlsx_summary_files": 1, 172 "delete_marked_repos": 0, 173 "fix_affiliations": 1, 174 "force_analysis": 1, 175 "force_invalidate_caches": 1, 176 "force_updates": 1, 177 "limited_run": 0, 178 
"multithreaded": 0, 179 "nuke_stored_affiliations": 0, 180 "pull_repos": 1, 181 "rebuild_caches": 1, 182 "run_analysis": 1 183 }, 184 "Server": { 185 "cache_expire": "3600", 186 "host": "0.0.0.0", 187 "port": "5000", 188 "workers": 4, 189 "timeout": 60 190 }, 191 "Frontend": { 192 "host": "0.0.0.0", 193 "port": "5000" 194 }, 195 "Development": { 196 "log_level": "INFO" 197 } 198 } 199 200 @click.group('configure', short_help='Generate an augur.config.json') 201 def cli(): 202 pass 203 204 @cli.command('generate') 205 @click.option('--db_name', help="Database name for your data collection database", envvar=ENVVAR_PREFIX + 'DB_NAME') 206 @click.option('--db_host', help="Host for your data collection database", envvar=ENVVAR_PREFIX + 'DB_HOST') 207 @click.option('--db_user', help="User for your data collection database", envvar=ENVVAR_PREFIX + 'DB_USER') 208 @click.option('--db_port', help="Port for your data collection database", envvar=ENVVAR_PREFIX + 'DB_PORT') 209 @click.option('--db_password', help="Password for your data collection database", envvar=ENVVAR_PREFIX + 'DB_PASSWORD') 210 @click.option('--gitlab_api_key', help="GitLab API key for data collection from the GitLab API", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY') 211 @click.option('--github_api_key', help="GitHub API key for data collection from the GitHub API", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY') 212 @click.option('--facade_repo_directory', help="Directory on the database server where Facade should clone repos", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY') 213 @click.option('--rc-config-file', help="File containing existing config whose values will be used as the defaults", type=click.Path(exists=True)) 214 def generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key): 215 """ 216 Generate an augur.config.json 217 """ 218 219 config = default_config 220 rc_config = None 221 222 if rc_config_file != None: 223 try: 224 with open(os.path.abspath(rc_config_file), 'r') as f: 225 rc_config = json.load(f) 226 for item in rc_config.items(): 227 if item[0] == 'Workers': 228 for index in range(0, len(item[1])): 229 key = list(item[1].keys())[index] 230 secondary_dict = list(item[1].values())[index] 231 232 for secondary_dict_index in range(0, len(secondary_dict)): 233 secondary_key = list(secondary_dict.keys())[secondary_dict_index] 234 value = list(secondary_dict.values())[secondary_dict_index] 235 236 config[item[0]][key][secondary_key] = value 237 else: 238 for index, key in enumerate(list(item[1].keys())): 239 config[item[0]][key] = list(item[1].values())[index] 240 241 logger.info('Predefined config successfully loaded') 242 243 except Exception as e: 244 logger.error(f"Error opening {rc_config_file}: {str(e)}") 245 246 if db_name is not None: 247 config['Database']['database'] = db_name # this is for backwards compatibility 248 if db_name is not None: 249 config['Database']['name'] = db_name 250 if db_host is not None: 251 config['Database']['host'] = db_host 252 if db_port is not None: 253 config['Database']['port'] = int(db_port) 254 if db_user is not None: 255 config['Database']['user'] = db_user 256 if db_password is not None: 257 config['Database']['password'] = db_password 258 if github_api_key is not None: 259 config['Database']['key'] = github_api_key 260 if gitlab_api_key is not None: 261 config['Database']['gitlab_api_key'] = gitlab_api_key 262 if facade_repo_directory is not None: 263 config['Workers']['facade_worker']['repo_directory'] = 
facade_repo_directory 264 265 try: 266 with open(os.path.abspath('augur.config.json'), 'w') as f: 267 json.dump(config, f, indent=4) 268 logger.info('augur.config.json successfully created') 269 except Exception as e: 270 logger.error("Error writing augur.config.json " + str(e)) 271 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/augur/cli/configure.py b/augur/cli/configure.py --- a/augur/cli/configure.py +++ b/augur/cli/configure.py @@ -164,6 +164,11 @@ "switch": 1, "workers": 1 }, + "release_worker": { + "port": 51100, + "switch": 1, + "workers": 1 + }, }, "Facade": { "check_updates": 1,
{"golden_diff": "diff --git a/augur/cli/configure.py b/augur/cli/configure.py\n--- a/augur/cli/configure.py\n+++ b/augur/cli/configure.py\n@@ -164,6 +164,11 @@\n \"switch\": 1,\n \"workers\": 1\n },\n+ \"release_worker\": {\n+ \"port\": 51100,\n+ \"switch\": 1,\n+ \"workers\": 1\n+ },\n },\n \"Facade\": {\n \"check_updates\": 1,\n", "issue": "Need Model and worker info for releases worker\nThe new releases worker needs to have sections added for the model and the worker in the augur.config.json in order to run. \r\n\n", "before_files": [{"content": "#SPDX-License-Identifier: MIT\n\"\"\"\nAugur library script for generating a config file\n\"\"\"\n\nimport os\nimport click\nimport json\n\nfrom augur import logger\n\nENVVAR_PREFIX = \"AUGUR_\"\n\ndefault_config = {\n \"Database\": {\n \"name\": \"augur\",\n \"host\": \"localhost\",\n \"key\": \"key\",\n \"password\": \"augur\",\n \"port\": 5432,\n \"user\": \"augur\",\n \"gitlab_api_key\":\"gitlab_api_key\"\n },\n \"Housekeeper\": {\n \"jobs\": [\n {\n \"all_focused\": 1,\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"issues\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_request_commits\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"repo_info\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"repo_group\"\n ],\n \"model\": \"commits\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_requests\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"contributors\",\n \"repo_group_id\": 0\n }, \n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"insights\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"badges\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"value\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 100000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_request_files\",\n \"repo_group_id\": 0\n }\n ]\n },\n \"Workers\": {\n \"facade_worker\": {\n \"port\": 50100,\n \"repo_directory\": \"repos/\",\n \"switch\": 1,\n \"workers\": 1\n },\n \"github_worker\": {\n \"port\": 50200,\n \"switch\": 1,\n \"workers\": 1\n },\n \"insight_worker\": {\n \"port\": 50300,\n \"metrics\": {\"issues-new\": \"issues\", \"code-changes\": \"commit_count\", \"code-changes-lines\": \"added\", \n \"reviews\": \"pull_requests\", \"contributors-new\": \"new_contributors\"},\n \"contamination\": 0.041,\n \"switch\": 0,\n \"workers\": 1,\n \"training_days\": 365,\n \"anomaly_days\": 2\n },\n \"linux_badge_worker\": {\n \"port\": 50400,\n \"switch\": 1,\n \"workers\": 1\n },\n \"metric_status_worker\": {\n \"port\": 50500,\n \"switch\": 0,\n \"workers\": 1\n },\n \"pull_request_worker\": {\n \"port\": 50600,\n \"switch\": 1,\n \"workers\": 1\n },\n \"repo_info_worker\": {\n \"port\": 50700,\n \"switch\": 1,\n \"workers\": 1\n },\n \"value_worker\": {\n \"port\": 50800,\n \"scc_bin\": \"scc\",\n \"switch\": 0,\n \"workers\": 1\n },\n \"contributor_worker\": {\n \"port\": 50900,\n \"switch\": 1,\n \"workers\": 1\n },\n \"gitlab_issues_worker\": {\n \"port\": 51000,\n \"switch\": 1,\n \"workers\": 1\n },\n },\n \"Facade\": {\n \"check_updates\": 1,\n \"clone_repos\": 1,\n \"create_xlsx_summary_files\": 1,\n \"delete_marked_repos\": 0,\n 
\"fix_affiliations\": 1,\n \"force_analysis\": 1,\n \"force_invalidate_caches\": 1,\n \"force_updates\": 1,\n \"limited_run\": 0,\n \"multithreaded\": 0,\n \"nuke_stored_affiliations\": 0,\n \"pull_repos\": 1,\n \"rebuild_caches\": 1,\n \"run_analysis\": 1\n },\n \"Server\": {\n \"cache_expire\": \"3600\",\n \"host\": \"0.0.0.0\",\n \"port\": \"5000\",\n \"workers\": 4,\n \"timeout\": 60\n },\n \"Frontend\": {\n \"host\": \"0.0.0.0\",\n \"port\": \"5000\"\n },\n \"Development\": {\n \"log_level\": \"INFO\"\n }\n }\n\[email protected]('configure', short_help='Generate an augur.config.json')\ndef cli():\n pass\n\[email protected]('generate')\[email protected]('--db_name', help=\"Database name for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_NAME')\[email protected]('--db_host', help=\"Host for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_HOST')\[email protected]('--db_user', help=\"User for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_USER')\[email protected]('--db_port', help=\"Port for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_PORT')\[email protected]('--db_password', help=\"Password for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_PASSWORD')\[email protected]('--gitlab_api_key', help=\"GitLab API key for data collection from the GitLab API\", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY')\[email protected]('--github_api_key', help=\"GitHub API key for data collection from the GitHub API\", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY')\[email protected]('--facade_repo_directory', help=\"Directory on the database server where Facade should clone repos\", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY')\[email protected]('--rc-config-file', help=\"File containing existing config whose values will be used as the defaults\", type=click.Path(exists=True))\ndef generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key):\n \"\"\"\n Generate an augur.config.json\n \"\"\"\n\n config = default_config\n rc_config = None\n\n if rc_config_file != None:\n try:\n with open(os.path.abspath(rc_config_file), 'r') as f:\n rc_config = json.load(f)\n for item in rc_config.items():\n if item[0] == 'Workers':\n for index in range(0, len(item[1])):\n key = list(item[1].keys())[index]\n secondary_dict = list(item[1].values())[index]\n\n for secondary_dict_index in range(0, len(secondary_dict)):\n secondary_key = list(secondary_dict.keys())[secondary_dict_index]\n value = list(secondary_dict.values())[secondary_dict_index]\n\n config[item[0]][key][secondary_key] = value\n else:\n for index, key in enumerate(list(item[1].keys())):\n config[item[0]][key] = list(item[1].values())[index]\n\n logger.info('Predefined config successfully loaded')\n\n except Exception as e:\n logger.error(f\"Error opening {rc_config_file}: {str(e)}\")\n\n if db_name is not None:\n config['Database']['database'] = db_name # this is for backwards compatibility\n if db_name is not None:\n config['Database']['name'] = db_name\n if db_host is not None:\n config['Database']['host'] = db_host\n if db_port is not None:\n config['Database']['port'] = int(db_port)\n if db_user is not None:\n config['Database']['user'] = db_user\n if db_password is not None:\n config['Database']['password'] = db_password\n if github_api_key is not None:\n config['Database']['key'] = github_api_key\n if gitlab_api_key is not None:\n config['Database']['gitlab_api_key'] = gitlab_api_key\n if facade_repo_directory is not None:\n 
config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory\n\n try:\n with open(os.path.abspath('augur.config.json'), 'w') as f:\n json.dump(config, f, indent=4)\n logger.info('augur.config.json successfully created')\n except Exception as e:\n logger.error(\"Error writing augur.config.json \" + str(e))\n", "path": "augur/cli/configure.py"}], "after_files": [{"content": "#SPDX-License-Identifier: MIT\n\"\"\"\nAugur library script for generating a config file\n\"\"\"\n\nimport os\nimport click\nimport json\n\nfrom augur import logger\n\nENVVAR_PREFIX = \"AUGUR_\"\n\ndefault_config = {\n \"Database\": {\n \"name\": \"augur\",\n \"host\": \"localhost\",\n \"key\": \"key\",\n \"password\": \"augur\",\n \"port\": 5432,\n \"user\": \"augur\",\n \"gitlab_api_key\":\"gitlab_api_key\"\n },\n \"Housekeeper\": {\n \"jobs\": [\n {\n \"all_focused\": 1,\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"issues\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_request_commits\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"repo_info\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 150000,\n \"given\": [\n \"repo_group\"\n ],\n \"model\": \"commits\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_requests\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"contributors\",\n \"repo_group_id\": 0\n }, \n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"insights\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"badges\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 1000000,\n \"given\": [\n \"git_url\"\n ],\n \"model\": \"value\",\n \"repo_group_id\": 0\n },\n {\n \"delay\": 100000,\n \"given\": [\n \"github_url\"\n ],\n \"model\": \"pull_request_files\",\n \"repo_group_id\": 0\n }\n ]\n },\n \"Workers\": {\n \"facade_worker\": {\n \"port\": 50100,\n \"repo_directory\": \"repos/\",\n \"switch\": 1,\n \"workers\": 1\n },\n \"github_worker\": {\n \"port\": 50200,\n \"switch\": 1,\n \"workers\": 1\n },\n \"insight_worker\": {\n \"port\": 50300,\n \"metrics\": {\"issues-new\": \"issues\", \"code-changes\": \"commit_count\", \"code-changes-lines\": \"added\", \n \"reviews\": \"pull_requests\", \"contributors-new\": \"new_contributors\"},\n \"contamination\": 0.041,\n \"switch\": 0,\n \"workers\": 1,\n \"training_days\": 365,\n \"anomaly_days\": 2\n },\n \"linux_badge_worker\": {\n \"port\": 50400,\n \"switch\": 1,\n \"workers\": 1\n },\n \"metric_status_worker\": {\n \"port\": 50500,\n \"switch\": 0,\n \"workers\": 1\n },\n \"pull_request_worker\": {\n \"port\": 50600,\n \"switch\": 1,\n \"workers\": 1\n },\n \"repo_info_worker\": {\n \"port\": 50700,\n \"switch\": 1,\n \"workers\": 1\n },\n \"value_worker\": {\n \"port\": 50800,\n \"scc_bin\": \"scc\",\n \"switch\": 0,\n \"workers\": 1\n },\n \"contributor_worker\": {\n \"port\": 50900,\n \"switch\": 1,\n \"workers\": 1\n },\n \"gitlab_issues_worker\": {\n \"port\": 51000,\n \"switch\": 1,\n \"workers\": 1\n },\n \"release_worker\": {\n \"port\": 51100,\n \"switch\": 1,\n \"workers\": 1\n },\n },\n \"Facade\": {\n \"check_updates\": 1,\n \"clone_repos\": 1,\n \"create_xlsx_summary_files\": 1,\n \"delete_marked_repos\": 0,\n \"fix_affiliations\": 1,\n \"force_analysis\": 1,\n \"force_invalidate_caches\": 
1,\n \"force_updates\": 1,\n \"limited_run\": 0,\n \"multithreaded\": 0,\n \"nuke_stored_affiliations\": 0,\n \"pull_repos\": 1,\n \"rebuild_caches\": 1,\n \"run_analysis\": 1\n },\n \"Server\": {\n \"cache_expire\": \"3600\",\n \"host\": \"0.0.0.0\",\n \"port\": \"5000\",\n \"workers\": 4,\n \"timeout\": 60\n },\n \"Frontend\": {\n \"host\": \"0.0.0.0\",\n \"port\": \"5000\"\n },\n \"Development\": {\n \"log_level\": \"INFO\"\n }\n }\n\[email protected]('configure', short_help='Generate an augur.config.json')\ndef cli():\n pass\n\[email protected]('generate')\[email protected]('--db_name', help=\"Database name for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_NAME')\[email protected]('--db_host', help=\"Host for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_HOST')\[email protected]('--db_user', help=\"User for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_USER')\[email protected]('--db_port', help=\"Port for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_PORT')\[email protected]('--db_password', help=\"Password for your data collection database\", envvar=ENVVAR_PREFIX + 'DB_PASSWORD')\[email protected]('--gitlab_api_key', help=\"GitLab API key for data collection from the GitLab API\", envvar=ENVVAR_PREFIX + 'GITLAB_API_KEY')\[email protected]('--github_api_key', help=\"GitHub API key for data collection from the GitHub API\", envvar=ENVVAR_PREFIX + 'GITHUB_API_KEY')\[email protected]('--facade_repo_directory', help=\"Directory on the database server where Facade should clone repos\", envvar=ENVVAR_PREFIX + 'FACADE_REPO_DIRECTORY')\[email protected]('--rc-config-file', help=\"File containing existing config whose values will be used as the defaults\", type=click.Path(exists=True))\ndef generate(db_name, db_host, db_user, db_port, db_password, github_api_key, facade_repo_directory, rc_config_file, gitlab_api_key):\n \"\"\"\n Generate an augur.config.json\n \"\"\"\n\n config = default_config\n rc_config = None\n\n if rc_config_file != None:\n try:\n with open(os.path.abspath(rc_config_file), 'r') as f:\n rc_config = json.load(f)\n for item in rc_config.items():\n if item[0] == 'Workers':\n for index in range(0, len(item[1])):\n key = list(item[1].keys())[index]\n secondary_dict = list(item[1].values())[index]\n\n for secondary_dict_index in range(0, len(secondary_dict)):\n secondary_key = list(secondary_dict.keys())[secondary_dict_index]\n value = list(secondary_dict.values())[secondary_dict_index]\n\n config[item[0]][key][secondary_key] = value\n else:\n for index, key in enumerate(list(item[1].keys())):\n config[item[0]][key] = list(item[1].values())[index]\n\n logger.info('Predefined config successfully loaded')\n\n except Exception as e:\n logger.error(f\"Error opening {rc_config_file}: {str(e)}\")\n\n if db_name is not None:\n config['Database']['database'] = db_name # this is for backwards compatibility\n if db_name is not None:\n config['Database']['name'] = db_name\n if db_host is not None:\n config['Database']['host'] = db_host\n if db_port is not None:\n config['Database']['port'] = int(db_port)\n if db_user is not None:\n config['Database']['user'] = db_user\n if db_password is not None:\n config['Database']['password'] = db_password\n if github_api_key is not None:\n config['Database']['key'] = github_api_key\n if gitlab_api_key is not None:\n config['Database']['gitlab_api_key'] = gitlab_api_key\n if facade_repo_directory is not None:\n config['Workers']['facade_worker']['repo_directory'] = facade_repo_directory\n\n try:\n 
with open(os.path.abspath('augur.config.json'), 'w') as f:\n json.dump(config, f, indent=4)\n logger.info('augur.config.json successfully created')\n except Exception as e:\n logger.error(\"Error writing augur.config.json \" + str(e))\n", "path": "augur/cli/configure.py"}]}
3,136
124
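Illustrative sketch (not part of the dataset row above): the golden diff for this row only adds a `release_worker` block to the `Workers` section of the generated `augur.config.json`; the Housekeeper job list is left untouched. The added fragment, with values copied from the diff (the variable name below is only for illustration), is:

```python
# Fragment merged into default_config["Workers"] by the patch (values from the golden diff).
release_worker_entry = {
    "release_worker": {
        "port": 51100,
        "switch": 1,
        "workers": 1,
    },
}
```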
gh_patches_debug_31336
rasdani/github-patches
git_diff
zestedesavoir__zds-site-6556
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- La connexion avec Google ne fonctionne pas sur le serveur de bêta Problème rapporté par Sentry. La connexion avec Google ne fonctionne pas sur le serveur de bêta, on a une erreur 500 à cause d'une erreur dans le paquet social-auth-core. Même en testant avec la dernière version de ce paquet le problème n'est pas corrigé. J'ai ouvert un ticket chez social-auth-core : https://github.com/python-social-auth/social-core/issues/847 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zds/settings/prod.py` Content: ``` 1 from sentry_sdk.integrations.django import DjangoIntegration 2 from sentry_sdk.integrations.logging import ignore_logger 3 import sentry_sdk 4 5 from .abstract_base import * 6 7 # For secrets, prefer `config[key]` over `config.get(key)` in this 8 # file because we really want to raise an error if a secret is not 9 # defined. 10 11 12 ############################################################################### 13 # DJANGO SETTINGS 14 15 16 DEBUG = False 17 18 USE_L10N = True 19 20 DATABASES = { 21 "default": { 22 "ENGINE": "django.db.backends.mysql", 23 "NAME": config["databases"]["default"].get("name", "zdsdb"), 24 "USER": config["databases"]["default"].get("user", "zds"), 25 "PASSWORD": config["databases"]["default"]["password"], 26 "HOST": "localhost", 27 "PORT": "", 28 "CONN_MAX_AGE": 600, 29 "OPTIONS": { 30 "charset": "utf8mb4", 31 }, 32 }, 33 } 34 35 ALLOWED_HOSTS = [ 36 "beta.zestedesavoir.com", 37 "scaleway.zestedesavoir.com", 38 "zdsappserver", 39 "gandi.zestedesavoir.com", 40 "gandi.zestedesavoir.com.", 41 ".zestedesavoir.com", 42 ".zestedesavoir.com.", 43 "127.0.0.1", 44 "localhost", 45 "163.172.171.246", 46 ] 47 48 EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" 49 EMAIL_USE_TLS = False 50 EMAIL_HOST = "localhost" 51 EMAIL_PORT = 25 52 53 CACHES = { 54 "default": { 55 "BACKEND": "django.core.cache.backends.memcached.PyMemcacheCache", 56 "LOCATION": "127.0.0.1:11211", 57 } 58 } 59 60 SESSION_ENGINE = "django.contrib.sessions.backends.cached_db" 61 SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 4 62 63 MEDIA_ROOT = Path("/opt/zds/data/media") 64 65 STATIC_ROOT = Path("/opt/zds/data/static") 66 STATICFILES_STORAGE = "django.contrib.staticfiles.storage.ManifestStaticFilesStorage" 67 68 django_template_engine["APP_DIRS"] = False 69 django_template_engine["OPTIONS"]["loaders"] = [ 70 ( 71 "django.template.loaders.cached.Loader", 72 [ 73 "django.template.loaders.filesystem.Loader", 74 "django.template.loaders.app_directories.Loader", 75 ], 76 ), 77 ] 78 79 80 def _get_version(): 81 from zds import __version__, git_version 82 83 if git_version is None: 84 return __version__ 85 else: 86 return f"{__version__}/{git_version[:7]}" 87 88 89 sentry_sdk.init( 90 dsn=config["sentry"]["dsn"], 91 integrations=[DjangoIntegration()], 92 # Set traces_sample_rate to 1.0 to capture 100% 93 # of transactions for performance monitoring. 94 # We recommend adjusting this value in production, 95 traces_sample_rate=1.0, 96 # If you wish to associate users to errors (assuming you are using 97 # django.contrib.auth) you may enable sending PII data. 98 send_default_pii=True, 99 # By default the SDK will try to use the SENTRY_RELEASE 100 # environment variable, or infer a git commit 101 # SHA as release, however you may want to set 102 # something more human-readable. 
103 release=_get_version().replace("/", "#"), 104 # /!\ It cannot contain slashes 105 environment=config["sentry"]["environment"], 106 ) 107 108 # Ignoring emarkdown logging because it is too noisy 109 ignore_logger("zds.utils.templatetags.emarkdown") 110 111 112 ############################################################################### 113 # REQUIREMENTS SETTINGS 114 115 116 # easy-thumbnails 117 # http://easy-thumbnails.readthedocs.io/en/2.1/ref/optimize/ 118 THUMBNAIL_OPTIMIZE_COMMAND = { 119 "png": "/usr/bin/optipng {filename}", 120 "gif": "/usr/bin/optipng {filename}", 121 "jpeg": "/usr/bin/jpegoptim {filename}", 122 } 123 124 125 # python-social-auth 126 # http://psa.matiasaguirre.net/docs/configuration/django.html 127 SOCIAL_AUTH_PIPELINE = ( 128 "social.pipeline.social_auth.social_details", 129 "social.pipeline.social_auth.social_uid", 130 "social.pipeline.social_auth.auth_allowed", 131 "social.pipeline.social_auth.social_user", 132 "social.pipeline.user.get_username", 133 "social.pipeline.social_auth.associate_by_email", 134 "social.pipeline.user.create_user", 135 "zds.member.models.save_profile", 136 "social.pipeline.social_auth.associate_user", 137 "social.pipeline.social_auth.load_extra_data", 138 "social.pipeline.user.user_details", 139 ) 140 141 142 ############################################################################### 143 # ZESTE DE SAVOIR SETTINGS 144 145 146 ES_SEARCH_INDEX["shards"] = config["elasticsearch"].get("shards", 3) 147 148 149 ZDS_APP["site"]["association"]["email"] = "[email protected]" 150 151 # content 152 # ZDS_APP['content']['build_pdf_when_published'] = False 153 ZDS_APP["article"]["repo_path"] = "/opt/zds/data/articles-data" 154 ZDS_APP["content"]["repo_private_path"] = "/opt/zds/data/contents-private" 155 ZDS_APP["content"]["repo_public_path"] = "/opt/zds/data/contents-public" 156 ZDS_APP["content"]["extra_content_generation_policy"] = "WATCHDOG" 157 158 ZDS_APP["visual_changes"] = zds_config.get("visual_changes", []) 159 160 ZDS_APP["very_top_banner"] = config.get("very_top_banner", False) 161 ``` Path: `zds/settings/abstract_base/requirements.py` Content: ``` 1 from .config import config 2 3 # best quality, 100 is the same but documentation says 4 # ' values up to 100 are allowed, but this is not recommended' 5 # so let's use 95 6 THUMBNAIL_QUALITY = 95 7 # Let's use the default value BUT if we want to let png in lossless format, we have tu use (png,) instead of None 8 THUMBNAIL_PRESERVE_EXTENSIONS = ("svg",) 9 10 11 social_auth_config = config.get("social_auth", {}) 12 13 SOCIAL_AUTH_RAISE_EXCEPTIONS = False 14 15 SOCIAL_AUTH_FACEBOOK_SCOPE = ["email"] 16 17 SOCIAL_AUTH_PIPELINE = ( 18 "social.pipeline.social_auth.social_details", 19 "social.pipeline.social_auth.social_uid", 20 "social.pipeline.social_auth.auth_allowed", 21 "social.pipeline.social_auth.social_user", 22 "social.pipeline.user.get_username", 23 "social.pipeline.social_auth.associate_by_email", 24 "social.pipeline.user.create_user", 25 "zds.member.models.save_profile", 26 "social.pipeline.social_auth.associate_user", 27 "social.pipeline.social_auth.load_extra_data", 28 "social.pipeline.user.user_details", 29 ) 30 31 # Before adding new providers such as Facebook and Google, 32 # you need to make sure they validate the user's email address on sign up! 33 # If they don't, a malicious person could take control of someone else account! 
34 SOCIAL_AUTH_FACEBOOK_KEY = social_auth_config.get("facebook_key", "") 35 SOCIAL_AUTH_FACEBOOK_SECRET = social_auth_config.get("facebook_secret", "") 36 SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = social_auth_config.get( 37 "google_oauth2_key", 38 "696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com", 39 ) 40 SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = social_auth_config.get( 41 "google_oauth2_secret", 42 "mApWNh3stCsYHwsGuWdbZWP8", 43 ) 44 45 SOCIAL_AUTH_SANITIZE_REDIRECTS = social_auth_config.get( 46 "sanitize_redirects", 47 False, 48 ) 49 50 51 recaptcha_config = config.get("recaptcha", {}) 52 53 USE_CAPTCHA = recaptcha_config.get("use_captcha", False) 54 # Seems to be used by `django-recaptcha` (what a poorly-namespaced 55 # setting!). 56 # Set to `True` to use the “No Captcha” engine instead of the old API. 57 NOCAPTCHA = True 58 RECAPTCHA_USE_SSL = True 59 RECAPTCHA_PUBLIC_KEY = recaptcha_config.get("public_key", "dummy") 60 RECAPTCHA_PRIVATE_KEY = recaptcha_config.get("private_key", "dummy") 61 62 63 OAUTH2_PROVIDER = {"OAUTH2_BACKEND_CLASS": "oauth2_provider.oauth2_backends.JSONOAuthLibCore"} 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zds/settings/abstract_base/requirements.py b/zds/settings/abstract_base/requirements.py --- a/zds/settings/abstract_base/requirements.py +++ b/zds/settings/abstract_base/requirements.py @@ -15,17 +15,17 @@ SOCIAL_AUTH_FACEBOOK_SCOPE = ["email"] SOCIAL_AUTH_PIPELINE = ( - "social.pipeline.social_auth.social_details", - "social.pipeline.social_auth.social_uid", - "social.pipeline.social_auth.auth_allowed", - "social.pipeline.social_auth.social_user", - "social.pipeline.user.get_username", - "social.pipeline.social_auth.associate_by_email", - "social.pipeline.user.create_user", + "social_core.pipeline.social_auth.social_details", + "social_core.pipeline.social_auth.social_uid", + "social_core.pipeline.social_auth.auth_allowed", + "social_core.pipeline.social_auth.social_user", + "social_core.pipeline.user.get_username", + "social_core.pipeline.social_auth.associate_by_email", + "social_core.pipeline.user.create_user", "zds.member.models.save_profile", - "social.pipeline.social_auth.associate_user", - "social.pipeline.social_auth.load_extra_data", - "social.pipeline.user.user_details", + "social_core.pipeline.social_auth.associate_user", + "social_core.pipeline.social_auth.load_extra_data", + "social_core.pipeline.user.user_details", ) # Before adding new providers such as Facebook and Google, diff --git a/zds/settings/prod.py b/zds/settings/prod.py --- a/zds/settings/prod.py +++ b/zds/settings/prod.py @@ -122,23 +122,6 @@ } -# python-social-auth -# http://psa.matiasaguirre.net/docs/configuration/django.html -SOCIAL_AUTH_PIPELINE = ( - "social.pipeline.social_auth.social_details", - "social.pipeline.social_auth.social_uid", - "social.pipeline.social_auth.auth_allowed", - "social.pipeline.social_auth.social_user", - "social.pipeline.user.get_username", - "social.pipeline.social_auth.associate_by_email", - "social.pipeline.user.create_user", - "zds.member.models.save_profile", - "social.pipeline.social_auth.associate_user", - "social.pipeline.social_auth.load_extra_data", - "social.pipeline.user.user_details", -) - - ############################################################################### # ZESTE DE SAVOIR SETTINGS
{"golden_diff": "diff --git a/zds/settings/abstract_base/requirements.py b/zds/settings/abstract_base/requirements.py\n--- a/zds/settings/abstract_base/requirements.py\n+++ b/zds/settings/abstract_base/requirements.py\n@@ -15,17 +15,17 @@\n SOCIAL_AUTH_FACEBOOK_SCOPE = [\"email\"]\n \n SOCIAL_AUTH_PIPELINE = (\n- \"social.pipeline.social_auth.social_details\",\n- \"social.pipeline.social_auth.social_uid\",\n- \"social.pipeline.social_auth.auth_allowed\",\n- \"social.pipeline.social_auth.social_user\",\n- \"social.pipeline.user.get_username\",\n- \"social.pipeline.social_auth.associate_by_email\",\n- \"social.pipeline.user.create_user\",\n+ \"social_core.pipeline.social_auth.social_details\",\n+ \"social_core.pipeline.social_auth.social_uid\",\n+ \"social_core.pipeline.social_auth.auth_allowed\",\n+ \"social_core.pipeline.social_auth.social_user\",\n+ \"social_core.pipeline.user.get_username\",\n+ \"social_core.pipeline.social_auth.associate_by_email\",\n+ \"social_core.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n- \"social.pipeline.social_auth.associate_user\",\n- \"social.pipeline.social_auth.load_extra_data\",\n- \"social.pipeline.user.user_details\",\n+ \"social_core.pipeline.social_auth.associate_user\",\n+ \"social_core.pipeline.social_auth.load_extra_data\",\n+ \"social_core.pipeline.user.user_details\",\n )\n \n # Before adding new providers such as Facebook and Google,\ndiff --git a/zds/settings/prod.py b/zds/settings/prod.py\n--- a/zds/settings/prod.py\n+++ b/zds/settings/prod.py\n@@ -122,23 +122,6 @@\n }\n \n \n-# python-social-auth\n-# http://psa.matiasaguirre.net/docs/configuration/django.html\n-SOCIAL_AUTH_PIPELINE = (\n- \"social.pipeline.social_auth.social_details\",\n- \"social.pipeline.social_auth.social_uid\",\n- \"social.pipeline.social_auth.auth_allowed\",\n- \"social.pipeline.social_auth.social_user\",\n- \"social.pipeline.user.get_username\",\n- \"social.pipeline.social_auth.associate_by_email\",\n- \"social.pipeline.user.create_user\",\n- \"zds.member.models.save_profile\",\n- \"social.pipeline.social_auth.associate_user\",\n- \"social.pipeline.social_auth.load_extra_data\",\n- \"social.pipeline.user.user_details\",\n-)\n-\n-\n ###############################################################################\n # ZESTE DE SAVOIR SETTINGS\n", "issue": "La connexion avec Google ne fonctionne pas sur le serveur de b\u00eata\nProbl\u00e8me rapport\u00e9 par Sentry.\r\n\r\nLa connexion avec Google ne fonctionne pas sur le serveur de b\u00eata, on a une erreur 500 \u00e0 cause d'une erreur dans le paquet social-auth-core. 
M\u00eame en testant avec la derni\u00e8re version de ce paquet le probl\u00e8me n'est pas corrig\u00e9.\r\n\r\nJ'ai ouvert un ticket chez social-auth-core : https://github.com/python-social-auth/social-core/issues/847\r\n\n", "before_files": [{"content": "from sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import ignore_logger\nimport sentry_sdk\n\nfrom .abstract_base import *\n\n# For secrets, prefer `config[key]` over `config.get(key)` in this\n# file because we really want to raise an error if a secret is not\n# defined.\n\n\n###############################################################################\n# DJANGO SETTINGS\n\n\nDEBUG = False\n\nUSE_L10N = True\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.mysql\",\n \"NAME\": config[\"databases\"][\"default\"].get(\"name\", \"zdsdb\"),\n \"USER\": config[\"databases\"][\"default\"].get(\"user\", \"zds\"),\n \"PASSWORD\": config[\"databases\"][\"default\"][\"password\"],\n \"HOST\": \"localhost\",\n \"PORT\": \"\",\n \"CONN_MAX_AGE\": 600,\n \"OPTIONS\": {\n \"charset\": \"utf8mb4\",\n },\n },\n}\n\nALLOWED_HOSTS = [\n \"beta.zestedesavoir.com\",\n \"scaleway.zestedesavoir.com\",\n \"zdsappserver\",\n \"gandi.zestedesavoir.com\",\n \"gandi.zestedesavoir.com.\",\n \".zestedesavoir.com\",\n \".zestedesavoir.com.\",\n \"127.0.0.1\",\n \"localhost\",\n \"163.172.171.246\",\n]\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_USE_TLS = False\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = 25\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.memcached.PyMemcacheCache\",\n \"LOCATION\": \"127.0.0.1:11211\",\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\nSESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 4\n\nMEDIA_ROOT = Path(\"/opt/zds/data/media\")\n\nSTATIC_ROOT = Path(\"/opt/zds/data/static\")\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n\ndjango_template_engine[\"APP_DIRS\"] = False\ndjango_template_engine[\"OPTIONS\"][\"loaders\"] = [\n (\n \"django.template.loaders.cached.Loader\",\n [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n ),\n]\n\n\ndef _get_version():\n from zds import __version__, git_version\n\n if git_version is None:\n return __version__\n else:\n return f\"{__version__}/{git_version[:7]}\"\n\n\nsentry_sdk.init(\n dsn=config[\"sentry\"][\"dsn\"],\n integrations=[DjangoIntegration()],\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n # We recommend adjusting this value in production,\n traces_sample_rate=1.0,\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=True,\n # By default the SDK will try to use the SENTRY_RELEASE\n # environment variable, or infer a git commit\n # SHA as release, however you may want to set\n # something more human-readable.\n release=_get_version().replace(\"/\", \"#\"),\n # /!\\ It cannot contain slashes\n environment=config[\"sentry\"][\"environment\"],\n)\n\n# Ignoring emarkdown logging because it is too noisy\nignore_logger(\"zds.utils.templatetags.emarkdown\")\n\n\n###############################################################################\n# REQUIREMENTS SETTINGS\n\n\n# easy-thumbnails\n# http://easy-thumbnails.readthedocs.io/en/2.1/ref/optimize/\nTHUMBNAIL_OPTIMIZE_COMMAND = {\n \"png\": \"/usr/bin/optipng 
{filename}\",\n \"gif\": \"/usr/bin/optipng {filename}\",\n \"jpeg\": \"/usr/bin/jpegoptim {filename}\",\n}\n\n\n# python-social-auth\n# http://psa.matiasaguirre.net/docs/configuration/django.html\nSOCIAL_AUTH_PIPELINE = (\n \"social.pipeline.social_auth.social_details\",\n \"social.pipeline.social_auth.social_uid\",\n \"social.pipeline.social_auth.auth_allowed\",\n \"social.pipeline.social_auth.social_user\",\n \"social.pipeline.user.get_username\",\n \"social.pipeline.social_auth.associate_by_email\",\n \"social.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n \"social.pipeline.social_auth.associate_user\",\n \"social.pipeline.social_auth.load_extra_data\",\n \"social.pipeline.user.user_details\",\n)\n\n\n###############################################################################\n# ZESTE DE SAVOIR SETTINGS\n\n\nES_SEARCH_INDEX[\"shards\"] = config[\"elasticsearch\"].get(\"shards\", 3)\n\n\nZDS_APP[\"site\"][\"association\"][\"email\"] = \"[email protected]\"\n\n# content\n# ZDS_APP['content']['build_pdf_when_published'] = False\nZDS_APP[\"article\"][\"repo_path\"] = \"/opt/zds/data/articles-data\"\nZDS_APP[\"content\"][\"repo_private_path\"] = \"/opt/zds/data/contents-private\"\nZDS_APP[\"content\"][\"repo_public_path\"] = \"/opt/zds/data/contents-public\"\nZDS_APP[\"content\"][\"extra_content_generation_policy\"] = \"WATCHDOG\"\n\nZDS_APP[\"visual_changes\"] = zds_config.get(\"visual_changes\", [])\n\nZDS_APP[\"very_top_banner\"] = config.get(\"very_top_banner\", False)\n", "path": "zds/settings/prod.py"}, {"content": "from .config import config\n\n# best quality, 100 is the same but documentation says\n# ' values up to 100 are allowed, but this is not recommended'\n# so let's use 95\nTHUMBNAIL_QUALITY = 95\n# Let's use the default value BUT if we want to let png in lossless format, we have tu use (png,) instead of None\nTHUMBNAIL_PRESERVE_EXTENSIONS = (\"svg\",)\n\n\nsocial_auth_config = config.get(\"social_auth\", {})\n\nSOCIAL_AUTH_RAISE_EXCEPTIONS = False\n\nSOCIAL_AUTH_FACEBOOK_SCOPE = [\"email\"]\n\nSOCIAL_AUTH_PIPELINE = (\n \"social.pipeline.social_auth.social_details\",\n \"social.pipeline.social_auth.social_uid\",\n \"social.pipeline.social_auth.auth_allowed\",\n \"social.pipeline.social_auth.social_user\",\n \"social.pipeline.user.get_username\",\n \"social.pipeline.social_auth.associate_by_email\",\n \"social.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n \"social.pipeline.social_auth.associate_user\",\n \"social.pipeline.social_auth.load_extra_data\",\n \"social.pipeline.user.user_details\",\n)\n\n# Before adding new providers such as Facebook and Google,\n# you need to make sure they validate the user's email address on sign up!\n# If they don't, a malicious person could take control of someone else account!\nSOCIAL_AUTH_FACEBOOK_KEY = social_auth_config.get(\"facebook_key\", \"\")\nSOCIAL_AUTH_FACEBOOK_SECRET = social_auth_config.get(\"facebook_secret\", \"\")\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = social_auth_config.get(\n \"google_oauth2_key\",\n \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\",\n)\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = social_auth_config.get(\n \"google_oauth2_secret\",\n \"mApWNh3stCsYHwsGuWdbZWP8\",\n)\n\nSOCIAL_AUTH_SANITIZE_REDIRECTS = social_auth_config.get(\n \"sanitize_redirects\",\n False,\n)\n\n\nrecaptcha_config = config.get(\"recaptcha\", {})\n\nUSE_CAPTCHA = recaptcha_config.get(\"use_captcha\", False)\n# Seems to be used by `django-recaptcha` (what a poorly-namespaced\n# 
setting!).\n# Set to `True` to use the \u201cNo Captcha\u201d engine instead of the old API.\nNOCAPTCHA = True\nRECAPTCHA_USE_SSL = True\nRECAPTCHA_PUBLIC_KEY = recaptcha_config.get(\"public_key\", \"dummy\")\nRECAPTCHA_PRIVATE_KEY = recaptcha_config.get(\"private_key\", \"dummy\")\n\n\nOAUTH2_PROVIDER = {\"OAUTH2_BACKEND_CLASS\": \"oauth2_provider.oauth2_backends.JSONOAuthLibCore\"}\n", "path": "zds/settings/abstract_base/requirements.py"}], "after_files": [{"content": "from sentry_sdk.integrations.django import DjangoIntegration\nfrom sentry_sdk.integrations.logging import ignore_logger\nimport sentry_sdk\n\nfrom .abstract_base import *\n\n# For secrets, prefer `config[key]` over `config.get(key)` in this\n# file because we really want to raise an error if a secret is not\n# defined.\n\n\n###############################################################################\n# DJANGO SETTINGS\n\n\nDEBUG = False\n\nUSE_L10N = True\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.mysql\",\n \"NAME\": config[\"databases\"][\"default\"].get(\"name\", \"zdsdb\"),\n \"USER\": config[\"databases\"][\"default\"].get(\"user\", \"zds\"),\n \"PASSWORD\": config[\"databases\"][\"default\"][\"password\"],\n \"HOST\": \"localhost\",\n \"PORT\": \"\",\n \"CONN_MAX_AGE\": 600,\n \"OPTIONS\": {\n \"charset\": \"utf8mb4\",\n },\n },\n}\n\nALLOWED_HOSTS = [\n \"beta.zestedesavoir.com\",\n \"scaleway.zestedesavoir.com\",\n \"zdsappserver\",\n \"gandi.zestedesavoir.com\",\n \"gandi.zestedesavoir.com.\",\n \".zestedesavoir.com\",\n \".zestedesavoir.com.\",\n \"127.0.0.1\",\n \"localhost\",\n \"163.172.171.246\",\n]\n\nEMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\nEMAIL_USE_TLS = False\nEMAIL_HOST = \"localhost\"\nEMAIL_PORT = 25\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.memcached.PyMemcacheCache\",\n \"LOCATION\": \"127.0.0.1:11211\",\n }\n}\n\nSESSION_ENGINE = \"django.contrib.sessions.backends.cached_db\"\nSESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 4\n\nMEDIA_ROOT = Path(\"/opt/zds/data/media\")\n\nSTATIC_ROOT = Path(\"/opt/zds/data/static\")\nSTATICFILES_STORAGE = \"django.contrib.staticfiles.storage.ManifestStaticFilesStorage\"\n\ndjango_template_engine[\"APP_DIRS\"] = False\ndjango_template_engine[\"OPTIONS\"][\"loaders\"] = [\n (\n \"django.template.loaders.cached.Loader\",\n [\n \"django.template.loaders.filesystem.Loader\",\n \"django.template.loaders.app_directories.Loader\",\n ],\n ),\n]\n\n\ndef _get_version():\n from zds import __version__, git_version\n\n if git_version is None:\n return __version__\n else:\n return f\"{__version__}/{git_version[:7]}\"\n\n\nsentry_sdk.init(\n dsn=config[\"sentry\"][\"dsn\"],\n integrations=[DjangoIntegration()],\n # Set traces_sample_rate to 1.0 to capture 100%\n # of transactions for performance monitoring.\n # We recommend adjusting this value in production,\n traces_sample_rate=1.0,\n # If you wish to associate users to errors (assuming you are using\n # django.contrib.auth) you may enable sending PII data.\n send_default_pii=True,\n # By default the SDK will try to use the SENTRY_RELEASE\n # environment variable, or infer a git commit\n # SHA as release, however you may want to set\n # something more human-readable.\n release=_get_version().replace(\"/\", \"#\"),\n # /!\\ It cannot contain slashes\n environment=config[\"sentry\"][\"environment\"],\n)\n\n# Ignoring emarkdown logging because it is too 
noisy\nignore_logger(\"zds.utils.templatetags.emarkdown\")\n\n\n###############################################################################\n# REQUIREMENTS SETTINGS\n\n\n# easy-thumbnails\n# http://easy-thumbnails.readthedocs.io/en/2.1/ref/optimize/\nTHUMBNAIL_OPTIMIZE_COMMAND = {\n \"png\": \"/usr/bin/optipng {filename}\",\n \"gif\": \"/usr/bin/optipng {filename}\",\n \"jpeg\": \"/usr/bin/jpegoptim {filename}\",\n}\n\n\n###############################################################################\n# ZESTE DE SAVOIR SETTINGS\n\n\nES_SEARCH_INDEX[\"shards\"] = config[\"elasticsearch\"].get(\"shards\", 3)\n\n\nZDS_APP[\"site\"][\"association\"][\"email\"] = \"[email protected]\"\n\n# content\n# ZDS_APP['content']['build_pdf_when_published'] = False\nZDS_APP[\"article\"][\"repo_path\"] = \"/opt/zds/data/articles-data\"\nZDS_APP[\"content\"][\"repo_private_path\"] = \"/opt/zds/data/contents-private\"\nZDS_APP[\"content\"][\"repo_public_path\"] = \"/opt/zds/data/contents-public\"\nZDS_APP[\"content\"][\"extra_content_generation_policy\"] = \"WATCHDOG\"\n\nZDS_APP[\"visual_changes\"] = zds_config.get(\"visual_changes\", [])\n\nZDS_APP[\"very_top_banner\"] = config.get(\"very_top_banner\", False)\n", "path": "zds/settings/prod.py"}, {"content": "from .config import config\n\n# best quality, 100 is the same but documentation says\n# ' values up to 100 are allowed, but this is not recommended'\n# so let's use 95\nTHUMBNAIL_QUALITY = 95\n# Let's use the default value BUT if we want to let png in lossless format, we have tu use (png,) instead of None\nTHUMBNAIL_PRESERVE_EXTENSIONS = (\"svg\",)\n\n\nsocial_auth_config = config.get(\"social_auth\", {})\n\nSOCIAL_AUTH_RAISE_EXCEPTIONS = False\n\nSOCIAL_AUTH_FACEBOOK_SCOPE = [\"email\"]\n\nSOCIAL_AUTH_PIPELINE = (\n \"social_core.pipeline.social_auth.social_details\",\n \"social_core.pipeline.social_auth.social_uid\",\n \"social_core.pipeline.social_auth.auth_allowed\",\n \"social_core.pipeline.social_auth.social_user\",\n \"social_core.pipeline.user.get_username\",\n \"social_core.pipeline.social_auth.associate_by_email\",\n \"social_core.pipeline.user.create_user\",\n \"zds.member.models.save_profile\",\n \"social_core.pipeline.social_auth.associate_user\",\n \"social_core.pipeline.social_auth.load_extra_data\",\n \"social_core.pipeline.user.user_details\",\n)\n\n# Before adding new providers such as Facebook and Google,\n# you need to make sure they validate the user's email address on sign up!\n# If they don't, a malicious person could take control of someone else account!\nSOCIAL_AUTH_FACEBOOK_KEY = social_auth_config.get(\"facebook_key\", \"\")\nSOCIAL_AUTH_FACEBOOK_SECRET = social_auth_config.get(\"facebook_secret\", \"\")\nSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = social_auth_config.get(\n \"google_oauth2_key\",\n \"696570367703-r6hc7mdd27t1sktdkivpnc5b25i0uip2.apps.googleusercontent.com\",\n)\nSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = social_auth_config.get(\n \"google_oauth2_secret\",\n \"mApWNh3stCsYHwsGuWdbZWP8\",\n)\n\nSOCIAL_AUTH_SANITIZE_REDIRECTS = social_auth_config.get(\n \"sanitize_redirects\",\n False,\n)\n\n\nrecaptcha_config = config.get(\"recaptcha\", {})\n\nUSE_CAPTCHA = recaptcha_config.get(\"use_captcha\", False)\n# Seems to be used by `django-recaptcha` (what a poorly-namespaced\n# setting!).\n# Set to `True` to use the \u201cNo Captcha\u201d engine instead of the old API.\nNOCAPTCHA = True\nRECAPTCHA_USE_SSL = True\nRECAPTCHA_PUBLIC_KEY = recaptcha_config.get(\"public_key\", \"dummy\")\nRECAPTCHA_PRIVATE_KEY = 
recaptcha_config.get(\"private_key\", \"dummy\")\n\n\nOAUTH2_PROVIDER = {\"OAUTH2_BACKEND_CLASS\": \"oauth2_provider.oauth2_backends.JSONOAuthLibCore\"}\n", "path": "zds/settings/abstract_base/requirements.py"}]}
2,689
523
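Illustrative sketch (not part of the dataset row above): the fix in this row renames every entry of `SOCIAL_AUTH_PIPELINE` from the legacy `social.pipeline.*` module path to the current `social_core.pipeline.*` one and removes the duplicated pipeline definition from `prod.py`. The corrected setting, as it appears in the golden diff, is:

```python
SOCIAL_AUTH_PIPELINE = (
    "social_core.pipeline.social_auth.social_details",
    "social_core.pipeline.social_auth.social_uid",
    "social_core.pipeline.social_auth.auth_allowed",
    "social_core.pipeline.social_auth.social_user",
    "social_core.pipeline.user.get_username",
    "social_core.pipeline.social_auth.associate_by_email",
    "social_core.pipeline.user.create_user",
    "zds.member.models.save_profile",  # project-specific step, kept unchanged
    "social_core.pipeline.social_auth.associate_user",
    "social_core.pipeline.social_auth.load_extra_data",
    "social_core.pipeline.user.user_details",
)
```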
gh_patches_debug_33506
rasdani/github-patches
git_diff
optuna__optuna-1285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `experimental` decorator breaks class documentation. The `experimental` decorator used on classes break documentation. This could be one manifestation but there is an issue with how the documentation including type hints are propagated to the decorated class. This does not apply for free functions. See https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break. ## Expected behavior Class documentation should not be altered by applying the experimental decorator. ## Steps to reproduce 1. Apply the experimental decorator to a class. 1. Build the document (`cd docs && make html`) 1. Open the rendered documentation and note that the class signatures is broken. ## Additional context (optional) - An issue regarding the indentation https://github.com/optuna/optuna/issues/1213. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optuna/_experimental.py` Content: ``` 1 import functools 2 import inspect 3 from typing import Any 4 from typing import Callable 5 import warnings 6 7 from optuna.exceptions import ExperimentalWarning 8 9 10 # White spaces of each line are necessary to beautifully rendered documentation. 11 # NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require 12 # another four spaces. 13 _EXPERIMENTAL_DOCSTRING_TEMPLATE = """ 14 15 .. note:: 16 Added in v{ver} as an experimental feature. The interface may change in newer versions 17 without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}. 18 """ 19 20 21 def _make_func_spec_str(func: Callable[..., Any]) -> str: 22 23 name = func.__name__ 24 argspec = inspect.getfullargspec(func) 25 26 n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0 27 offset = int(len(argspec.args) > 0 and argspec.args[0] == "self") 28 29 if n_defaults > 0: 30 args = ", ".join(argspec.args[offset:-n_defaults]) 31 with_default_values = ", ".join( 32 [ 33 "{}={}".format(a, d) 34 for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore 35 ] 36 ) 37 else: 38 args = ", ".join(argspec.args[offset:]) 39 with_default_values = "" 40 41 if len(args) > 0 and len(with_default_values) > 0: 42 args += ", " 43 44 # NOTE(crcrpar): The four spaces are necessary to correctly render documentation. 45 # Different classes or methods require more spaces. 46 str_args_description = "(" + args + with_default_values + ")\n\n " 47 return name + str_args_description 48 49 50 def _validate_version(version: str) -> None: 51 52 if not isinstance(version, str) or len(version.split(".")) != 3: 53 raise ValueError( 54 "Invalid version specification. Must follow `x.y.z` format but `{}` is given".format( 55 version 56 ) 57 ) 58 59 60 def experimental(version: str, name: str = None) -> Any: 61 """Decorate class or function as experimental. 62 63 Args: 64 version: The first version that supports the target feature. 65 name: The name of the feature. Defaults to the function or class name. Optional. 66 """ 67 68 _validate_version(version) 69 70 def _experimental_wrapper(f: Any) -> Any: 71 # f is either func or class. 
72 73 def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]: 74 75 docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 76 if func.__doc__ is None: 77 func.__doc__ = "" 78 func.__doc__ += docstring 79 80 # TODO(crcrpar): Annotate this correctly. 81 @functools.wraps(func) 82 def new_func(*args: Any, **kwargs: Any) -> Any: 83 """Wrapped function.""" 84 85 warnings.warn( 86 "{} is experimental (supported from v{}). " 87 "The interface can change in the future.".format( 88 name if name is not None else func.__name__, version 89 ), 90 ExperimentalWarning, 91 ) 92 93 return func(*args, **kwargs) # type: ignore 94 95 return new_func 96 97 def _experimental_class(cls: Any) -> Any: 98 """Decorates a class as experimental. 99 100 This decorator is supposed to be applied to the experimental class. 101 """ 102 103 _original_init = cls.__init__ 104 105 def wrapped_init(self, *args, **kwargs) -> None: # type: ignore 106 warnings.warn( 107 "{} is experimental (supported from v{}). " 108 "The interface can change in the future.".format( 109 name if name is not None else cls.__name__, version 110 ), 111 ExperimentalWarning, 112 ) 113 114 _original_init(self, *args, **kwargs) 115 116 cls.__init__ = wrapped_init 117 118 if cls.__doc__ is None: 119 cls.__doc__ = "" 120 cls.__doc__ = ( 121 _make_func_spec_str(_original_init) 122 + cls.__doc__ 123 + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) 124 ) 125 return cls 126 127 return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f) 128 129 return _experimental_wrapper 130 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optuna/_experimental.py b/optuna/_experimental.py --- a/optuna/_experimental.py +++ b/optuna/_experimental.py @@ -18,35 +18,6 @@ """ -def _make_func_spec_str(func: Callable[..., Any]) -> str: - - name = func.__name__ - argspec = inspect.getfullargspec(func) - - n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0 - offset = int(len(argspec.args) > 0 and argspec.args[0] == "self") - - if n_defaults > 0: - args = ", ".join(argspec.args[offset:-n_defaults]) - with_default_values = ", ".join( - [ - "{}={}".format(a, d) - for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore - ] - ) - else: - args = ", ".join(argspec.args[offset:]) - with_default_values = "" - - if len(args) > 0 and len(with_default_values) > 0: - args += ", " - - # NOTE(crcrpar): The four spaces are necessary to correctly render documentation. - # Different classes or methods require more spaces. - str_args_description = "(" + args + with_default_values + ")\n\n " - return name + str_args_description - - def _validate_version(version: str) -> None: if not isinstance(version, str) or len(version.split(".")) != 3: @@ -102,6 +73,7 @@ _original_init = cls.__init__ + @functools.wraps(_original_init) def wrapped_init(self, *args, **kwargs) -> None: # type: ignore warnings.warn( "{} is experimental (supported from v{}). " @@ -117,11 +89,7 @@ if cls.__doc__ is None: cls.__doc__ = "" - cls.__doc__ = ( - _make_func_spec_str(_original_init) - + cls.__doc__ - + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) - ) + cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version) return cls return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)
{"golden_diff": "diff --git a/optuna/_experimental.py b/optuna/_experimental.py\n--- a/optuna/_experimental.py\n+++ b/optuna/_experimental.py\n@@ -18,35 +18,6 @@\n \"\"\"\n \n \n-def _make_func_spec_str(func: Callable[..., Any]) -> str:\n-\n- name = func.__name__\n- argspec = inspect.getfullargspec(func)\n-\n- n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n- offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n-\n- if n_defaults > 0:\n- args = \", \".join(argspec.args[offset:-n_defaults])\n- with_default_values = \", \".join(\n- [\n- \"{}={}\".format(a, d)\n- for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n- ]\n- )\n- else:\n- args = \", \".join(argspec.args[offset:])\n- with_default_values = \"\"\n-\n- if len(args) > 0 and len(with_default_values) > 0:\n- args += \", \"\n-\n- # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n- # Different classes or methods require more spaces.\n- str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n- return name + str_args_description\n-\n-\n def _validate_version(version: str) -> None:\n \n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n@@ -102,6 +73,7 @@\n \n _original_init = cls.__init__\n \n+ @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n@@ -117,11 +89,7 @@\n \n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n- cls.__doc__ = (\n- _make_func_spec_str(_original_init)\n- + cls.__doc__\n- + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n- )\n+ cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n \n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n", "issue": "`experimental` decorator breaks class documentation.\nThe `experimental` decorator used on classes break documentation. This could be one manifestation but there is an issue with how the documentation including type hints are propagated to the decorated class. This does not apply for free functions.\r\n\r\nSee https://github.com/optuna/optuna/pull/1265#issuecomment-633195955 for how it may break.\r\n\r\n## Expected behavior\r\n\r\nClass documentation should not be altered by applying the experimental decorator.\r\n\r\n## Steps to reproduce\r\n\r\n1. Apply the experimental decorator to a class.\r\n1. Build the document (`cd docs && make html`) \r\n1. Open the rendered documentation and note that the class signatures is broken.\r\n\r\n## Additional context (optional)\r\n\r\n- An issue regarding the indentation https://github.com/optuna/optuna/issues/1213.\r\n\n", "before_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _make_func_spec_str(func: Callable[..., Any]) -> str:\n\n name = func.__name__\n argspec = inspect.getfullargspec(func)\n\n n_defaults = len(argspec.defaults) if argspec.defaults is not None else 0\n offset = int(len(argspec.args) > 0 and argspec.args[0] == \"self\")\n\n if n_defaults > 0:\n args = \", \".join(argspec.args[offset:-n_defaults])\n with_default_values = \", \".join(\n [\n \"{}={}\".format(a, d)\n for a, d in zip(argspec.args[-n_defaults:], argspec.defaults) # type: ignore\n ]\n )\n else:\n args = \", \".join(argspec.args[offset:])\n with_default_values = \"\"\n\n if len(args) > 0 and len(with_default_values) > 0:\n args += \", \"\n\n # NOTE(crcrpar): The four spaces are necessary to correctly render documentation.\n # Different classes or methods require more spaces.\n str_args_description = \"(\" + args + with_default_values + \")\\n\\n \"\n return name + str_args_description\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ = (\n _make_func_spec_str(_original_init)\n + cls.__doc__\n + _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n )\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}], "after_files": [{"content": "import functools\nimport inspect\nfrom typing import Any\nfrom typing import Callable\nimport warnings\n\nfrom optuna.exceptions import ExperimentalWarning\n\n\n# White spaces of each line are necessary to beautifully rendered documentation.\n# NOTE(crcrpar): When `experimental` decorator is applied to member methods, these lines require\n# another four spaces.\n_EXPERIMENTAL_DOCSTRING_TEMPLATE = \"\"\"\n\n .. note::\n Added in v{ver} as an experimental feature. The interface may change in newer versions\n without prior notice. See https://github.com/optuna/optuna/releases/tag/v{ver}.\n\"\"\"\n\n\ndef _validate_version(version: str) -> None:\n\n if not isinstance(version, str) or len(version.split(\".\")) != 3:\n raise ValueError(\n \"Invalid version specification. Must follow `x.y.z` format but `{}` is given\".format(\n version\n )\n )\n\n\ndef experimental(version: str, name: str = None) -> Any:\n \"\"\"Decorate class or function as experimental.\n\n Args:\n version: The first version that supports the target feature.\n name: The name of the feature. Defaults to the function or class name. Optional.\n \"\"\"\n\n _validate_version(version)\n\n def _experimental_wrapper(f: Any) -> Any:\n # f is either func or class.\n\n def _experimental_func(func: Callable[[Any], Any]) -> Callable[[Any], Any]:\n\n docstring = _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n if func.__doc__ is None:\n func.__doc__ = \"\"\n func.__doc__ += docstring\n\n # TODO(crcrpar): Annotate this correctly.\n @functools.wraps(func)\n def new_func(*args: Any, **kwargs: Any) -> Any:\n \"\"\"Wrapped function.\"\"\"\n\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else func.__name__, version\n ),\n ExperimentalWarning,\n )\n\n return func(*args, **kwargs) # type: ignore\n\n return new_func\n\n def _experimental_class(cls: Any) -> Any:\n \"\"\"Decorates a class as experimental.\n\n This decorator is supposed to be applied to the experimental class.\n \"\"\"\n\n _original_init = cls.__init__\n\n @functools.wraps(_original_init)\n def wrapped_init(self, *args, **kwargs) -> None: # type: ignore\n warnings.warn(\n \"{} is experimental (supported from v{}). \"\n \"The interface can change in the future.\".format(\n name if name is not None else cls.__name__, version\n ),\n ExperimentalWarning,\n )\n\n _original_init(self, *args, **kwargs)\n\n cls.__init__ = wrapped_init\n\n if cls.__doc__ is None:\n cls.__doc__ = \"\"\n cls.__doc__ += _EXPERIMENTAL_DOCSTRING_TEMPLATE.format(ver=version)\n return cls\n\n return _experimental_class(f) if inspect.isclass(f) else _experimental_func(f)\n\n return _experimental_wrapper\n", "path": "optuna/_experimental.py"}]}
1,662
540
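
Illustrative note (not part of the dataset row above): the golden diff in the preceding record fixes the broken class documentation by wrapping the original `__init__` with `functools.wraps` and appending to, rather than rebuilding, the class docstring. A minimal, self-contained sketch of that pattern is below; the `Example` class, warning text, and default argument are invented stand-ins, not the real optuna code.

```python
import functools
import inspect
import warnings


def experimental_init(init):
    # functools.wraps copies __name__ and __doc__ and sets __wrapped__, so
    # inspect (and documentation tools built on it) still see the original
    # __init__ signature instead of (*args, **kwargs).
    @functools.wraps(init)
    def wrapped(self, *args, **kwargs):
        warnings.warn("This class is experimental.", FutureWarning)
        init(self, *args, **kwargs)

    return wrapped


class Example:
    """An example class."""

    def __init__(self, x: int = 0) -> None:
        self.x = x


Example.__init__ = experimental_init(Example.__init__)

if __name__ == "__main__":
    # Prints "(self, x: int = 0) -> None" because the metadata was preserved.
    print(inspect.signature(Example.__init__))
```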
gh_patches_debug_25202
rasdani/github-patches
git_diff
opsdroid__opsdroid-12
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Connectors should fork When a connector is started it should fork into its own process. This is because connectors block to accept messages from their source. This requires #5 to enable persistent memory between connector processes. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/core.py` Content: ``` 1 """Core components of OpsDroid.""" 2 3 import logging 4 import sys 5 import weakref 6 from opsdroid.helper import match 7 from opsdroid.memory import Memory 8 9 10 class OpsDroid(): 11 """Root object for opsdroid.""" 12 13 instances = [] 14 15 def __init__(self): 16 """Start opsdroid.""" 17 self.bot_name = 'opsdroid' 18 self.sys_status = 0 19 self.connectors = [] 20 self.skills = [] 21 self.memory = Memory() 22 logging.info("Created main opsdroid object") 23 24 def __enter__(self): 25 """Add self to existing instances.""" 26 if len(self.__class__.instances) == 0: 27 self.__class__.instances.append(weakref.proxy(self)) 28 else: 29 self.critical("opsdroid has already been started", 1) 30 return self 31 32 def __exit__(self, exc_type, exc_value, traceback): 33 """Remove self from existing instances.""" 34 self.__class__.instances = [] 35 36 def exit(self): 37 """Exit application.""" 38 logging.info("Exiting application with return code " + 39 str(self.sys_status)) 40 sys.exit(self.sys_status) 41 42 def critical(self, error, code): 43 """Exit due to unrecoverable error.""" 44 self.sys_status = code 45 logging.critical(error) 46 print("Error: " + error) 47 self.exit() 48 49 def start_connectors(self, connectors): 50 """Start the connectors.""" 51 if len(connectors) == 0: 52 self.critical("All connectors failed to load", 1) 53 for connector_module in connectors: 54 for name, cls in connector_module["module"].__dict__.items(): 55 if isinstance(cls, type) and "Connector" in name: 56 connector_module["config"]["bot-name"] = self.bot_name 57 connector = cls(connector_module["config"]) 58 self.connectors.append(connector) 59 connector.connect(self) 60 61 def start_databases(self, databases): 62 """Start the databases.""" 63 if len(databases) == 0: 64 logging.warning("All databases failed to load") 65 for database_module in databases: 66 for name, cls in database_module["module"].__dict__.items(): 67 if isinstance(cls, type) and "Database" in name: 68 logging.debug("Adding database: " + name) 69 database = cls(database_module["config"]) 70 self.memory.databases.append(database) 71 database.connect() 72 73 def load_regex_skill(self, regex, skill): 74 """Load skills.""" 75 self.skills.append({"regex": regex, "skill": skill}) 76 77 def parse(self, message): 78 """Parse a string against all skills.""" 79 if message.text.strip() != "": 80 logging.debug("Parsing input: " + message.text) 81 for skill in self.skills: 82 if "regex" in skill: 83 regex = match(skill["regex"], message.text) 84 if regex: 85 message.regex = regex 86 skill["skill"](self, message) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/core.py b/opsdroid/core.py --- a/opsdroid/core.py +++ b/opsdroid/core.py @@ -3,6 +3,7 @@ import logging import sys import weakref +from multiprocessing import Process from opsdroid.helper import match from opsdroid.memory import Memory @@ -17,6 +18,7 @@ self.bot_name = 'opsdroid' self.sys_status = 0 self.connectors = [] + self.connector_jobs = [] self.skills = [] self.memory = Memory() logging.info("Created main opsdroid object") @@ -56,7 +58,11 @@ connector_module["config"]["bot-name"] = self.bot_name connector = cls(connector_module["config"]) self.connectors.append(connector) - connector.connect(self) + job = Process(target=connector.connect, args=(self,)) + job.start() + self.connector_jobs.append(job) + for job in self.connector_jobs: + job.join() def start_databases(self, databases): """Start the databases."""
{"golden_diff": "diff --git a/opsdroid/core.py b/opsdroid/core.py\n--- a/opsdroid/core.py\n+++ b/opsdroid/core.py\n@@ -3,6 +3,7 @@\n import logging\n import sys\n import weakref\n+from multiprocessing import Process\n from opsdroid.helper import match\n from opsdroid.memory import Memory\n \n@@ -17,6 +18,7 @@\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n+ self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n@@ -56,7 +58,11 @@\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n- connector.connect(self)\n+ job = Process(target=connector.connect, args=(self,))\n+ job.start()\n+ self.connector_jobs.append(job)\n+ for job in self.connector_jobs:\n+ job.join()\n \n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n", "issue": "Connectors should fork\nWhen a connector is started it should fork into its own process. This is because connectors block to accept messages from their source.\n\nThis requires #5 to enable persistent memory between connector processes.\n\n", "before_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n connector.connect(self)\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}], "after_files": [{"content": "\"\"\"Core components of OpsDroid.\"\"\"\n\nimport logging\nimport sys\nimport weakref\nfrom multiprocessing import Process\nfrom opsdroid.helper import match\nfrom opsdroid.memory import Memory\n\n\nclass OpsDroid():\n \"\"\"Root object for opsdroid.\"\"\"\n\n instances = []\n\n def __init__(self):\n \"\"\"Start opsdroid.\"\"\"\n self.bot_name = 'opsdroid'\n self.sys_status = 0\n self.connectors = []\n self.connector_jobs = []\n self.skills = []\n self.memory = Memory()\n logging.info(\"Created main opsdroid object\")\n\n def __enter__(self):\n \"\"\"Add self to existing instances.\"\"\"\n if len(self.__class__.instances) == 0:\n self.__class__.instances.append(weakref.proxy(self))\n else:\n self.critical(\"opsdroid has already been started\", 1)\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n \"\"\"Remove self from existing instances.\"\"\"\n self.__class__.instances = []\n\n def exit(self):\n \"\"\"Exit application.\"\"\"\n logging.info(\"Exiting application with return code \" +\n str(self.sys_status))\n sys.exit(self.sys_status)\n\n def critical(self, error, code):\n \"\"\"Exit due to unrecoverable error.\"\"\"\n self.sys_status = code\n logging.critical(error)\n print(\"Error: \" + error)\n self.exit()\n\n def start_connectors(self, connectors):\n \"\"\"Start the connectors.\"\"\"\n if len(connectors) == 0:\n self.critical(\"All connectors failed to load\", 1)\n for connector_module in connectors:\n for name, cls in connector_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Connector\" in name:\n connector_module[\"config\"][\"bot-name\"] = self.bot_name\n connector = cls(connector_module[\"config\"])\n self.connectors.append(connector)\n job = Process(target=connector.connect, args=(self,))\n job.start()\n self.connector_jobs.append(job)\n for job in self.connector_jobs:\n job.join()\n\n def start_databases(self, databases):\n \"\"\"Start the databases.\"\"\"\n if len(databases) == 0:\n logging.warning(\"All databases failed to load\")\n for database_module in databases:\n for name, cls in database_module[\"module\"].__dict__.items():\n if isinstance(cls, type) and \"Database\" in name:\n logging.debug(\"Adding database: \" + name)\n database = cls(database_module[\"config\"])\n self.memory.databases.append(database)\n database.connect()\n\n def load_regex_skill(self, regex, skill):\n \"\"\"Load skills.\"\"\"\n self.skills.append({\"regex\": regex, \"skill\": skill})\n\n def parse(self, message):\n \"\"\"Parse a string against all skills.\"\"\"\n if message.text.strip() != \"\":\n logging.debug(\"Parsing input: \" + message.text)\n for skill in self.skills:\n if \"regex\" in skill:\n regex = match(skill[\"regex\"], message.text)\n if regex:\n message.regex = regex\n skill[\"skill\"](self, message)\n", "path": "opsdroid/core.py"}]}
1,093
251
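
Illustrative note (not part of the dataset row above): the golden diff for gh_patches_debug_25202 starts each connector's blocking `connect()` call in its own `multiprocessing.Process` and joins the processes afterwards. Below is a minimal, runnable sketch of that pattern; the connector names and the `time.sleep` standing in for the blocking message loop are invented for illustration.

```python
import time
from multiprocessing import Process


def connect(name: str) -> None:
    # Stand-in for a connector's connect() loop, which normally blocks
    # while waiting for incoming messages from its source.
    time.sleep(0.2)
    print(name + " connector finished")


if __name__ == "__main__":
    # One process per connector, started up front and joined at the end,
    # mirroring the start/append/join structure in the diff above.
    jobs = [Process(target=connect, args=(name,)) for name in ("shell", "websocket")]
    for job in jobs:
        job.start()
    for job in jobs:
        job.join()
```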