Dataset columns:

| Column | Type | Values |
|---|---|---|
| problem_id | string | lengths 18 to 22 |
| source | string | 1 distinct value |
| task_type | string | 1 distinct value |
| in_source_id | string | lengths 13 to 58 |
| prompt | string | lengths 1.1k to 25.4k |
| golden_diff | string | lengths 145 to 5.13k |
| verification_info | string | lengths 582 to 39.1k |
| num_tokens | int64 | 271 to 4.1k |
| num_tokens_diff | int64 | 47 to 1.02k |
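
Assuming this dump corresponds to a Hugging Face dataset (the repo ID and split below are guesses based on the `source` column, not confirmed by the dump itself), a minimal loading sketch might look like:

```python
from datasets import load_dataset

# Assumption: the repo ID is taken from the `source` column and the split is
# assumed to be "train"; substitute the real values if they differ.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])  # identifiers for the patch task
print(row["prompt"][:300])        # issue text plus the candidate source files
print(row["golden_diff"][:300])   # reference fix as a unified diff
```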
problem_id: gh_patches_debug_18747
source: rasdani/github-patches
task_type: git_diff
in_source_id: CTFd__CTFd-1798
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CTFd pages route is relative when it shouldn't be For some reason CTFd page routes are being generated in the navbar as relative when they shouldn't be. E.g. (`page` instead of `/page`). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/plugins/__init__.py` Content: ``` 1 import glob 2 import importlib 3 import os 4 from collections import namedtuple 5 6 from flask import current_app as app 7 from flask import send_file, send_from_directory 8 9 from CTFd.utils.config.pages import get_pages 10 from CTFd.utils.decorators import admins_only as admins_only_wrapper 11 from CTFd.utils.plugins import override_template as utils_override_template 12 from CTFd.utils.plugins import ( 13 register_admin_script as utils_register_admin_plugin_script, 14 ) 15 from CTFd.utils.plugins import ( 16 register_admin_stylesheet as utils_register_admin_plugin_stylesheet, 17 ) 18 from CTFd.utils.plugins import register_script as utils_register_plugin_script 19 from CTFd.utils.plugins import register_stylesheet as utils_register_plugin_stylesheet 20 21 Menu = namedtuple("Menu", ["title", "route"]) 22 23 24 def register_plugin_assets_directory(app, base_path, admins_only=False, endpoint=None): 25 """ 26 Registers a directory to serve assets 27 28 :param app: A CTFd application 29 :param string base_path: The path to the directory 30 :param boolean admins_only: Whether or not the assets served out of the directory should be accessible to the public 31 :return: 32 """ 33 base_path = base_path.strip("/") 34 if endpoint is None: 35 endpoint = base_path.replace("/", ".") 36 37 def assets_handler(path): 38 return send_from_directory(base_path, path) 39 40 rule = "/" + base_path + "/<path:path>" 41 app.add_url_rule(rule=rule, endpoint=endpoint, view_func=assets_handler) 42 43 44 def register_plugin_asset(app, asset_path, admins_only=False, endpoint=None): 45 """ 46 Registers an file path to be served by CTFd 47 48 :param app: A CTFd application 49 :param string asset_path: The path to the asset file 50 :param boolean admins_only: Whether or not this file should be accessible to the public 51 :return: 52 """ 53 asset_path = asset_path.strip("/") 54 if endpoint is None: 55 endpoint = asset_path.replace("/", ".") 56 57 def asset_handler(): 58 return send_file(asset_path) 59 60 if admins_only: 61 asset_handler = admins_only_wrapper(asset_handler) 62 rule = "/" + asset_path 63 app.add_url_rule(rule=rule, endpoint=endpoint, view_func=asset_handler) 64 65 66 def override_template(*args, **kwargs): 67 """ 68 Overrides a template with the provided html content. 69 70 e.g. override_template('scoreboard.html', '<h1>scores</h1>') 71 """ 72 utils_override_template(*args, **kwargs) 73 74 75 def register_plugin_script(*args, **kwargs): 76 """ 77 Adds a given script to the base.html template which all pages inherit from 78 """ 79 utils_register_plugin_script(*args, **kwargs) 80 81 82 def register_plugin_stylesheet(*args, **kwargs): 83 """ 84 Adds a given stylesheet to the base.html template which all pages inherit from. 
85 """ 86 utils_register_plugin_stylesheet(*args, **kwargs) 87 88 89 def register_admin_plugin_script(*args, **kwargs): 90 """ 91 Adds a given script to the base.html of the admin theme which all admin pages inherit from 92 :param args: 93 :param kwargs: 94 :return: 95 """ 96 utils_register_admin_plugin_script(*args, **kwargs) 97 98 99 def register_admin_plugin_stylesheet(*args, **kwargs): 100 """ 101 Adds a given stylesheet to the base.html of the admin theme which all admin pages inherit from 102 :param args: 103 :param kwargs: 104 :return: 105 """ 106 utils_register_admin_plugin_stylesheet(*args, **kwargs) 107 108 109 def register_admin_plugin_menu_bar(title, route): 110 """ 111 Registers links on the Admin Panel menubar/navbar 112 113 :param name: A string that is shown on the navbar HTML 114 :param route: A string that is the href used by the link 115 :return: 116 """ 117 am = Menu(title=title, route=route) 118 app.admin_plugin_menu_bar.append(am) 119 120 121 def get_admin_plugin_menu_bar(): 122 """ 123 Access the list used to store the plugin menu bar 124 125 :return: Returns a list of Menu namedtuples. They have name, and route attributes. 126 """ 127 return app.admin_plugin_menu_bar 128 129 130 def register_user_page_menu_bar(title, route): 131 """ 132 Registers links on the User side menubar/navbar 133 134 :param name: A string that is shown on the navbar HTML 135 :param route: A string that is the href used by the link 136 :return: 137 """ 138 p = Menu(title=title, route=route) 139 app.plugin_menu_bar.append(p) 140 141 142 def get_user_page_menu_bar(): 143 """ 144 Access the list used to store the user page menu bar 145 146 :return: Returns a list of Menu namedtuples. They have name, and route attributes. 147 """ 148 return get_pages() + app.plugin_menu_bar 149 150 151 def bypass_csrf_protection(f): 152 """ 153 Decorator that allows a route to bypass the need for a CSRF nonce on POST requests. 154 155 This should be considered beta and may change in future versions. 156 157 :param f: A function that needs to bypass CSRF protection 158 :return: Returns a function with the _bypass_csrf attribute set which tells CTFd to not require CSRF protection. 159 """ 160 f._bypass_csrf = True 161 return f 162 163 164 def get_plugin_names(): 165 modules = sorted(glob.glob(app.plugins_dir + "/*")) 166 blacklist = {"__pycache__"} 167 plugins = [] 168 for module in modules: 169 module_name = os.path.basename(module) 170 if os.path.isdir(module) and module_name not in blacklist: 171 plugins.append(module_name) 172 return plugins 173 174 175 def init_plugins(app): 176 """ 177 Searches for the load function in modules in the CTFd/plugins folder. This function is called with the current CTFd 178 app as a parameter. This allows CTFd plugins to modify CTFd's behavior. 179 180 :param app: A CTFd application 181 :return: 182 """ 183 app.admin_plugin_scripts = [] 184 app.admin_plugin_stylesheets = [] 185 app.plugin_scripts = [] 186 app.plugin_stylesheets = [] 187 188 app.admin_plugin_menu_bar = [] 189 app.plugin_menu_bar = [] 190 app.plugins_dir = os.path.dirname(__file__) 191 192 if app.config.get("SAFE_MODE", False) is False: 193 for plugin in get_plugin_names(): 194 module = "." 
+ plugin 195 module = importlib.import_module(module, package="CTFd.plugins") 196 module.load(app) 197 print(" * Loaded module, %s" % module) 198 199 app.jinja_env.globals.update(get_admin_plugin_menu_bar=get_admin_plugin_menu_bar) 200 app.jinja_env.globals.update(get_user_page_menu_bar=get_user_page_menu_bar) 201 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/plugins/__init__.py b/CTFd/plugins/__init__.py --- a/CTFd/plugins/__init__.py +++ b/CTFd/plugins/__init__.py @@ -4,7 +4,7 @@ from collections import namedtuple from flask import current_app as app -from flask import send_file, send_from_directory +from flask import send_file, send_from_directory, url_for from CTFd.utils.config.pages import get_pages from CTFd.utils.decorators import admins_only as admins_only_wrapper @@ -145,7 +145,15 @@ :return: Returns a list of Menu namedtuples. They have name, and route attributes. """ - return get_pages() + app.plugin_menu_bar + pages = [] + for p in get_pages() + app.plugin_menu_bar: + if p.route.startswith("http"): + route = p.route + else: + route = url_for("views.static_html", route=p.route) + print(route) + pages.append(Menu(title=p.title, route=route)) + return pages def bypass_csrf_protection(f):
{"golden_diff": "diff --git a/CTFd/plugins/__init__.py b/CTFd/plugins/__init__.py\n--- a/CTFd/plugins/__init__.py\n+++ b/CTFd/plugins/__init__.py\n@@ -4,7 +4,7 @@\n from collections import namedtuple\n \n from flask import current_app as app\n-from flask import send_file, send_from_directory\n+from flask import send_file, send_from_directory, url_for\n \n from CTFd.utils.config.pages import get_pages\n from CTFd.utils.decorators import admins_only as admins_only_wrapper\n@@ -145,7 +145,15 @@\n \n :return: Returns a list of Menu namedtuples. They have name, and route attributes.\n \"\"\"\n- return get_pages() + app.plugin_menu_bar\n+ pages = []\n+ for p in get_pages() + app.plugin_menu_bar:\n+ if p.route.startswith(\"http\"):\n+ route = p.route\n+ else:\n+ route = url_for(\"views.static_html\", route=p.route)\n+ print(route)\n+ pages.append(Menu(title=p.title, route=route))\n+ return pages\n \n \n def bypass_csrf_protection(f):\n", "issue": "CTFd pages route is relative when it shouldn't be\nFor some reason CTFd page routes are being generated in the navbar as relative when they shouldn't be. E.g. (`page` instead of `/page`).\n", "before_files": [{"content": "import glob\nimport importlib\nimport os\nfrom collections import namedtuple\n\nfrom flask import current_app as app\nfrom flask import send_file, send_from_directory\n\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.decorators import admins_only as admins_only_wrapper\nfrom CTFd.utils.plugins import override_template as utils_override_template\nfrom CTFd.utils.plugins import (\n register_admin_script as utils_register_admin_plugin_script,\n)\nfrom CTFd.utils.plugins import (\n register_admin_stylesheet as utils_register_admin_plugin_stylesheet,\n)\nfrom CTFd.utils.plugins import register_script as utils_register_plugin_script\nfrom CTFd.utils.plugins import register_stylesheet as utils_register_plugin_stylesheet\n\nMenu = namedtuple(\"Menu\", [\"title\", \"route\"])\n\n\ndef register_plugin_assets_directory(app, base_path, admins_only=False, endpoint=None):\n \"\"\"\n Registers a directory to serve assets\n\n :param app: A CTFd application\n :param string base_path: The path to the directory\n :param boolean admins_only: Whether or not the assets served out of the directory should be accessible to the public\n :return:\n \"\"\"\n base_path = base_path.strip(\"/\")\n if endpoint is None:\n endpoint = base_path.replace(\"/\", \".\")\n\n def assets_handler(path):\n return send_from_directory(base_path, path)\n\n rule = \"/\" + base_path + \"/<path:path>\"\n app.add_url_rule(rule=rule, endpoint=endpoint, view_func=assets_handler)\n\n\ndef register_plugin_asset(app, asset_path, admins_only=False, endpoint=None):\n \"\"\"\n Registers an file path to be served by CTFd\n\n :param app: A CTFd application\n :param string asset_path: The path to the asset file\n :param boolean admins_only: Whether or not this file should be accessible to the public\n :return:\n \"\"\"\n asset_path = asset_path.strip(\"/\")\n if endpoint is None:\n endpoint = asset_path.replace(\"/\", \".\")\n\n def asset_handler():\n return send_file(asset_path)\n\n if admins_only:\n asset_handler = admins_only_wrapper(asset_handler)\n rule = \"/\" + asset_path\n app.add_url_rule(rule=rule, endpoint=endpoint, view_func=asset_handler)\n\n\ndef override_template(*args, **kwargs):\n \"\"\"\n Overrides a template with the provided html content.\n\n e.g. 
override_template('scoreboard.html', '<h1>scores</h1>')\n \"\"\"\n utils_override_template(*args, **kwargs)\n\n\ndef register_plugin_script(*args, **kwargs):\n \"\"\"\n Adds a given script to the base.html template which all pages inherit from\n \"\"\"\n utils_register_plugin_script(*args, **kwargs)\n\n\ndef register_plugin_stylesheet(*args, **kwargs):\n \"\"\"\n Adds a given stylesheet to the base.html template which all pages inherit from.\n \"\"\"\n utils_register_plugin_stylesheet(*args, **kwargs)\n\n\ndef register_admin_plugin_script(*args, **kwargs):\n \"\"\"\n Adds a given script to the base.html of the admin theme which all admin pages inherit from\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n utils_register_admin_plugin_script(*args, **kwargs)\n\n\ndef register_admin_plugin_stylesheet(*args, **kwargs):\n \"\"\"\n Adds a given stylesheet to the base.html of the admin theme which all admin pages inherit from\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n utils_register_admin_plugin_stylesheet(*args, **kwargs)\n\n\ndef register_admin_plugin_menu_bar(title, route):\n \"\"\"\n Registers links on the Admin Panel menubar/navbar\n\n :param name: A string that is shown on the navbar HTML\n :param route: A string that is the href used by the link\n :return:\n \"\"\"\n am = Menu(title=title, route=route)\n app.admin_plugin_menu_bar.append(am)\n\n\ndef get_admin_plugin_menu_bar():\n \"\"\"\n Access the list used to store the plugin menu bar\n\n :return: Returns a list of Menu namedtuples. They have name, and route attributes.\n \"\"\"\n return app.admin_plugin_menu_bar\n\n\ndef register_user_page_menu_bar(title, route):\n \"\"\"\n Registers links on the User side menubar/navbar\n\n :param name: A string that is shown on the navbar HTML\n :param route: A string that is the href used by the link\n :return:\n \"\"\"\n p = Menu(title=title, route=route)\n app.plugin_menu_bar.append(p)\n\n\ndef get_user_page_menu_bar():\n \"\"\"\n Access the list used to store the user page menu bar\n\n :return: Returns a list of Menu namedtuples. They have name, and route attributes.\n \"\"\"\n return get_pages() + app.plugin_menu_bar\n\n\ndef bypass_csrf_protection(f):\n \"\"\"\n Decorator that allows a route to bypass the need for a CSRF nonce on POST requests.\n\n This should be considered beta and may change in future versions.\n\n :param f: A function that needs to bypass CSRF protection\n :return: Returns a function with the _bypass_csrf attribute set which tells CTFd to not require CSRF protection.\n \"\"\"\n f._bypass_csrf = True\n return f\n\n\ndef get_plugin_names():\n modules = sorted(glob.glob(app.plugins_dir + \"/*\"))\n blacklist = {\"__pycache__\"}\n plugins = []\n for module in modules:\n module_name = os.path.basename(module)\n if os.path.isdir(module) and module_name not in blacklist:\n plugins.append(module_name)\n return plugins\n\n\ndef init_plugins(app):\n \"\"\"\n Searches for the load function in modules in the CTFd/plugins folder. This function is called with the current CTFd\n app as a parameter. 
This allows CTFd plugins to modify CTFd's behavior.\n\n :param app: A CTFd application\n :return:\n \"\"\"\n app.admin_plugin_scripts = []\n app.admin_plugin_stylesheets = []\n app.plugin_scripts = []\n app.plugin_stylesheets = []\n\n app.admin_plugin_menu_bar = []\n app.plugin_menu_bar = []\n app.plugins_dir = os.path.dirname(__file__)\n\n if app.config.get(\"SAFE_MODE\", False) is False:\n for plugin in get_plugin_names():\n module = \".\" + plugin\n module = importlib.import_module(module, package=\"CTFd.plugins\")\n module.load(app)\n print(\" * Loaded module, %s\" % module)\n\n app.jinja_env.globals.update(get_admin_plugin_menu_bar=get_admin_plugin_menu_bar)\n app.jinja_env.globals.update(get_user_page_menu_bar=get_user_page_menu_bar)\n", "path": "CTFd/plugins/__init__.py"}], "after_files": [{"content": "import glob\nimport importlib\nimport os\nfrom collections import namedtuple\n\nfrom flask import current_app as app\nfrom flask import send_file, send_from_directory, url_for\n\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.decorators import admins_only as admins_only_wrapper\nfrom CTFd.utils.plugins import override_template as utils_override_template\nfrom CTFd.utils.plugins import (\n register_admin_script as utils_register_admin_plugin_script,\n)\nfrom CTFd.utils.plugins import (\n register_admin_stylesheet as utils_register_admin_plugin_stylesheet,\n)\nfrom CTFd.utils.plugins import register_script as utils_register_plugin_script\nfrom CTFd.utils.plugins import register_stylesheet as utils_register_plugin_stylesheet\n\nMenu = namedtuple(\"Menu\", [\"title\", \"route\"])\n\n\ndef register_plugin_assets_directory(app, base_path, admins_only=False, endpoint=None):\n \"\"\"\n Registers a directory to serve assets\n\n :param app: A CTFd application\n :param string base_path: The path to the directory\n :param boolean admins_only: Whether or not the assets served out of the directory should be accessible to the public\n :return:\n \"\"\"\n base_path = base_path.strip(\"/\")\n if endpoint is None:\n endpoint = base_path.replace(\"/\", \".\")\n\n def assets_handler(path):\n return send_from_directory(base_path, path)\n\n rule = \"/\" + base_path + \"/<path:path>\"\n app.add_url_rule(rule=rule, endpoint=endpoint, view_func=assets_handler)\n\n\ndef register_plugin_asset(app, asset_path, admins_only=False, endpoint=None):\n \"\"\"\n Registers an file path to be served by CTFd\n\n :param app: A CTFd application\n :param string asset_path: The path to the asset file\n :param boolean admins_only: Whether or not this file should be accessible to the public\n :return:\n \"\"\"\n asset_path = asset_path.strip(\"/\")\n if endpoint is None:\n endpoint = asset_path.replace(\"/\", \".\")\n\n def asset_handler():\n return send_file(asset_path)\n\n if admins_only:\n asset_handler = admins_only_wrapper(asset_handler)\n rule = \"/\" + asset_path\n app.add_url_rule(rule=rule, endpoint=endpoint, view_func=asset_handler)\n\n\ndef override_template(*args, **kwargs):\n \"\"\"\n Overrides a template with the provided html content.\n\n e.g. 
override_template('scoreboard.html', '<h1>scores</h1>')\n \"\"\"\n utils_override_template(*args, **kwargs)\n\n\ndef register_plugin_script(*args, **kwargs):\n \"\"\"\n Adds a given script to the base.html template which all pages inherit from\n \"\"\"\n utils_register_plugin_script(*args, **kwargs)\n\n\ndef register_plugin_stylesheet(*args, **kwargs):\n \"\"\"\n Adds a given stylesheet to the base.html template which all pages inherit from.\n \"\"\"\n utils_register_plugin_stylesheet(*args, **kwargs)\n\n\ndef register_admin_plugin_script(*args, **kwargs):\n \"\"\"\n Adds a given script to the base.html of the admin theme which all admin pages inherit from\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n utils_register_admin_plugin_script(*args, **kwargs)\n\n\ndef register_admin_plugin_stylesheet(*args, **kwargs):\n \"\"\"\n Adds a given stylesheet to the base.html of the admin theme which all admin pages inherit from\n :param args:\n :param kwargs:\n :return:\n \"\"\"\n utils_register_admin_plugin_stylesheet(*args, **kwargs)\n\n\ndef register_admin_plugin_menu_bar(title, route):\n \"\"\"\n Registers links on the Admin Panel menubar/navbar\n\n :param name: A string that is shown on the navbar HTML\n :param route: A string that is the href used by the link\n :return:\n \"\"\"\n am = Menu(title=title, route=route)\n app.admin_plugin_menu_bar.append(am)\n\n\ndef get_admin_plugin_menu_bar():\n \"\"\"\n Access the list used to store the plugin menu bar\n\n :return: Returns a list of Menu namedtuples. They have name, and route attributes.\n \"\"\"\n return app.admin_plugin_menu_bar\n\n\ndef register_user_page_menu_bar(title, route):\n \"\"\"\n Registers links on the User side menubar/navbar\n\n :param name: A string that is shown on the navbar HTML\n :param route: A string that is the href used by the link\n :return:\n \"\"\"\n p = Menu(title=title, route=route)\n app.plugin_menu_bar.append(p)\n\n\ndef get_user_page_menu_bar():\n \"\"\"\n Access the list used to store the user page menu bar\n\n :return: Returns a list of Menu namedtuples. They have name, and route attributes.\n \"\"\"\n pages = []\n for p in get_pages() + app.plugin_menu_bar:\n if p.route.startswith(\"http\"):\n route = p.route\n else:\n route = url_for(\"views.static_html\", route=p.route)\n print(route)\n pages.append(Menu(title=p.title, route=route))\n return pages\n\n\ndef bypass_csrf_protection(f):\n \"\"\"\n Decorator that allows a route to bypass the need for a CSRF nonce on POST requests.\n\n This should be considered beta and may change in future versions.\n\n :param f: A function that needs to bypass CSRF protection\n :return: Returns a function with the _bypass_csrf attribute set which tells CTFd to not require CSRF protection.\n \"\"\"\n f._bypass_csrf = True\n return f\n\n\ndef get_plugin_names():\n modules = sorted(glob.glob(app.plugins_dir + \"/*\"))\n blacklist = {\"__pycache__\"}\n plugins = []\n for module in modules:\n module_name = os.path.basename(module)\n if os.path.isdir(module) and module_name not in blacklist:\n plugins.append(module_name)\n return plugins\n\n\ndef init_plugins(app):\n \"\"\"\n Searches for the load function in modules in the CTFd/plugins folder. This function is called with the current CTFd\n app as a parameter. 
This allows CTFd plugins to modify CTFd's behavior.\n\n :param app: A CTFd application\n :return:\n \"\"\"\n app.admin_plugin_scripts = []\n app.admin_plugin_stylesheets = []\n app.plugin_scripts = []\n app.plugin_stylesheets = []\n\n app.admin_plugin_menu_bar = []\n app.plugin_menu_bar = []\n app.plugins_dir = os.path.dirname(__file__)\n\n if app.config.get(\"SAFE_MODE\", False) is False:\n for plugin in get_plugin_names():\n module = \".\" + plugin\n module = importlib.import_module(module, package=\"CTFd.plugins\")\n module.load(app)\n print(\" * Loaded module, %s\" % module)\n\n app.jinja_env.globals.update(get_admin_plugin_menu_bar=get_admin_plugin_menu_bar)\n app.jinja_env.globals.update(get_user_page_menu_bar=get_user_page_menu_bar)\n", "path": "CTFd/plugins/__init__.py"}]}
num_tokens: 2,233
num_tokens_diff: 253
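
Each row's `verification_info` is a JSON string carrying the issue text together with `before_files` and `after_files` lists of `{path, content}` records, so a row can be checked end to end by writing out `before_files`, applying `golden_diff`, and comparing the result with `after_files`. A rough sketch of that check, assuming `git` is available on the PATH:

```python
import json
import subprocess
import tempfile
from pathlib import Path

def check_row(row: dict) -> bool:
    """Apply the row's golden_diff to before_files and compare with after_files."""
    info = json.loads(row["verification_info"])
    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        # Materialise the pre-patch files.
        for f in info["before_files"]:
            target = root / f["path"]
            target.parent.mkdir(parents=True, exist_ok=True)
            target.write_text(f["content"])
        # Apply the reference patch; `git apply` understands the a/ b/ path prefixes.
        patch = root / "golden.diff"
        patch.write_text(row["golden_diff"].rstrip("\n") + "\n")  # ensure trailing newline
        subprocess.run(
            ["git", "apply", "--whitespace=nowarn", str(patch)],
            cwd=root, check=True,
        )
        # The patched tree should now match the expected post-patch files.
        for f in info["after_files"]:
            if (root / f["path"]).read_text() != f["content"]:
                return False
    return True
```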
problem_id: gh_patches_debug_28343
source: rasdani/github-patches
task_type: git_diff
in_source_id: sanic-org__sanic-2537
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enforce exception handler uniquness 1. You should not be able to register the same exception more than once, or at least not on the same App/Blueprint. 2. Handlers should only be fetched in relation to the BP or App context of the matched route. This effectively means that some exceptions (`NotFound` could only be registered app level). _Originally posted by @ahopkins in https://github.com/sanic-org/sanic/issues/2121#issuecomment-827077284_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sanic/handlers.py` Content: ``` 1 from __future__ import annotations 2 3 from typing import Dict, List, Optional, Tuple, Type 4 5 from sanic.errorpages import BaseRenderer, TextRenderer, exception_response 6 from sanic.exceptions import ( 7 HeaderNotFound, 8 InvalidRangeType, 9 RangeNotSatisfiable, 10 ) 11 from sanic.log import deprecation, error_logger 12 from sanic.models.handler_types import RouteHandler 13 from sanic.response import text 14 15 16 class ErrorHandler: 17 """ 18 Provide :class:`sanic.app.Sanic` application with a mechanism to handle 19 and process any and all uncaught exceptions in a way the application 20 developer will set fit. 21 22 This error handling framework is built into the core that can be extended 23 by the developers to perform a wide range of tasks from recording the error 24 stats to reporting them to an external service that can be used for 25 realtime alerting system. 26 27 """ 28 29 def __init__( 30 self, 31 base: Type[BaseRenderer] = TextRenderer, 32 ): 33 self.cached_handlers: Dict[ 34 Tuple[Type[BaseException], Optional[str]], Optional[RouteHandler] 35 ] = {} 36 self.debug = False 37 self.base = base 38 39 @classmethod 40 def finalize(cls, *args, **kwargs): 41 deprecation( 42 "ErrorHandler.finalize is deprecated and no longer needed. " 43 "Please remove update your code to remove it. ", 44 22.12, 45 ) 46 47 def _full_lookup(self, exception, route_name: Optional[str] = None): 48 return self.lookup(exception, route_name) 49 50 def add(self, exception, handler, route_names: Optional[List[str]] = None): 51 """ 52 Add a new exception handler to an already existing handler object. 53 54 :param exception: Type of exception that need to be handled 55 :param handler: Reference to the method that will handle the exception 56 57 :type exception: :class:`sanic.exceptions.SanicException` or 58 :class:`Exception` 59 :type handler: ``function`` 60 61 :return: None 62 """ 63 if route_names: 64 for route in route_names: 65 self.cached_handlers[(exception, route)] = handler 66 else: 67 self.cached_handlers[(exception, None)] = handler 68 69 def lookup(self, exception, route_name: Optional[str] = None): 70 """ 71 Lookup the existing instance of :class:`ErrorHandler` and fetch the 72 registered handler for a specific type of exception. 73 74 This method leverages a dict lookup to speedup the retrieval process. 
75 76 :param exception: Type of exception 77 78 :type exception: :class:`sanic.exceptions.SanicException` or 79 :class:`Exception` 80 81 :return: Registered function if found ``None`` otherwise 82 """ 83 exception_class = type(exception) 84 85 for name in (route_name, None): 86 exception_key = (exception_class, name) 87 handler = self.cached_handlers.get(exception_key) 88 if handler: 89 return handler 90 91 for name in (route_name, None): 92 for ancestor in type.mro(exception_class): 93 exception_key = (ancestor, name) 94 if exception_key in self.cached_handlers: 95 handler = self.cached_handlers[exception_key] 96 self.cached_handlers[ 97 (exception_class, route_name) 98 ] = handler 99 return handler 100 101 if ancestor is BaseException: 102 break 103 self.cached_handlers[(exception_class, route_name)] = None 104 handler = None 105 return handler 106 107 _lookup = _full_lookup 108 109 def response(self, request, exception): 110 """Fetches and executes an exception handler and returns a response 111 object 112 113 :param request: Instance of :class:`sanic.request.Request` 114 :param exception: Exception to handle 115 116 :type request: :class:`sanic.request.Request` 117 :type exception: :class:`sanic.exceptions.SanicException` or 118 :class:`Exception` 119 120 :return: Wrap the return value obtained from :func:`default` 121 or registered handler for that type of exception. 122 """ 123 route_name = request.name if request else None 124 handler = self._lookup(exception, route_name) 125 response = None 126 try: 127 if handler: 128 response = handler(request, exception) 129 if response is None: 130 response = self.default(request, exception) 131 except Exception: 132 try: 133 url = repr(request.url) 134 except AttributeError: # no cov 135 url = "unknown" 136 response_message = ( 137 "Exception raised in exception handler " '"%s" for uri: %s' 138 ) 139 error_logger.exception(response_message, handler.__name__, url) 140 141 if self.debug: 142 return text(response_message % (handler.__name__, url), 500) 143 else: 144 return text("An error occurred while handling an error", 500) 145 return response 146 147 def default(self, request, exception): 148 """ 149 Provide a default behavior for the objects of :class:`ErrorHandler`. 150 If a developer chooses to extent the :class:`ErrorHandler` they can 151 provide a custom implementation for this method to behave in a way 152 they see fit. 153 154 :param request: Incoming request 155 :param exception: Exception object 156 157 :type request: :class:`sanic.request.Request` 158 :type exception: :class:`sanic.exceptions.SanicException` or 159 :class:`Exception` 160 :return: 161 """ 162 self.log(request, exception) 163 fallback = request.app.config.FALLBACK_ERROR_FORMAT 164 return exception_response( 165 request, 166 exception, 167 debug=self.debug, 168 base=self.base, 169 fallback=fallback, 170 ) 171 172 @staticmethod 173 def log(request, exception): 174 quiet = getattr(exception, "quiet", False) 175 noisy = getattr(request.app.config, "NOISY_EXCEPTIONS", False) 176 if quiet is False or noisy is True: 177 try: 178 url = repr(request.url) 179 except AttributeError: # no cov 180 url = "unknown" 181 182 error_logger.exception( 183 "Exception occurred while handling uri: %s", url 184 ) 185 186 187 class ContentRangeHandler: 188 """ 189 A mechanism to parse and process the incoming request headers to 190 extract the content range information. 
191 192 :param request: Incoming api request 193 :param stats: Stats related to the content 194 195 :type request: :class:`sanic.request.Request` 196 :type stats: :class:`posix.stat_result` 197 198 :ivar start: Content Range start 199 :ivar end: Content Range end 200 :ivar size: Length of the content 201 :ivar total: Total size identified by the :class:`posix.stat_result` 202 instance 203 :ivar ContentRangeHandler.headers: Content range header ``dict`` 204 """ 205 206 __slots__ = ("start", "end", "size", "total", "headers") 207 208 def __init__(self, request, stats): 209 self.total = stats.st_size 210 _range = request.headers.getone("range", None) 211 if _range is None: 212 raise HeaderNotFound("Range Header Not Found") 213 unit, _, value = tuple(map(str.strip, _range.partition("="))) 214 if unit != "bytes": 215 raise InvalidRangeType( 216 "%s is not a valid Range Type" % (unit,), self 217 ) 218 start_b, _, end_b = tuple(map(str.strip, value.partition("-"))) 219 try: 220 self.start = int(start_b) if start_b else None 221 except ValueError: 222 raise RangeNotSatisfiable( 223 "'%s' is invalid for Content Range" % (start_b,), self 224 ) 225 try: 226 self.end = int(end_b) if end_b else None 227 except ValueError: 228 raise RangeNotSatisfiable( 229 "'%s' is invalid for Content Range" % (end_b,), self 230 ) 231 if self.end is None: 232 if self.start is None: 233 raise RangeNotSatisfiable( 234 "Invalid for Content Range parameters", self 235 ) 236 else: 237 # this case represents `Content-Range: bytes 5-` 238 self.end = self.total - 1 239 else: 240 if self.start is None: 241 # this case represents `Content-Range: bytes -5` 242 self.start = self.total - self.end 243 self.end = self.total - 1 244 if self.start >= self.end: 245 raise RangeNotSatisfiable( 246 "Invalid for Content Range parameters", self 247 ) 248 self.size = self.end - self.start + 1 249 self.headers = { 250 "Content-Range": "bytes %s-%s/%s" 251 % (self.start, self.end, self.total) 252 } 253 254 def __bool__(self): 255 return self.size > 0 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sanic/handlers.py b/sanic/handlers.py --- a/sanic/handlers.py +++ b/sanic/handlers.py @@ -47,6 +47,28 @@ def _full_lookup(self, exception, route_name: Optional[str] = None): return self.lookup(exception, route_name) + def _add( + self, + key: Tuple[Type[BaseException], Optional[str]], + handler: RouteHandler, + ) -> None: + if key in self.cached_handlers: + exc, name = key + if name is None: + name = "__ALL_ROUTES__" + + error_logger.warning( + f"Duplicate exception handler definition on: route={name} " + f"and exception={exc}" + ) + deprecation( + "A duplicate exception handler definition was discovered. " + "This may cause unintended consequences. A warning has been " + "issued now, but it will not be allowed starting in v23.3.", + 23.3, + ) + self.cached_handlers[key] = handler + def add(self, exception, handler, route_names: Optional[List[str]] = None): """ Add a new exception handler to an already existing handler object. @@ -62,9 +84,9 @@ """ if route_names: for route in route_names: - self.cached_handlers[(exception, route)] = handler + self._add((exception, route), handler) else: - self.cached_handlers[(exception, None)] = handler + self._add((exception, None), handler) def lookup(self, exception, route_name: Optional[str] = None): """
{"golden_diff": "diff --git a/sanic/handlers.py b/sanic/handlers.py\n--- a/sanic/handlers.py\n+++ b/sanic/handlers.py\n@@ -47,6 +47,28 @@\n def _full_lookup(self, exception, route_name: Optional[str] = None):\n return self.lookup(exception, route_name)\n \n+ def _add(\n+ self,\n+ key: Tuple[Type[BaseException], Optional[str]],\n+ handler: RouteHandler,\n+ ) -> None:\n+ if key in self.cached_handlers:\n+ exc, name = key\n+ if name is None:\n+ name = \"__ALL_ROUTES__\"\n+\n+ error_logger.warning(\n+ f\"Duplicate exception handler definition on: route={name} \"\n+ f\"and exception={exc}\"\n+ )\n+ deprecation(\n+ \"A duplicate exception handler definition was discovered. \"\n+ \"This may cause unintended consequences. A warning has been \"\n+ \"issued now, but it will not be allowed starting in v23.3.\",\n+ 23.3,\n+ )\n+ self.cached_handlers[key] = handler\n+\n def add(self, exception, handler, route_names: Optional[List[str]] = None):\n \"\"\"\n Add a new exception handler to an already existing handler object.\n@@ -62,9 +84,9 @@\n \"\"\"\n if route_names:\n for route in route_names:\n- self.cached_handlers[(exception, route)] = handler\n+ self._add((exception, route), handler)\n else:\n- self.cached_handlers[(exception, None)] = handler\n+ self._add((exception, None), handler)\n \n def lookup(self, exception, route_name: Optional[str] = None):\n \"\"\"\n", "issue": "Enforce exception handler uniquness\n1. You should not be able to register the same exception more than once, or at least not on the same App/Blueprint.\r\n2. Handlers should only be fetched in relation to the BP or App context of the matched route. This effectively means that some exceptions (`NotFound` could only be registered app level).\r\n\r\n_Originally posted by @ahopkins in https://github.com/sanic-org/sanic/issues/2121#issuecomment-827077284_\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import Dict, List, Optional, Tuple, Type\n\nfrom sanic.errorpages import BaseRenderer, TextRenderer, exception_response\nfrom sanic.exceptions import (\n HeaderNotFound,\n InvalidRangeType,\n RangeNotSatisfiable,\n)\nfrom sanic.log import deprecation, error_logger\nfrom sanic.models.handler_types import RouteHandler\nfrom sanic.response import text\n\n\nclass ErrorHandler:\n \"\"\"\n Provide :class:`sanic.app.Sanic` application with a mechanism to handle\n and process any and all uncaught exceptions in a way the application\n developer will set fit.\n\n This error handling framework is built into the core that can be extended\n by the developers to perform a wide range of tasks from recording the error\n stats to reporting them to an external service that can be used for\n realtime alerting system.\n\n \"\"\"\n\n def __init__(\n self,\n base: Type[BaseRenderer] = TextRenderer,\n ):\n self.cached_handlers: Dict[\n Tuple[Type[BaseException], Optional[str]], Optional[RouteHandler]\n ] = {}\n self.debug = False\n self.base = base\n\n @classmethod\n def finalize(cls, *args, **kwargs):\n deprecation(\n \"ErrorHandler.finalize is deprecated and no longer needed. \"\n \"Please remove update your code to remove it. 
\",\n 22.12,\n )\n\n def _full_lookup(self, exception, route_name: Optional[str] = None):\n return self.lookup(exception, route_name)\n\n def add(self, exception, handler, route_names: Optional[List[str]] = None):\n \"\"\"\n Add a new exception handler to an already existing handler object.\n\n :param exception: Type of exception that need to be handled\n :param handler: Reference to the method that will handle the exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :type handler: ``function``\n\n :return: None\n \"\"\"\n if route_names:\n for route in route_names:\n self.cached_handlers[(exception, route)] = handler\n else:\n self.cached_handlers[(exception, None)] = handler\n\n def lookup(self, exception, route_name: Optional[str] = None):\n \"\"\"\n Lookup the existing instance of :class:`ErrorHandler` and fetch the\n registered handler for a specific type of exception.\n\n This method leverages a dict lookup to speedup the retrieval process.\n\n :param exception: Type of exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Registered function if found ``None`` otherwise\n \"\"\"\n exception_class = type(exception)\n\n for name in (route_name, None):\n exception_key = (exception_class, name)\n handler = self.cached_handlers.get(exception_key)\n if handler:\n return handler\n\n for name in (route_name, None):\n for ancestor in type.mro(exception_class):\n exception_key = (ancestor, name)\n if exception_key in self.cached_handlers:\n handler = self.cached_handlers[exception_key]\n self.cached_handlers[\n (exception_class, route_name)\n ] = handler\n return handler\n\n if ancestor is BaseException:\n break\n self.cached_handlers[(exception_class, route_name)] = None\n handler = None\n return handler\n\n _lookup = _full_lookup\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Instance of :class:`sanic.request.Request`\n :param exception: Exception to handle\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Wrap the return value obtained from :func:`default`\n or registered handler for that type of exception.\n \"\"\"\n route_name = request.name if request else None\n handler = self._lookup(exception, route_name)\n response = None\n try:\n if handler:\n response = handler(request, exception)\n if response is None:\n response = self.default(request, exception)\n except Exception:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n response_message = (\n \"Exception raised in exception handler \" '\"%s\" for uri: %s'\n )\n error_logger.exception(response_message, handler.__name__, url)\n\n if self.debug:\n return text(response_message % (handler.__name__, url), 500)\n else:\n return text(\"An error occurred while handling an error\", 500)\n return response\n\n def default(self, request, exception):\n \"\"\"\n Provide a default behavior for the objects of :class:`ErrorHandler`.\n If a developer chooses to extent the :class:`ErrorHandler` they can\n provide a custom implementation for this method to behave in a way\n they see fit.\n\n :param request: Incoming request\n :param exception: Exception object\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :return:\n \"\"\"\n 
self.log(request, exception)\n fallback = request.app.config.FALLBACK_ERROR_FORMAT\n return exception_response(\n request,\n exception,\n debug=self.debug,\n base=self.base,\n fallback=fallback,\n )\n\n @staticmethod\n def log(request, exception):\n quiet = getattr(exception, \"quiet\", False)\n noisy = getattr(request.app.config, \"NOISY_EXCEPTIONS\", False)\n if quiet is False or noisy is True:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n\n error_logger.exception(\n \"Exception occurred while handling uri: %s\", url\n )\n\n\nclass ContentRangeHandler:\n \"\"\"\n A mechanism to parse and process the incoming request headers to\n extract the content range information.\n\n :param request: Incoming api request\n :param stats: Stats related to the content\n\n :type request: :class:`sanic.request.Request`\n :type stats: :class:`posix.stat_result`\n\n :ivar start: Content Range start\n :ivar end: Content Range end\n :ivar size: Length of the content\n :ivar total: Total size identified by the :class:`posix.stat_result`\n instance\n :ivar ContentRangeHandler.headers: Content range header ``dict``\n \"\"\"\n\n __slots__ = (\"start\", \"end\", \"size\", \"total\", \"headers\")\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.getone(\"range\", None)\n if _range is None:\n raise HeaderNotFound(\"Range Header Not Found\")\n unit, _, value = tuple(map(str.strip, _range.partition(\"=\")))\n if unit != \"bytes\":\n raise InvalidRangeType(\n \"%s is not a valid Range Type\" % (unit,), self\n )\n start_b, _, end_b = tuple(map(str.strip, value.partition(\"-\")))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise RangeNotSatisfiable(\n \"'%s' is invalid for Content Range\" % (start_b,), self\n )\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise RangeNotSatisfiable(\n \"'%s' is invalid for Content Range\" % (end_b,), self\n )\n if self.end is None:\n if self.start is None:\n raise RangeNotSatisfiable(\n \"Invalid for Content Range parameters\", self\n )\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total - 1\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total - 1\n if self.start >= self.end:\n raise RangeNotSatisfiable(\n \"Invalid for Content Range parameters\", self\n )\n self.size = self.end - self.start + 1\n self.headers = {\n \"Content-Range\": \"bytes %s-%s/%s\"\n % (self.start, self.end, self.total)\n }\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import Dict, List, Optional, Tuple, Type\n\nfrom sanic.errorpages import BaseRenderer, TextRenderer, exception_response\nfrom sanic.exceptions import (\n HeaderNotFound,\n InvalidRangeType,\n RangeNotSatisfiable,\n)\nfrom sanic.log import deprecation, error_logger\nfrom sanic.models.handler_types import RouteHandler\nfrom sanic.response import text\n\n\nclass ErrorHandler:\n \"\"\"\n Provide :class:`sanic.app.Sanic` application with a mechanism to handle\n and process any and all uncaught exceptions in a way the application\n developer will set fit.\n\n This error handling framework is built into the core that can be extended\n by the developers to perform a wide range of tasks from recording the error\n stats to reporting them to an external service that can be used 
for\n realtime alerting system.\n\n \"\"\"\n\n def __init__(\n self,\n base: Type[BaseRenderer] = TextRenderer,\n ):\n self.cached_handlers: Dict[\n Tuple[Type[BaseException], Optional[str]], Optional[RouteHandler]\n ] = {}\n self.debug = False\n self.base = base\n\n @classmethod\n def finalize(cls, *args, **kwargs):\n deprecation(\n \"ErrorHandler.finalize is deprecated and no longer needed. \"\n \"Please remove update your code to remove it. \",\n 22.12,\n )\n\n def _full_lookup(self, exception, route_name: Optional[str] = None):\n return self.lookup(exception, route_name)\n\n def _add(\n self,\n key: Tuple[Type[BaseException], Optional[str]],\n handler: RouteHandler,\n ) -> None:\n if key in self.cached_handlers:\n exc, name = key\n if name is None:\n name = \"__ALL_ROUTES__\"\n\n error_logger.warning(\n f\"Duplicate exception handler definition on: route={name} \"\n f\"and exception={exc}\"\n )\n deprecation(\n \"A duplicate exception handler definition was discovered. \"\n \"This may cause unintended consequences. A warning has been \"\n \"issued now, but it will not be allowed starting in v23.3.\",\n 23.3,\n )\n self.cached_handlers[key] = handler\n\n def add(self, exception, handler, route_names: Optional[List[str]] = None):\n \"\"\"\n Add a new exception handler to an already existing handler object.\n\n :param exception: Type of exception that need to be handled\n :param handler: Reference to the method that will handle the exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :type handler: ``function``\n\n :return: None\n \"\"\"\n if route_names:\n for route in route_names:\n self._add((exception, route), handler)\n else:\n self._add((exception, None), handler)\n\n def lookup(self, exception, route_name: Optional[str] = None):\n \"\"\"\n Lookup the existing instance of :class:`ErrorHandler` and fetch the\n registered handler for a specific type of exception.\n\n This method leverages a dict lookup to speedup the retrieval process.\n\n :param exception: Type of exception\n\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Registered function if found ``None`` otherwise\n \"\"\"\n exception_class = type(exception)\n\n for name in (route_name, None):\n exception_key = (exception_class, name)\n handler = self.cached_handlers.get(exception_key)\n if handler:\n return handler\n\n for name in (route_name, None):\n for ancestor in type.mro(exception_class):\n exception_key = (ancestor, name)\n if exception_key in self.cached_handlers:\n handler = self.cached_handlers[exception_key]\n self.cached_handlers[\n (exception_class, route_name)\n ] = handler\n return handler\n\n if ancestor is BaseException:\n break\n self.cached_handlers[(exception_class, route_name)] = None\n handler = None\n return handler\n\n _lookup = _full_lookup\n\n def response(self, request, exception):\n \"\"\"Fetches and executes an exception handler and returns a response\n object\n\n :param request: Instance of :class:`sanic.request.Request`\n :param exception: Exception to handle\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n\n :return: Wrap the return value obtained from :func:`default`\n or registered handler for that type of exception.\n \"\"\"\n route_name = request.name if request else None\n handler = self._lookup(exception, route_name)\n response = None\n try:\n if handler:\n response = handler(request, exception)\n if response is 
None:\n response = self.default(request, exception)\n except Exception:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n response_message = (\n \"Exception raised in exception handler \" '\"%s\" for uri: %s'\n )\n error_logger.exception(response_message, handler.__name__, url)\n\n if self.debug:\n return text(response_message % (handler.__name__, url), 500)\n else:\n return text(\"An error occurred while handling an error\", 500)\n return response\n\n def default(self, request, exception):\n \"\"\"\n Provide a default behavior for the objects of :class:`ErrorHandler`.\n If a developer chooses to extent the :class:`ErrorHandler` they can\n provide a custom implementation for this method to behave in a way\n they see fit.\n\n :param request: Incoming request\n :param exception: Exception object\n\n :type request: :class:`sanic.request.Request`\n :type exception: :class:`sanic.exceptions.SanicException` or\n :class:`Exception`\n :return:\n \"\"\"\n self.log(request, exception)\n fallback = request.app.config.FALLBACK_ERROR_FORMAT\n return exception_response(\n request,\n exception,\n debug=self.debug,\n base=self.base,\n fallback=fallback,\n )\n\n @staticmethod\n def log(request, exception):\n quiet = getattr(exception, \"quiet\", False)\n noisy = getattr(request.app.config, \"NOISY_EXCEPTIONS\", False)\n if quiet is False or noisy is True:\n try:\n url = repr(request.url)\n except AttributeError: # no cov\n url = \"unknown\"\n\n error_logger.exception(\n \"Exception occurred while handling uri: %s\", url\n )\n\n\nclass ContentRangeHandler:\n \"\"\"\n A mechanism to parse and process the incoming request headers to\n extract the content range information.\n\n :param request: Incoming api request\n :param stats: Stats related to the content\n\n :type request: :class:`sanic.request.Request`\n :type stats: :class:`posix.stat_result`\n\n :ivar start: Content Range start\n :ivar end: Content Range end\n :ivar size: Length of the content\n :ivar total: Total size identified by the :class:`posix.stat_result`\n instance\n :ivar ContentRangeHandler.headers: Content range header ``dict``\n \"\"\"\n\n __slots__ = (\"start\", \"end\", \"size\", \"total\", \"headers\")\n\n def __init__(self, request, stats):\n self.total = stats.st_size\n _range = request.headers.getone(\"range\", None)\n if _range is None:\n raise HeaderNotFound(\"Range Header Not Found\")\n unit, _, value = tuple(map(str.strip, _range.partition(\"=\")))\n if unit != \"bytes\":\n raise InvalidRangeType(\n \"%s is not a valid Range Type\" % (unit,), self\n )\n start_b, _, end_b = tuple(map(str.strip, value.partition(\"-\")))\n try:\n self.start = int(start_b) if start_b else None\n except ValueError:\n raise RangeNotSatisfiable(\n \"'%s' is invalid for Content Range\" % (start_b,), self\n )\n try:\n self.end = int(end_b) if end_b else None\n except ValueError:\n raise RangeNotSatisfiable(\n \"'%s' is invalid for Content Range\" % (end_b,), self\n )\n if self.end is None:\n if self.start is None:\n raise RangeNotSatisfiable(\n \"Invalid for Content Range parameters\", self\n )\n else:\n # this case represents `Content-Range: bytes 5-`\n self.end = self.total - 1\n else:\n if self.start is None:\n # this case represents `Content-Range: bytes -5`\n self.start = self.total - self.end\n self.end = self.total - 1\n if self.start >= self.end:\n raise RangeNotSatisfiable(\n \"Invalid for Content Range parameters\", self\n )\n self.size = self.end - self.start + 1\n self.headers = {\n \"Content-Range\": 
\"bytes %s-%s/%s\"\n % (self.start, self.end, self.total)\n }\n\n def __bool__(self):\n return self.size > 0\n", "path": "sanic/handlers.py"}]}
num_tokens: 2,912
num_tokens_diff: 391
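
The `num_tokens` and `num_tokens_diff` columns appear to be precomputed token counts for the prompt and the reference diff; the tokenizer that produced them is not recorded in this dump, so any recount is only approximate. Reusing `row` from the loading sketch above:

```python
import tiktoken  # assumption: an OpenAI-style BPE, since the original tokenizer is not documented here

enc = tiktoken.get_encoding("cl100k_base")
approx_prompt = len(enc.encode(row["prompt"]))
approx_diff = len(enc.encode(row["golden_diff"]))
print(approx_prompt, row["num_tokens"])   # compare against the stored count
print(approx_diff, row["num_tokens_diff"])
```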
problem_id: gh_patches_debug_23950
source: rasdani/github-patches
task_type: git_diff
in_source_id: cleanlab__cleanlab-514
prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ensure unit tests work with termcolor v2.1.0 tests/test_token_classification.py currently fails after termcolor just upgraded to v2.1.0, specifically `test_color_sentence` - [ ] update unit test code to make them pass with termcolor v2.1.0 - [ ] ensure new unit test code also works with older versions of termcolor pre v2.1.0 (suboptimal but ok if the unit test only works with versions post v2.0.0, as long as the package works with all termcolor versions currently supported). - [ ] remove version upper bound on termcolor if it has been added to package in the meantime. https://pypi.org/project/termcolor/ https://github.com/termcolor/termcolor/pull/25/files https://github.com/cleanlab/cleanlab/actions/runs/3357515340/jobs/5563372689 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cleanlab/internal/token_classification_utils.py` Content: ``` 1 # Copyright (C) 2017-2022 Cleanlab Inc. 2 # This file is part of cleanlab. 3 # 4 # cleanlab is free software: you can redistribute it and/or modify 5 # it under the terms of the GNU Affero General Public License as published 6 # by the Free Software Foundation, either version 3 of the License, or 7 # (at your option) any later version. 8 # 9 # cleanlab is distributed in the hope that it will be useful, 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 # GNU Affero General Public License for more details. 13 # 14 # You should have received a copy of the GNU Affero General Public License 15 # along with cleanlab. If not, see <https://www.gnu.org/licenses/>. 16 17 """ 18 Helper methods used internally in cleanlab.token_classification 19 """ 20 21 import re 22 import string 23 import numpy as np 24 from termcolor import colored 25 from typing import List, Optional, Callable, Tuple 26 27 28 def get_sentence(words: List[str]) -> str: 29 """ 30 Get sentence formed by a list of words with minor processing for readability 31 32 Parameters 33 ---------- 34 words: 35 list of word-level tokens 36 37 Returns 38 ---------- 39 sentence: 40 sentence formed by list of word-level tokens 41 42 Examples 43 -------- 44 >>> from cleanlab.internal.token_classification_utils import get_sentence 45 >>> words = ["This", "is", "a", "sentence", "."] 46 >>> get_sentence(words) 47 'This is a sentence.' 
48 """ 49 sentence = "" 50 for word in words: 51 if word not in string.punctuation or word in ["-", "("]: 52 word = " " + word 53 sentence += word 54 sentence = sentence.replace(" '", "'").replace("( ", "(").strip() 55 return sentence 56 57 58 def filter_sentence( 59 sentences: List[str], 60 condition: Optional[Callable[[str], bool]] = None, 61 ) -> Tuple[List[str], List[bool]]: 62 """ 63 Filter sentence based on some condition, and returns filter mask 64 65 Parameters 66 ---------- 67 sentences: 68 list of sentences 69 70 condition: 71 sentence filtering condition 72 73 Returns 74 --------- 75 sentences: 76 list of sentences filtered 77 78 mask: 79 boolean mask such that `mask[i] == True` if the i'th sentence is included in the 80 filtered sentence, otherwise `mask[i] == False` 81 82 Examples 83 -------- 84 >>> from cleanlab.internal.token_classification_utils import filter_sentence 85 >>> sentences = ["Short sentence.", "This is a longer sentence."] 86 >>> condition = lambda x: len(x.split()) > 2 87 >>> long_sentences, _ = filter_sentence(sentences, condition) 88 >>> long_sentences 89 ['This is a longer sentence.'] 90 >>> document = ["# Headline", "Sentence 1.", "&", "Sentence 2."] 91 >>> sentences, mask = filter_sentence(document) 92 >>> sentences, mask 93 (['Sentence 1.', 'Sentence 2.'], [False, True, False, True]) 94 """ 95 if not condition: 96 condition = lambda sentence: len(sentence) > 1 and "#" not in sentence 97 mask = list(map(condition, sentences)) 98 sentences = [sentence for m, sentence in zip(mask, sentences) if m] 99 return sentences, mask 100 101 102 def process_token(token: str, replace: List[Tuple[str, str]] = [("#", "")]) -> str: 103 """ 104 Replaces special characters in the tokens 105 106 Parameters 107 ---------- 108 token: 109 token which potentially contains special characters 110 111 replace: 112 list of tuples `(s1, s2)`, where all occurances of s1 are replaced by s2 113 114 Returns 115 --------- 116 processed_token: 117 processed token whose special character has been replaced 118 119 Note 120 ---- 121 Only applies to characters in the original input token. 
122 123 Examples 124 -------- 125 >>> from cleanlab.internal.token_classification_utils import process_token 126 >>> token = "#Comment" 127 >>> process_token("#Comment") 128 'Comment' 129 130 Specify custom replacement rules 131 132 >>> replace = [("C", "a"), ("a", "C")] 133 >>> process_token("Cleanlab", replace) 134 'aleCnlCb' 135 """ 136 replace_dict = {re.escape(k): v for (k, v) in replace} 137 pattern = "|".join(replace_dict.keys()) 138 compiled_pattern = re.compile(pattern) 139 replacement = lambda match: replace_dict[re.escape(match.group(0))] 140 processed_token = compiled_pattern.sub(replacement, token) 141 return processed_token 142 143 144 def mapping(entities: List[int], maps: List[int]) -> List[int]: 145 """ 146 Map a list of entities to its corresponding entities 147 148 Parameters 149 ---------- 150 entities: 151 a list of given entities 152 153 maps: 154 a list of mapped entities, such that the i'th indexed token should be mapped to `maps[i]` 155 156 Returns 157 --------- 158 mapped_entities: 159 a list of mapped entities 160 161 Examples 162 -------- 163 >>> unique_identities = [0, 1, 2, 3, 4] # ["O", "B-PER", "I-PER", "B-LOC", "I-LOC"] 164 >>> maps = [0, 1, 1, 2, 2] # ["O", "PER", "PER", "LOC", "LOC"] 165 >>> mapping(unique_identities, maps) 166 [0, 1, 1, 2, 2] # ["O", "PER", "PER", "LOC", "LOC"] 167 >>> mapping([0, 0, 4, 4, 3, 4, 0, 2], maps) 168 [0, 0, 2, 2, 2, 2, 0, 1] # ["O", "O", "LOC", "LOC", "LOC", "LOC", "O", "PER"] 169 """ 170 f = lambda x: maps[x] 171 return list(map(f, entities)) 172 173 174 def merge_probs(probs: np.ndarray, maps: List[int]) -> np.ndarray: 175 """ 176 Merges model-predictive probabilities with desired mapping 177 178 Parameters 179 ---------- 180 probs: 181 np.array of shape `(N, K)`, where N is the number of tokens, and K is the number of classes for the model 182 183 maps: 184 a list of mapped index, such that the probability of the token being in the i'th class is mapped to the 185 `maps[i]` index. If `maps[i] == -1`, the i'th column of `probs` is ignored. If `np.any(maps == -1)`, the 186 returned probability is re-normalized. 187 188 Returns 189 --------- 190 probs_merged: 191 np.array of shape ``(N, K')``, where `K` is the number of new classes. Probabilities are merged and 192 re-normalized if necessary. 193 194 Examples 195 -------- 196 >>> import numpy as np 197 >>> from cleanlab.internal.token_classification_utils import merge_probs 198 >>> probs = np.array([ 199 ... [0.55, 0.0125, 0.0375, 0.1, 0.3], 200 ... [0.1, 0.8, 0, 0.075, 0.025], 201 ... ]) 202 >>> maps = [0, 1, 1, 2, 2] 203 >>> merge_probs(probs, maps) 204 array([[0.55, 0.05, 0.4 ], 205 [0.1 , 0.8 , 0.1 ]]) 206 """ 207 old_classes = probs.shape[1] 208 map_size = np.max(maps) + 1 209 probs_merged = np.zeros([len(probs), map_size], dtype=probs.dtype.type) 210 211 for i in range(old_classes): 212 if maps[i] >= 0: 213 probs_merged[:, maps[i]] += probs[:, i] 214 if -1 in maps: 215 row_sums = probs_merged.sum(axis=1) 216 probs_merged /= row_sums[:, np.newaxis] 217 return probs_merged 218 219 220 def color_sentence(sentence: str, word: str) -> str: 221 """ 222 Searches for a given token in the sentence and returns the sentence where the given token is colored red 223 224 Parameters 225 ---------- 226 sentence: 227 a sentence where the word is searched 228 229 word: 230 keyword to find in `sentence`. Assumes the word exists in the sentence. 
231 Returns 232 --------- 233 colored_sentence: 234 `sentence` where the every occurrence of the word is colored red, using ``termcolor.colored`` 235 236 Examples 237 -------- 238 >>> from cleanlab.internal.token_classification_utils import color_sentence 239 >>> sentence = "This is a sentence." 240 >>> word = "sentence" 241 >>> color_sentence(sentence, word) 242 'This is a \x1b[31msentence\x1b[0m.' 243 244 Also works for multiple occurrences of the word 245 246 >>> document = "This is a sentence. This is another sentence." 247 >>> word = "sentence" 248 >>> color_sentence(document, word) 249 'This is a \x1b[31msentence\x1b[0m. This is another \x1b[31msentence\x1b[0m.' 250 """ 251 colored_word = colored(word, "red") 252 colored_sentence, number_of_substitions = re.subn( 253 r"\b{}\b".format(re.escape(word)), colored_word, sentence 254 ) 255 if number_of_substitions == 0: 256 # Use basic string manipulation if regex fails 257 colored_sentence = sentence.replace(word, colored_word) 258 return colored_sentence 259 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cleanlab/internal/token_classification_utils.py b/cleanlab/internal/token_classification_utils.py --- a/cleanlab/internal/token_classification_utils.py +++ b/cleanlab/internal/token_classification_utils.py @@ -249,10 +249,35 @@ 'This is a \x1b[31msentence\x1b[0m. This is another \x1b[31msentence\x1b[0m.' """ colored_word = colored(word, "red") - colored_sentence, number_of_substitions = re.subn( - r"\b{}\b".format(re.escape(word)), colored_word, sentence + return _replace_sentence(sentence=sentence, word=word, new_word=colored_word) + + +def _replace_sentence(sentence: str, word: str, new_word: str) -> str: + """ + Searches for a given token in the sentence and returns the sentence where the given token has been replaced by + `new_word`. + + Parameters + ---------- + sentence: + a sentence where the word is searched + + word: + keyword to find in `sentence`. Assumes the word exists in the sentence. + + new_word: + the word to replace the keyword with + + Returns + --------- + new_sentence: + `sentence` where the every occurrence of the word is replaced by `colored_word` + """ + + new_sentence, number_of_substitions = re.subn( + r"\b{}\b".format(re.escape(word)), new_word, sentence ) if number_of_substitions == 0: # Use basic string manipulation if regex fails - colored_sentence = sentence.replace(word, colored_word) - return colored_sentence + new_sentence = sentence.replace(word, new_word) + return new_sentence
{"golden_diff": "diff --git a/cleanlab/internal/token_classification_utils.py b/cleanlab/internal/token_classification_utils.py\n--- a/cleanlab/internal/token_classification_utils.py\n+++ b/cleanlab/internal/token_classification_utils.py\n@@ -249,10 +249,35 @@\n 'This is a \\x1b[31msentence\\x1b[0m. This is another \\x1b[31msentence\\x1b[0m.'\n \"\"\"\n colored_word = colored(word, \"red\")\n- colored_sentence, number_of_substitions = re.subn(\n- r\"\\b{}\\b\".format(re.escape(word)), colored_word, sentence\n+ return _replace_sentence(sentence=sentence, word=word, new_word=colored_word)\n+\n+\n+def _replace_sentence(sentence: str, word: str, new_word: str) -> str:\n+ \"\"\"\n+ Searches for a given token in the sentence and returns the sentence where the given token has been replaced by\n+ `new_word`.\n+\n+ Parameters\n+ ----------\n+ sentence:\n+ a sentence where the word is searched\n+\n+ word:\n+ keyword to find in `sentence`. Assumes the word exists in the sentence.\n+\n+ new_word:\n+ the word to replace the keyword with\n+\n+ Returns\n+ ---------\n+ new_sentence:\n+ `sentence` where the every occurrence of the word is replaced by `colored_word`\n+ \"\"\"\n+\n+ new_sentence, number_of_substitions = re.subn(\n+ r\"\\b{}\\b\".format(re.escape(word)), new_word, sentence\n )\n if number_of_substitions == 0:\n # Use basic string manipulation if regex fails\n- colored_sentence = sentence.replace(word, colored_word)\n- return colored_sentence\n+ new_sentence = sentence.replace(word, new_word)\n+ return new_sentence\n", "issue": "Ensure unit tests\u00a0work with termcolor v2.1.0\ntests/test_token_classification.py currently fails after termcolor just upgraded to v2.1.0, specifically `test_color_sentence` \n\n- [ ] update unit test code to make them pass with termcolor v2.1.0\n- [ ] ensure new unit test code also works with older versions of termcolor pre v2.1.0 (suboptimal but ok if the unit test only works with versions post v2.0.0, as long as the package works with all termcolor versions currently supported).\n- [ ] remove version upper bound on termcolor if it has been added to package in the meantime.\n\nhttps://pypi.org/project/termcolor/\nhttps://github.com/termcolor/termcolor/pull/25/files\nhttps://github.com/cleanlab/cleanlab/actions/runs/3357515340/jobs/5563372689\n\n", "before_files": [{"content": "# Copyright (C) 2017-2022 Cleanlab Inc.\n# This file is part of cleanlab.\n#\n# cleanlab is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cleanlab is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with cleanlab. 
If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"\nHelper methods used internally in cleanlab.token_classification\n\"\"\"\n\nimport re\nimport string\nimport numpy as np\nfrom termcolor import colored\nfrom typing import List, Optional, Callable, Tuple\n\n\ndef get_sentence(words: List[str]) -> str:\n \"\"\"\n Get sentence formed by a list of words with minor processing for readability\n\n Parameters\n ----------\n words:\n list of word-level tokens\n\n Returns\n ----------\n sentence:\n sentence formed by list of word-level tokens\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import get_sentence\n >>> words = [\"This\", \"is\", \"a\", \"sentence\", \".\"]\n >>> get_sentence(words)\n 'This is a sentence.'\n \"\"\"\n sentence = \"\"\n for word in words:\n if word not in string.punctuation or word in [\"-\", \"(\"]:\n word = \" \" + word\n sentence += word\n sentence = sentence.replace(\" '\", \"'\").replace(\"( \", \"(\").strip()\n return sentence\n\n\ndef filter_sentence(\n sentences: List[str],\n condition: Optional[Callable[[str], bool]] = None,\n) -> Tuple[List[str], List[bool]]:\n \"\"\"\n Filter sentence based on some condition, and returns filter mask\n\n Parameters\n ----------\n sentences:\n list of sentences\n\n condition:\n sentence filtering condition\n\n Returns\n ---------\n sentences:\n list of sentences filtered\n\n mask:\n boolean mask such that `mask[i] == True` if the i'th sentence is included in the\n filtered sentence, otherwise `mask[i] == False`\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import filter_sentence\n >>> sentences = [\"Short sentence.\", \"This is a longer sentence.\"]\n >>> condition = lambda x: len(x.split()) > 2\n >>> long_sentences, _ = filter_sentence(sentences, condition)\n >>> long_sentences\n ['This is a longer sentence.']\n >>> document = [\"# Headline\", \"Sentence 1.\", \"&\", \"Sentence 2.\"]\n >>> sentences, mask = filter_sentence(document)\n >>> sentences, mask\n (['Sentence 1.', 'Sentence 2.'], [False, True, False, True])\n \"\"\"\n if not condition:\n condition = lambda sentence: len(sentence) > 1 and \"#\" not in sentence\n mask = list(map(condition, sentences))\n sentences = [sentence for m, sentence in zip(mask, sentences) if m]\n return sentences, mask\n\n\ndef process_token(token: str, replace: List[Tuple[str, str]] = [(\"#\", \"\")]) -> str:\n \"\"\"\n Replaces special characters in the tokens\n\n Parameters\n ----------\n token:\n token which potentially contains special characters\n\n replace:\n list of tuples `(s1, s2)`, where all occurances of s1 are replaced by s2\n\n Returns\n ---------\n processed_token:\n processed token whose special character has been replaced\n\n Note\n ----\n Only applies to characters in the original input token.\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import process_token\n >>> token = \"#Comment\"\n >>> process_token(\"#Comment\")\n 'Comment'\n\n Specify custom replacement rules\n\n >>> replace = [(\"C\", \"a\"), (\"a\", \"C\")]\n >>> process_token(\"Cleanlab\", replace)\n 'aleCnlCb'\n \"\"\"\n replace_dict = {re.escape(k): v for (k, v) in replace}\n pattern = \"|\".join(replace_dict.keys())\n compiled_pattern = re.compile(pattern)\n replacement = lambda match: replace_dict[re.escape(match.group(0))]\n processed_token = compiled_pattern.sub(replacement, token)\n return processed_token\n\n\ndef mapping(entities: List[int], maps: List[int]) -> List[int]:\n \"\"\"\n Map a list of entities to 
its corresponding entities\n\n Parameters\n ----------\n entities:\n a list of given entities\n\n maps:\n a list of mapped entities, such that the i'th indexed token should be mapped to `maps[i]`\n\n Returns\n ---------\n mapped_entities:\n a list of mapped entities\n\n Examples\n --------\n >>> unique_identities = [0, 1, 2, 3, 4] # [\"O\", \"B-PER\", \"I-PER\", \"B-LOC\", \"I-LOC\"]\n >>> maps = [0, 1, 1, 2, 2] # [\"O\", \"PER\", \"PER\", \"LOC\", \"LOC\"]\n >>> mapping(unique_identities, maps)\n [0, 1, 1, 2, 2] # [\"O\", \"PER\", \"PER\", \"LOC\", \"LOC\"]\n >>> mapping([0, 0, 4, 4, 3, 4, 0, 2], maps)\n [0, 0, 2, 2, 2, 2, 0, 1] # [\"O\", \"O\", \"LOC\", \"LOC\", \"LOC\", \"LOC\", \"O\", \"PER\"]\n \"\"\"\n f = lambda x: maps[x]\n return list(map(f, entities))\n\n\ndef merge_probs(probs: np.ndarray, maps: List[int]) -> np.ndarray:\n \"\"\"\n Merges model-predictive probabilities with desired mapping\n\n Parameters\n ----------\n probs:\n np.array of shape `(N, K)`, where N is the number of tokens, and K is the number of classes for the model\n\n maps:\n a list of mapped index, such that the probability of the token being in the i'th class is mapped to the\n `maps[i]` index. If `maps[i] == -1`, the i'th column of `probs` is ignored. If `np.any(maps == -1)`, the\n returned probability is re-normalized.\n\n Returns\n ---------\n probs_merged:\n np.array of shape ``(N, K')``, where `K` is the number of new classes. Probabilities are merged and\n re-normalized if necessary.\n\n Examples\n --------\n >>> import numpy as np\n >>> from cleanlab.internal.token_classification_utils import merge_probs\n >>> probs = np.array([\n ... [0.55, 0.0125, 0.0375, 0.1, 0.3],\n ... [0.1, 0.8, 0, 0.075, 0.025],\n ... ])\n >>> maps = [0, 1, 1, 2, 2]\n >>> merge_probs(probs, maps)\n array([[0.55, 0.05, 0.4 ],\n [0.1 , 0.8 , 0.1 ]])\n \"\"\"\n old_classes = probs.shape[1]\n map_size = np.max(maps) + 1\n probs_merged = np.zeros([len(probs), map_size], dtype=probs.dtype.type)\n\n for i in range(old_classes):\n if maps[i] >= 0:\n probs_merged[:, maps[i]] += probs[:, i]\n if -1 in maps:\n row_sums = probs_merged.sum(axis=1)\n probs_merged /= row_sums[:, np.newaxis]\n return probs_merged\n\n\ndef color_sentence(sentence: str, word: str) -> str:\n \"\"\"\n Searches for a given token in the sentence and returns the sentence where the given token is colored red\n\n Parameters\n ----------\n sentence:\n a sentence where the word is searched\n\n word:\n keyword to find in `sentence`. Assumes the word exists in the sentence.\n Returns\n ---------\n colored_sentence:\n `sentence` where the every occurrence of the word is colored red, using ``termcolor.colored``\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import color_sentence\n >>> sentence = \"This is a sentence.\"\n >>> word = \"sentence\"\n >>> color_sentence(sentence, word)\n 'This is a \\x1b[31msentence\\x1b[0m.'\n\n Also works for multiple occurrences of the word\n\n >>> document = \"This is a sentence. This is another sentence.\"\n >>> word = \"sentence\"\n >>> color_sentence(document, word)\n 'This is a \\x1b[31msentence\\x1b[0m. 
This is another \\x1b[31msentence\\x1b[0m.'\n \"\"\"\n colored_word = colored(word, \"red\")\n colored_sentence, number_of_substitions = re.subn(\n r\"\\b{}\\b\".format(re.escape(word)), colored_word, sentence\n )\n if number_of_substitions == 0:\n # Use basic string manipulation if regex fails\n colored_sentence = sentence.replace(word, colored_word)\n return colored_sentence\n", "path": "cleanlab/internal/token_classification_utils.py"}], "after_files": [{"content": "# Copyright (C) 2017-2022 Cleanlab Inc.\n# This file is part of cleanlab.\n#\n# cleanlab is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# cleanlab is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with cleanlab. If not, see <https://www.gnu.org/licenses/>.\n\n\"\"\"\nHelper methods used internally in cleanlab.token_classification\n\"\"\"\n\nimport re\nimport string\nimport numpy as np\nfrom termcolor import colored\nfrom typing import List, Optional, Callable, Tuple\n\n\ndef get_sentence(words: List[str]) -> str:\n \"\"\"\n Get sentence formed by a list of words with minor processing for readability\n\n Parameters\n ----------\n words:\n list of word-level tokens\n\n Returns\n ----------\n sentence:\n sentence formed by list of word-level tokens\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import get_sentence\n >>> words = [\"This\", \"is\", \"a\", \"sentence\", \".\"]\n >>> get_sentence(words)\n 'This is a sentence.'\n \"\"\"\n sentence = \"\"\n for word in words:\n if word not in string.punctuation or word in [\"-\", \"(\"]:\n word = \" \" + word\n sentence += word\n sentence = sentence.replace(\" '\", \"'\").replace(\"( \", \"(\").strip()\n return sentence\n\n\ndef filter_sentence(\n sentences: List[str],\n condition: Optional[Callable[[str], bool]] = None,\n) -> Tuple[List[str], List[bool]]:\n \"\"\"\n Filter sentence based on some condition, and returns filter mask\n\n Parameters\n ----------\n sentences:\n list of sentences\n\n condition:\n sentence filtering condition\n\n Returns\n ---------\n sentences:\n list of sentences filtered\n\n mask:\n boolean mask such that `mask[i] == True` if the i'th sentence is included in the\n filtered sentence, otherwise `mask[i] == False`\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import filter_sentence\n >>> sentences = [\"Short sentence.\", \"This is a longer sentence.\"]\n >>> condition = lambda x: len(x.split()) > 2\n >>> long_sentences, _ = filter_sentence(sentences, condition)\n >>> long_sentences\n ['This is a longer sentence.']\n >>> document = [\"# Headline\", \"Sentence 1.\", \"&\", \"Sentence 2.\"]\n >>> sentences, mask = filter_sentence(document)\n >>> sentences, mask\n (['Sentence 1.', 'Sentence 2.'], [False, True, False, True])\n \"\"\"\n if not condition:\n condition = lambda sentence: len(sentence) > 1 and \"#\" not in sentence\n mask = list(map(condition, sentences))\n sentences = [sentence for m, sentence in zip(mask, sentences) if m]\n return sentences, mask\n\n\ndef process_token(token: str, replace: List[Tuple[str, str]] = 
[(\"#\", \"\")]) -> str:\n \"\"\"\n Replaces special characters in the tokens\n\n Parameters\n ----------\n token:\n token which potentially contains special characters\n\n replace:\n list of tuples `(s1, s2)`, where all occurances of s1 are replaced by s2\n\n Returns\n ---------\n processed_token:\n processed token whose special character has been replaced\n\n Note\n ----\n Only applies to characters in the original input token.\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import process_token\n >>> token = \"#Comment\"\n >>> process_token(\"#Comment\")\n 'Comment'\n\n Specify custom replacement rules\n\n >>> replace = [(\"C\", \"a\"), (\"a\", \"C\")]\n >>> process_token(\"Cleanlab\", replace)\n 'aleCnlCb'\n \"\"\"\n replace_dict = {re.escape(k): v for (k, v) in replace}\n pattern = \"|\".join(replace_dict.keys())\n compiled_pattern = re.compile(pattern)\n replacement = lambda match: replace_dict[re.escape(match.group(0))]\n processed_token = compiled_pattern.sub(replacement, token)\n return processed_token\n\n\ndef mapping(entities: List[int], maps: List[int]) -> List[int]:\n \"\"\"\n Map a list of entities to its corresponding entities\n\n Parameters\n ----------\n entities:\n a list of given entities\n\n maps:\n a list of mapped entities, such that the i'th indexed token should be mapped to `maps[i]`\n\n Returns\n ---------\n mapped_entities:\n a list of mapped entities\n\n Examples\n --------\n >>> unique_identities = [0, 1, 2, 3, 4] # [\"O\", \"B-PER\", \"I-PER\", \"B-LOC\", \"I-LOC\"]\n >>> maps = [0, 1, 1, 2, 2] # [\"O\", \"PER\", \"PER\", \"LOC\", \"LOC\"]\n >>> mapping(unique_identities, maps)\n [0, 1, 1, 2, 2] # [\"O\", \"PER\", \"PER\", \"LOC\", \"LOC\"]\n >>> mapping([0, 0, 4, 4, 3, 4, 0, 2], maps)\n [0, 0, 2, 2, 2, 2, 0, 1] # [\"O\", \"O\", \"LOC\", \"LOC\", \"LOC\", \"LOC\", \"O\", \"PER\"]\n \"\"\"\n f = lambda x: maps[x]\n return list(map(f, entities))\n\n\ndef merge_probs(probs: np.ndarray, maps: List[int]) -> np.ndarray:\n \"\"\"\n Merges model-predictive probabilities with desired mapping\n\n Parameters\n ----------\n probs:\n np.array of shape `(N, K)`, where N is the number of tokens, and K is the number of classes for the model\n\n maps:\n a list of mapped index, such that the probability of the token being in the i'th class is mapped to the\n `maps[i]` index. If `maps[i] == -1`, the i'th column of `probs` is ignored. If `np.any(maps == -1)`, the\n returned probability is re-normalized.\n\n Returns\n ---------\n probs_merged:\n np.array of shape ``(N, K')``, where `K` is the number of new classes. Probabilities are merged and\n re-normalized if necessary.\n\n Examples\n --------\n >>> import numpy as np\n >>> from cleanlab.internal.token_classification_utils import merge_probs\n >>> probs = np.array([\n ... [0.55, 0.0125, 0.0375, 0.1, 0.3],\n ... [0.1, 0.8, 0, 0.075, 0.025],\n ... 
])\n >>> maps = [0, 1, 1, 2, 2]\n >>> merge_probs(probs, maps)\n array([[0.55, 0.05, 0.4 ],\n [0.1 , 0.8 , 0.1 ]])\n \"\"\"\n old_classes = probs.shape[1]\n map_size = np.max(maps) + 1\n probs_merged = np.zeros([len(probs), map_size], dtype=probs.dtype.type)\n\n for i in range(old_classes):\n if maps[i] >= 0:\n probs_merged[:, maps[i]] += probs[:, i]\n if -1 in maps:\n row_sums = probs_merged.sum(axis=1)\n probs_merged /= row_sums[:, np.newaxis]\n return probs_merged\n\n\ndef color_sentence(sentence: str, word: str) -> str:\n \"\"\"\n Searches for a given token in the sentence and returns the sentence where the given token is colored red\n\n Parameters\n ----------\n sentence:\n a sentence where the word is searched\n\n word:\n keyword to find in `sentence`. Assumes the word exists in the sentence.\n Returns\n ---------\n colored_sentence:\n `sentence` where the every occurrence of the word is colored red, using ``termcolor.colored``\n\n Examples\n --------\n >>> from cleanlab.internal.token_classification_utils import color_sentence\n >>> sentence = \"This is a sentence.\"\n >>> word = \"sentence\"\n >>> color_sentence(sentence, word)\n 'This is a \\x1b[31msentence\\x1b[0m.'\n\n Also works for multiple occurrences of the word\n\n >>> document = \"This is a sentence. This is another sentence.\"\n >>> word = \"sentence\"\n >>> color_sentence(document, word)\n 'This is a \\x1b[31msentence\\x1b[0m. This is another \\x1b[31msentence\\x1b[0m.'\n \"\"\"\n colored_word = colored(word, \"red\")\n return _replace_sentence(sentence=sentence, word=word, new_word=colored_word)\n\n\ndef _replace_sentence(sentence: str, word: str, new_word: str) -> str:\n \"\"\"\n Searches for a given token in the sentence and returns the sentence where the given token has been replaced by\n `new_word`.\n\n Parameters\n ----------\n sentence:\n a sentence where the word is searched\n\n word:\n keyword to find in `sentence`. Assumes the word exists in the sentence.\n\n new_word:\n the word to replace the keyword with\n\n Returns\n ---------\n new_sentence:\n `sentence` where the every occurrence of the word is replaced by `colored_word`\n \"\"\"\n\n new_sentence, number_of_substitions = re.subn(\n r\"\\b{}\\b\".format(re.escape(word)), new_word, sentence\n )\n if number_of_substitions == 0:\n # Use basic string manipulation if regex fails\n new_sentence = sentence.replace(word, new_word)\n return new_sentence\n", "path": "cleanlab/internal/token_classification_utils.py"}]}
3,261
411
gh_patches_debug_10644
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-2593
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Provide better error for Union of scalars. This keeps coming from questions on discord ```py union: Union[List[int], List[str]] ``` gives something like ```console File "/root/backend/env/lib/python3.10/site-packages/strawberry/schema/name_converter.py", line 99, in from_union assert hasattr(type_, "_type_definition") AssertionError The above exception was the direct cause of the following exception: File "/root/backend/env/lib/python3.10/site-packages/graphql/type/definition.py", line 811, in fields raise cls(f"{self.name} fields cannot be resolved. {error}") from error TypeError: Query fields cannot be resolved ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/exceptions/invalid_union_type.py` Content: ``` 1 from __future__ import annotations 2 3 from inspect import getframeinfo, stack 4 from pathlib import Path 5 from typing import TYPE_CHECKING, Optional, Type 6 7 from strawberry.exceptions.utils.source_finder import SourceFinder 8 from strawberry.utils.cached_property import cached_property 9 10 from .exception import StrawberryException 11 12 if TYPE_CHECKING: 13 from strawberry.union import StrawberryUnion 14 15 from .exception_source import ExceptionSource 16 17 18 class InvalidUnionTypeError(StrawberryException): 19 """The union is constructed with an invalid type""" 20 21 invalid_type: object 22 23 def __init__(self, union_name: str, invalid_type: object) -> None: 24 from strawberry.custom_scalar import ScalarWrapper 25 26 self.union_name = union_name 27 self.invalid_type = invalid_type 28 29 # assuming that the exception happens two stack frames above the current one. 30 # one is our code checking for invalid types, the other is the caller 31 self.frame = getframeinfo(stack()[2][0]) 32 33 if isinstance(invalid_type, ScalarWrapper): 34 type_name = invalid_type.wrap.__name__ 35 else: 36 type_name = invalid_type.__name__ # type: ignore 37 38 self.message = f"Type `{type_name}` cannot be used in a GraphQL Union" 39 self.rich_message = ( 40 f"Type `[underline]{type_name}[/]` cannot be used in a GraphQL Union" 41 ) 42 self.suggestion = ( 43 "To fix this error you should replace the type a strawberry.type" 44 ) 45 self.annotation_message = "invalid type here" 46 47 @cached_property 48 def exception_source(self) -> Optional[ExceptionSource]: 49 path = Path(self.frame.filename) 50 51 source_finder = SourceFinder() 52 53 return source_finder.find_union_call(path, self.union_name, self.invalid_type) 54 55 56 class InvalidTypeForUnionMergeError(StrawberryException): 57 """A specialized version of InvalidUnionTypeError for when trying 58 to merge unions using the pipe operator.""" 59 60 invalid_type: Type 61 62 def __init__(self, union: StrawberryUnion, other: object) -> None: 63 self.union = union 64 self.other = other 65 66 # assuming that the exception happens two stack frames above the current one. 
67 # one is our code checking for invalid types, the other is the caller 68 self.frame = getframeinfo(stack()[2][0]) 69 70 other_name = getattr(other, "__name__", str(other)) 71 72 self.message = f"`{other_name}` cannot be used when merging GraphQL Unions" 73 self.rich_message = ( 74 f"`[underline]{other_name}[/]` cannot be used when merging GraphQL Unions" 75 ) 76 self.suggestion = "" 77 self.annotation_message = "invalid type here" 78 79 @cached_property 80 def exception_source(self) -> Optional[ExceptionSource]: 81 source_finder = SourceFinder() 82 83 return source_finder.find_union_merge(self.union, self.other, frame=self.frame) 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/exceptions/invalid_union_type.py b/strawberry/exceptions/invalid_union_type.py --- a/strawberry/exceptions/invalid_union_type.py +++ b/strawberry/exceptions/invalid_union_type.py @@ -33,7 +33,11 @@ if isinstance(invalid_type, ScalarWrapper): type_name = invalid_type.wrap.__name__ else: - type_name = invalid_type.__name__ # type: ignore + try: + type_name = invalid_type.__name__ # type: ignore + except AttributeError: + # might be StrawberryList instance + type_name = invalid_type.__class__.__name__ self.message = f"Type `{type_name}` cannot be used in a GraphQL Union" self.rich_message = (
{"golden_diff": "diff --git a/strawberry/exceptions/invalid_union_type.py b/strawberry/exceptions/invalid_union_type.py\n--- a/strawberry/exceptions/invalid_union_type.py\n+++ b/strawberry/exceptions/invalid_union_type.py\n@@ -33,7 +33,11 @@\n if isinstance(invalid_type, ScalarWrapper):\n type_name = invalid_type.wrap.__name__\n else:\n- type_name = invalid_type.__name__ # type: ignore\n+ try:\n+ type_name = invalid_type.__name__ # type: ignore\n+ except AttributeError:\n+ # might be StrawberryList instance\n+ type_name = invalid_type.__class__.__name__\n \n self.message = f\"Type `{type_name}` cannot be used in a GraphQL Union\"\n self.rich_message = (\n", "issue": "Provide better error for Union of scalars.\nThis keeps coming from questions on discord\r\n```py\r\nunion: Union[List[int], List[str]]\r\n```\r\ngives something like\r\n```console\r\n File \"/root/backend/env/lib/python3.10/site-packages/strawberry/schema/name_converter.py\", line 99, in from_union\r\n assert hasattr(type_, \"_type_definition\")\r\nAssertionError\r\n\r\nThe above exception was the direct cause of the following exception:\r\n File \"/root/backend/env/lib/python3.10/site-packages/graphql/type/definition.py\", line 811, in fields\r\n raise cls(f\"{self.name} fields cannot be resolved. {error}\") from error\r\nTypeError: Query fields cannot be resolved\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom inspect import getframeinfo, stack\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Optional, Type\n\nfrom strawberry.exceptions.utils.source_finder import SourceFinder\nfrom strawberry.utils.cached_property import cached_property\n\nfrom .exception import StrawberryException\n\nif TYPE_CHECKING:\n from strawberry.union import StrawberryUnion\n\n from .exception_source import ExceptionSource\n\n\nclass InvalidUnionTypeError(StrawberryException):\n \"\"\"The union is constructed with an invalid type\"\"\"\n\n invalid_type: object\n\n def __init__(self, union_name: str, invalid_type: object) -> None:\n from strawberry.custom_scalar import ScalarWrapper\n\n self.union_name = union_name\n self.invalid_type = invalid_type\n\n # assuming that the exception happens two stack frames above the current one.\n # one is our code checking for invalid types, the other is the caller\n self.frame = getframeinfo(stack()[2][0])\n\n if isinstance(invalid_type, ScalarWrapper):\n type_name = invalid_type.wrap.__name__\n else:\n type_name = invalid_type.__name__ # type: ignore\n\n self.message = f\"Type `{type_name}` cannot be used in a GraphQL Union\"\n self.rich_message = (\n f\"Type `[underline]{type_name}[/]` cannot be used in a GraphQL Union\"\n )\n self.suggestion = (\n \"To fix this error you should replace the type a strawberry.type\"\n )\n self.annotation_message = \"invalid type here\"\n\n @cached_property\n def exception_source(self) -> Optional[ExceptionSource]:\n path = Path(self.frame.filename)\n\n source_finder = SourceFinder()\n\n return source_finder.find_union_call(path, self.union_name, self.invalid_type)\n\n\nclass InvalidTypeForUnionMergeError(StrawberryException):\n \"\"\"A specialized version of InvalidUnionTypeError for when trying\n to merge unions using the pipe operator.\"\"\"\n\n invalid_type: Type\n\n def __init__(self, union: StrawberryUnion, other: object) -> None:\n self.union = union\n self.other = other\n\n # assuming that the exception happens two stack frames above the current one.\n # one is our code checking for invalid types, the other is the caller\n 
self.frame = getframeinfo(stack()[2][0])\n\n other_name = getattr(other, \"__name__\", str(other))\n\n self.message = f\"`{other_name}` cannot be used when merging GraphQL Unions\"\n self.rich_message = (\n f\"`[underline]{other_name}[/]` cannot be used when merging GraphQL Unions\"\n )\n self.suggestion = \"\"\n self.annotation_message = \"invalid type here\"\n\n @cached_property\n def exception_source(self) -> Optional[ExceptionSource]:\n source_finder = SourceFinder()\n\n return source_finder.find_union_merge(self.union, self.other, frame=self.frame)\n", "path": "strawberry/exceptions/invalid_union_type.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom inspect import getframeinfo, stack\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Optional, Type\n\nfrom strawberry.exceptions.utils.source_finder import SourceFinder\nfrom strawberry.utils.cached_property import cached_property\n\nfrom .exception import StrawberryException\n\nif TYPE_CHECKING:\n from strawberry.union import StrawberryUnion\n\n from .exception_source import ExceptionSource\n\n\nclass InvalidUnionTypeError(StrawberryException):\n \"\"\"The union is constructed with an invalid type\"\"\"\n\n invalid_type: object\n\n def __init__(self, union_name: str, invalid_type: object) -> None:\n from strawberry.custom_scalar import ScalarWrapper\n\n self.union_name = union_name\n self.invalid_type = invalid_type\n\n # assuming that the exception happens two stack frames above the current one.\n # one is our code checking for invalid types, the other is the caller\n self.frame = getframeinfo(stack()[2][0])\n\n if isinstance(invalid_type, ScalarWrapper):\n type_name = invalid_type.wrap.__name__\n else:\n try:\n type_name = invalid_type.__name__ # type: ignore\n except AttributeError:\n # might be StrawberryList instance\n type_name = invalid_type.__class__.__name__\n\n self.message = f\"Type `{type_name}` cannot be used in a GraphQL Union\"\n self.rich_message = (\n f\"Type `[underline]{type_name}[/]` cannot be used in a GraphQL Union\"\n )\n self.suggestion = (\n \"To fix this error you should replace the type a strawberry.type\"\n )\n self.annotation_message = \"invalid type here\"\n\n @cached_property\n def exception_source(self) -> Optional[ExceptionSource]:\n path = Path(self.frame.filename)\n\n source_finder = SourceFinder()\n\n return source_finder.find_union_call(path, self.union_name, self.invalid_type)\n\n\nclass InvalidTypeForUnionMergeError(StrawberryException):\n \"\"\"A specialized version of InvalidUnionTypeError for when trying\n to merge unions using the pipe operator.\"\"\"\n\n invalid_type: Type\n\n def __init__(self, union: StrawberryUnion, other: object) -> None:\n self.union = union\n self.other = other\n\n # assuming that the exception happens two stack frames above the current one.\n # one is our code checking for invalid types, the other is the caller\n self.frame = getframeinfo(stack()[2][0])\n\n other_name = getattr(other, \"__name__\", str(other))\n\n self.message = f\"`{other_name}` cannot be used when merging GraphQL Unions\"\n self.rich_message = (\n f\"`[underline]{other_name}[/]` cannot be used when merging GraphQL Unions\"\n )\n self.suggestion = \"\"\n self.annotation_message = \"invalid type here\"\n\n @cached_property\n def exception_source(self) -> Optional[ExceptionSource]:\n source_finder = SourceFinder()\n\n return source_finder.find_union_merge(self.union, self.other, frame=self.frame)\n", "path": "strawberry/exceptions/invalid_union_type.py"}]}
1,203
181
gh_patches_debug_32470
rasdani/github-patches
git_diff
cocotb__cocotb-3568
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Thoughts on adding git rev to dev version numbers? How would everyone feel about adding `git rev-parse --short HEAD` to `__version__`? Currently it is `2.0.0.dev0` and I'm not sure what the final `0` is for, but I'm guessing it's to manually increment as one feels like? What if instead it were `2.0.0.dev-d379318e`? I'd propose that we build the file on the fly in `setup.py` but only add the git rev when the non-suffixed version (stored as a variable in `setup.py`) includes `dev`. I can take a stab at this if people are onboard. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cocotb/_version.py` Content: ``` 1 # Package versioning solution originally found here: 2 # http://stackoverflow.com/q/458550 3 4 # Store the version here so: 5 # 1) we don't load dependencies by storing it in __init__.py 6 # 2) we can import it in setup.py for the same reason 7 # 3) we can import it into your module 8 __version__ = "2.0.0.dev0" 9 ``` Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 ############################################################################### 3 # Copyright (c) 2013 Potential Ventures Ltd 4 # Copyright (c) 2013 SolarFlare Communications Inc 5 # All rights reserved. 6 # 7 # Redistribution and use in source and binary forms, with or without 8 # modification, are permitted provided that the following conditions are met: 9 # * Redistributions of source code must retain the above copyright 10 # notice, this list of conditions and the following disclaimer. 11 # * Redistributions in binary form must reproduce the above copyright 12 # notice, this list of conditions and the following disclaimer in the 13 # documentation and/or other materials provided with the distribution. 14 # * Neither the name of Potential Ventures Ltd, 15 # SolarFlare Communications Inc nor the 16 # names of its contributors may be used to endorse or promote products 17 # derived from this software without specific prior written permission. 18 # 19 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 20 # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 21 # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 22 # DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY 23 # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 24 # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 25 # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 26 # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 ############################################################################### 30 31 import sys 32 33 if sys.version_info[:2] < (3, 6): # noqa: UP036 | bug in ruff 34 msg = [ 35 "This version of cocotb requires at least Python 3.6,", 36 "you are running Python %d.%d.%d." 
37 % (sys.version_info[0], sys.version_info[1], sys.version_info[2]), 38 ] 39 msg += [ 40 "For more information please refer to the documentation at ", 41 "https://cocotb.readthedocs.io.", 42 ] 43 44 raise SystemExit("\n".join(msg)) 45 46 import logging 47 from io import StringIO 48 from os import path, walk 49 50 from setuptools import find_packages, setup 51 52 # Note: cocotb is not installed properly yet and is missing dependencies and binaries 53 # We can still import other files next to setup.py, as long as they're in MANIFEST.in 54 # The below line is necessary for PEP517 support 55 sys.path.append(path.dirname(__file__)) 56 from cocotb_build_libs import build_ext, get_ext # noqa: E402 57 58 59 def read_file(fname): 60 with open(path.join(path.dirname(__file__), fname), encoding="utf8") as f: 61 return f.read() 62 63 64 def package_files(directory): 65 paths = [] 66 for fpath, directories, filenames in walk(directory): 67 for filename in filenames: 68 paths.append(path.join("..", "..", fpath, filename)) 69 return paths 70 71 72 # this sets the __version__ variable 73 exec(read_file(path.join("src", "cocotb", "_version.py"))) 74 75 # store log from build_libs and display at the end in verbose mode 76 # see https://github.com/pypa/pip/issues/6634 77 log_stream = StringIO() 78 handler = logging.StreamHandler(log_stream) 79 log = logging.getLogger("cocotb._build_libs") 80 log.setLevel(logging.INFO) 81 log.addHandler(handler) 82 83 setup( 84 name="cocotb", 85 cmdclass={"build_ext": build_ext}, 86 version=__version__, # noqa: F821 87 description="cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.", 88 url="https://www.cocotb.org", 89 license="BSD", 90 long_description=read_file("README.md"), 91 long_description_content_type="text/markdown", 92 author="Chris Higgs, Stuart Hodgson", 93 maintainer="cocotb contributors", 94 maintainer_email="[email protected]", 95 install_requires=[ 96 "find_libpython", 97 ], 98 python_requires=">=3.6", 99 packages=find_packages(where="src"), 100 package_dir={"": "src"}, 101 package_data={ 102 "cocotb": ( 103 package_files("src/cocotb/share/makefiles") 104 + package_files("src/cocotb/share/include") # noqa: W504 105 + package_files("src/cocotb/share/def") # noqa: W504 106 + package_files("src/cocotb/share/lib/verilator") # noqa: W504 107 ) 108 }, 109 ext_modules=get_ext(), 110 entry_points={ 111 "console_scripts": [ 112 "cocotb-config=cocotb.config:main", 113 ] 114 }, 115 platforms="any", 116 classifiers=[ 117 "Programming Language :: Python :: 3", 118 "Programming Language :: Python :: 3.6", 119 "Programming Language :: Python :: 3.7", 120 "Programming Language :: Python :: 3.8", 121 "Programming Language :: Python :: 3.9", 122 "Programming Language :: Python :: 3.10", 123 "Programming Language :: Python :: 3.11", 124 "License :: OSI Approved :: BSD License", 125 "Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)", 126 "Framework :: cocotb", 127 ], 128 # these appear in the sidebar on PyPI 129 project_urls={ 130 "Bug Tracker": "https://github.com/cocotb/cocotb/issues", 131 "Source Code": "https://github.com/cocotb/cocotb", 132 "Documentation": "https://docs.cocotb.org", 133 }, 134 ) 135 136 print(log_stream.getvalue()) 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -44,6 +44,7 @@ raise SystemExit("\n".join(msg)) import logging +import subprocess from io import StringIO from os import path, walk @@ -69,8 +70,26 @@ return paths -# this sets the __version__ variable -exec(read_file(path.join("src", "cocotb", "_version.py"))) +version_file_path = path.join("src", "cocotb", "_version.py") +__version__ = "2.0.0.dev0" +if "dev" in __version__: + try: + rev = subprocess.check_output( + ["git", "rev-parse", "--short", "HEAD"], universal_newlines=True + ).strip() + __version__ += f"+{rev}" + except Exception as e: + # if this is not a git repository and _version.py already exists, + # we are probably installing from an sdist, so use the existing _version.py + if path.exists(version_file_path): + exec(read_file(version_file_path)) + else: + print(e, file=sys.stderr) +with open(version_file_path, "w") as f: + f.write("# Package version\n") + f.write("# Generated by setup.py -- do not modify directly\n\n") + f.write(f'__version__ = "{__version__}"') + # store log from build_libs and display at the end in verbose mode # see https://github.com/pypa/pip/issues/6634 @@ -83,7 +102,7 @@ setup( name="cocotb", cmdclass={"build_ext": build_ext}, - version=__version__, # noqa: F821 + version=__version__, description="cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.", url="https://www.cocotb.org", license="BSD", diff --git a/src/cocotb/_version.py b/src/cocotb/_version.py deleted file mode 100644 --- a/src/cocotb/_version.py +++ /dev/null @@ -1,8 +0,0 @@ -# Package versioning solution originally found here: -# http://stackoverflow.com/q/458550 - -# Store the version here so: -# 1) we don't load dependencies by storing it in __init__.py -# 2) we can import it in setup.py for the same reason -# 3) we can import it into your module -__version__ = "2.0.0.dev0"
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -44,6 +44,7 @@\n raise SystemExit(\"\\n\".join(msg))\n \n import logging\n+import subprocess\n from io import StringIO\n from os import path, walk\n \n@@ -69,8 +70,26 @@\n return paths\n \n \n-# this sets the __version__ variable\n-exec(read_file(path.join(\"src\", \"cocotb\", \"_version.py\")))\n+version_file_path = path.join(\"src\", \"cocotb\", \"_version.py\")\n+__version__ = \"2.0.0.dev0\"\n+if \"dev\" in __version__:\n+ try:\n+ rev = subprocess.check_output(\n+ [\"git\", \"rev-parse\", \"--short\", \"HEAD\"], universal_newlines=True\n+ ).strip()\n+ __version__ += f\"+{rev}\"\n+ except Exception as e:\n+ # if this is not a git repository and _version.py already exists,\n+ # we are probably installing from an sdist, so use the existing _version.py\n+ if path.exists(version_file_path):\n+ exec(read_file(version_file_path))\n+ else:\n+ print(e, file=sys.stderr)\n+with open(version_file_path, \"w\") as f:\n+ f.write(\"# Package version\\n\")\n+ f.write(\"# Generated by setup.py -- do not modify directly\\n\\n\")\n+ f.write(f'__version__ = \"{__version__}\"')\n+\n \n # store log from build_libs and display at the end in verbose mode\n # see https://github.com/pypa/pip/issues/6634\n@@ -83,7 +102,7 @@\n setup(\n name=\"cocotb\",\n cmdclass={\"build_ext\": build_ext},\n- version=__version__, # noqa: F821\n+ version=__version__,\n description=\"cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.\",\n url=\"https://www.cocotb.org\",\n license=\"BSD\",\ndiff --git a/src/cocotb/_version.py b/src/cocotb/_version.py\ndeleted file mode 100644\n--- a/src/cocotb/_version.py\n+++ /dev/null\n@@ -1,8 +0,0 @@\n-# Package versioning solution originally found here:\n-# http://stackoverflow.com/q/458550\n-\n-# Store the version here so:\n-# 1) we don't load dependencies by storing it in __init__.py\n-# 2) we can import it in setup.py for the same reason\n-# 3) we can import it into your module\n-__version__ = \"2.0.0.dev0\"\n", "issue": "Thoughts on adding git rev to dev version numbers?\nHow would everyone feel about adding `git rev-parse --short HEAD` to `__version__`? Currently it is `2.0.0.dev0` and I'm not sure what the final `0` is for, but I'm guessing it's to manually increment as one feels like? What if instead it were `2.0.0.dev-d379318e`? 
I'd propose that we build the file on the fly in `setup.py` but only add the git rev when the non-suffixed version (stored as a variable in `setup.py`) includes `dev`.\r\n\r\nI can take a stab at this if people are onboard.\n", "before_files": [{"content": "# Package versioning solution originally found here:\n# http://stackoverflow.com/q/458550\n\n# Store the version here so:\n# 1) we don't load dependencies by storing it in __init__.py\n# 2) we can import it in setup.py for the same reason\n# 3) we can import it into your module\n__version__ = \"2.0.0.dev0\"\n", "path": "src/cocotb/_version.py"}, {"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\n\nif sys.version_info[:2] < (3, 6): # noqa: UP036 | bug in ruff\n msg = [\n \"This version of cocotb requires at least Python 3.6,\",\n \"you are running Python %d.%d.%d.\"\n % (sys.version_info[0], sys.version_info[1], sys.version_info[2]),\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\",\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nfrom io import StringIO\nfrom os import path, walk\n\nfrom setuptools import find_packages, setup\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\n# The below line is necessary for PEP517 support\nsys.path.append(path.dirname(__file__))\nfrom cocotb_build_libs import build_ext, get_ext # noqa: E402\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding=\"utf8\") as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for fpath, directories, filenames in walk(directory):\n for filename in filenames:\n paths.append(path.join(\"..\", \"..\", fpath, filename))\n return paths\n\n\n# this sets the __version__ variable\nexec(read_file(path.join(\"src\", \"cocotb\", \"_version.py\")))\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name=\"cocotb\",\n cmdclass={\"build_ext\": build_ext},\n version=__version__, # noqa: F821\n description=\"cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.\",\n url=\"https://www.cocotb.org\",\n license=\"BSD\",\n long_description=read_file(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n author=\"Chris Higgs, Stuart Hodgson\",\n maintainer=\"cocotb contributors\",\n maintainer_email=\"[email protected]\",\n install_requires=[\n \"find_libpython\",\n ],\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\n \"cocotb\": (\n package_files(\"src/cocotb/share/makefiles\")\n + package_files(\"src/cocotb/share/include\") # noqa: W504\n + package_files(\"src/cocotb/share/def\") # noqa: W504\n + package_files(\"src/cocotb/share/lib/verilator\") # noqa: W504\n )\n },\n ext_modules=get_ext(),\n entry_points={\n \"console_scripts\": [\n \"cocotb-config=cocotb.config:main\",\n ]\n },\n platforms=\"any\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming 
Language :: Python :: 3.11\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n \"Framework :: cocotb\",\n ],\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n \"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}], "after_files": [{"content": null, "path": "src/cocotb/_version.py"}, {"content": "#!/usr/bin/env python\n###############################################################################\n# Copyright (c) 2013 Potential Ventures Ltd\n# Copyright (c) 2013 SolarFlare Communications Inc\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of Potential Ventures Ltd,\n# SolarFlare Communications Inc nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\n\nimport sys\n\nif sys.version_info[:2] < (3, 6): # noqa: UP036 | bug in ruff\n msg = [\n \"This version of cocotb requires at least Python 3.6,\",\n \"you are running Python %d.%d.%d.\"\n % (sys.version_info[0], sys.version_info[1], sys.version_info[2]),\n ]\n msg += [\n \"For more information please refer to the documentation at \",\n \"https://cocotb.readthedocs.io.\",\n ]\n\n raise SystemExit(\"\\n\".join(msg))\n\nimport logging\nimport subprocess\nfrom io import StringIO\nfrom os import path, walk\n\nfrom setuptools import find_packages, setup\n\n# Note: cocotb is not installed properly yet and is missing dependencies and binaries\n# We can still import other files next to setup.py, as long as they're in MANIFEST.in\n# The below line is necessary for PEP517 support\nsys.path.append(path.dirname(__file__))\nfrom cocotb_build_libs import build_ext, get_ext # noqa: E402\n\n\ndef read_file(fname):\n with open(path.join(path.dirname(__file__), fname), encoding=\"utf8\") as f:\n return f.read()\n\n\ndef package_files(directory):\n paths = []\n for fpath, directories, filenames in walk(directory):\n for filename in filenames:\n paths.append(path.join(\"..\", 
\"..\", fpath, filename))\n return paths\n\n\nversion_file_path = path.join(\"src\", \"cocotb\", \"_version.py\")\n__version__ = \"2.0.0.dev0\"\nif \"dev\" in __version__:\n try:\n rev = subprocess.check_output(\n [\"git\", \"rev-parse\", \"--short\", \"HEAD\"], universal_newlines=True\n ).strip()\n __version__ += f\"+{rev}\"\n except Exception as e:\n # if this is not a git repository and _version.py already exists,\n # we are probably installing from an sdist, so use the existing _version.py\n if path.exists(version_file_path):\n exec(read_file(version_file_path))\n else:\n print(e, file=sys.stderr)\nwith open(version_file_path, \"w\") as f:\n f.write(\"# Package version\\n\")\n f.write(\"# Generated by setup.py -- do not modify directly\\n\\n\")\n f.write(f'__version__ = \"{__version__}\"')\n\n\n# store log from build_libs and display at the end in verbose mode\n# see https://github.com/pypa/pip/issues/6634\nlog_stream = StringIO()\nhandler = logging.StreamHandler(log_stream)\nlog = logging.getLogger(\"cocotb._build_libs\")\nlog.setLevel(logging.INFO)\nlog.addHandler(handler)\n\nsetup(\n name=\"cocotb\",\n cmdclass={\"build_ext\": build_ext},\n version=__version__,\n description=\"cocotb is a coroutine based cosimulation library for writing VHDL and Verilog testbenches in Python.\",\n url=\"https://www.cocotb.org\",\n license=\"BSD\",\n long_description=read_file(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n author=\"Chris Higgs, Stuart Hodgson\",\n maintainer=\"cocotb contributors\",\n maintainer_email=\"[email protected]\",\n install_requires=[\n \"find_libpython\",\n ],\n python_requires=\">=3.6\",\n packages=find_packages(where=\"src\"),\n package_dir={\"\": \"src\"},\n package_data={\n \"cocotb\": (\n package_files(\"src/cocotb/share/makefiles\")\n + package_files(\"src/cocotb/share/include\") # noqa: W504\n + package_files(\"src/cocotb/share/def\") # noqa: W504\n + package_files(\"src/cocotb/share/lib/verilator\") # noqa: W504\n )\n },\n ext_modules=get_ext(),\n entry_points={\n \"console_scripts\": [\n \"cocotb-config=cocotb.config:main\",\n ]\n },\n platforms=\"any\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Electronic Design Automation (EDA)\",\n \"Framework :: cocotb\",\n ],\n # these appear in the sidebar on PyPI\n project_urls={\n \"Bug Tracker\": \"https://github.com/cocotb/cocotb/issues\",\n \"Source Code\": \"https://github.com/cocotb/cocotb\",\n \"Documentation\": \"https://docs.cocotb.org\",\n },\n)\n\nprint(log_stream.getvalue())\n", "path": "setup.py"}]}
2,104
616
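The cocotb `setup.py` shown in this record stamps a development version with the current git revision and falls back to a previously generated `_version.py` when it is not building from a git checkout (for example, an sdist install). Below is a minimal, hedged reduction of that pattern; the file location and base version are placeholders rather than cocotb's real values.

```python
import subprocess
from os import path

VERSION_FILE = "_version.py"        # placeholder location, not cocotb's src/ layout
__version__ = "2.0.0.dev0"          # placeholder base version

if "dev" in __version__:
    try:
        # Append the short git revision, e.g. "2.0.0.dev0+a1b2c3d".
        rev = subprocess.check_output(
            ["git", "rev-parse", "--short", "HEAD"], universal_newlines=True
        ).strip()
        __version__ += f"+{rev}"
    except Exception:
        # Not a git checkout (e.g. installing from an sdist): reuse the version
        # string that was written into _version.py when the sdist was built.
        if path.exists(VERSION_FILE):
            with open(VERSION_FILE, encoding="utf8") as f:
                exec(f.read())

with open(VERSION_FILE, "w") as f:
    f.write("# Generated by setup.py -- do not modify directly\n")
    f.write(f'__version__ = "{__version__}"\n')
```

Writing the resolved string back into `_version.py` is what allows the sdist fallback branch to work on a later build.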
gh_patches_debug_27239
rasdani/github-patches
git_diff
Kinto__kinto-697
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Crash when querying `GET /buckets/collections` <details> <summary> <code>ValueError: You cannot build children keys from its parent key.Trying to build type "collection" from object key "/buckets/collections"</code></summary> ``` return authz_policy.permits(context, principals, permission) File "/home/mathieu/Code/Mozilla/kinto/kinto/core/authorization.py", line 76, in permits get_bound_permissions=self.get_bound_permissions) File "/home/mathieu/Code/Mozilla/kinto/kinto/core/authorization.py", line 169, in check_permission return self._check_permission(self.permission_object_id, *args, **kw) File "/home/mathieu/Code/Mozilla/kinto/kinto/core/permission/postgresql/__init__.py", line 237, in check_permission perms = get_bound_permissions(object_id, permission) File "/home/mathieu/Code/Mozilla/kinto/kinto/authorization.py", line 144, in get_bound_permissions return build_permissions_set(*args, **kwargs) File "/home/mathieu/Code/Mozilla/kinto/kinto/authorization.py", line 136, in build_permissions_set granters.add(build_permission_tuple(obj, permission, obj_parts)) File "/home/mathieu/Code/Mozilla/kinto/kinto/authorization.py", line 105, in build_permission_tuple obj_type, '/'.join(obj_parts))) ValueError: You cannot build children keys from its parent key.Trying to build type "collection" from object key "/buckets/collections". lang=None uid=7447fd40326a7840a5135b3fa2e1acef4c516ece4635b6ebad0522aaa9331c3a ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kinto/authorization.py` Content: ``` 1 from kinto.core import authorization as core_authorization 2 from pyramid.security import IAuthorizationPolicy, Authenticated 3 from zope.interface import implementer 4 5 6 # Vocab really matters when you deal with permissions. Let's do a quick recap 7 # of the terms used here: 8 # 9 # Object URI: 10 # An unique identifier for an object. 11 # for instance, /buckets/blog/collections/articles/records/article1 12 # 13 # Object: 14 # A common denomination of an object (e.g. "collection" or "record") 15 # 16 # Unbound permission: 17 # A permission not bound to an object (e.g. "create") 18 # 19 # Bound permission: 20 # A permission bound to an object (e.g. "collection:create") 21 22 23 # Dictionary which list all permissions a given permission enables. 
24 PERMISSIONS_INHERITANCE_TREE = { 25 'bucket:write': { 26 'bucket': ['write'] 27 }, 28 'bucket:read': { 29 'bucket': ['write', 'read'] 30 }, 31 'bucket:group:create': { 32 'bucket': ['write', 'group:create'] 33 }, 34 'bucket:collection:create': { 35 'bucket': ['write', 'collection:create'] 36 }, 37 'group:write': { 38 'bucket': ['write'], 39 'group': ['write'] 40 }, 41 'group:read': { 42 'bucket': ['write', 'read'], 43 'group': ['write', 'read'] 44 }, 45 'collection:write': { 46 'bucket': ['write'], 47 'collection': ['write'], 48 }, 49 'collection:read': { 50 'bucket': ['write', 'read'], 51 'collection': ['write', 'read'], 52 }, 53 'collection:record:create': { 54 'bucket': ['write'], 55 'collection': ['write', 'record:create'] 56 }, 57 'record:write': { 58 'bucket': ['write'], 59 'collection': ['write'], 60 'record': ['write'] 61 }, 62 'record:read': { 63 'bucket': ['write', 'read'], 64 'collection': ['write', 'read'], 65 'record': ['write', 'read'] 66 } 67 } 68 69 70 def get_object_type(object_uri): 71 """Return the type of an object from its id.""" 72 73 obj_parts = object_uri.split('/') 74 if len(obj_parts) % 2 == 0: 75 object_uri = '/'.join(obj_parts[:-1]) 76 77 # Order matters here. More precise is tested first. 78 if 'records' in object_uri: 79 obj_type = 'record' 80 elif 'collections' in object_uri: 81 obj_type = 'collection' 82 elif 'groups' in object_uri: 83 obj_type = 'group' 84 elif 'buckets' in object_uri: 85 obj_type = 'bucket' 86 else: 87 obj_type = None 88 return obj_type 89 90 91 def build_permission_tuple(obj_type, unbound_permission, obj_parts): 92 """Returns a tuple of (object_uri, unbound_permission)""" 93 PARTS_LENGTH = { 94 'bucket': 3, 95 'collection': 5, 96 'group': 5, 97 'record': 7 98 } 99 if obj_type not in PARTS_LENGTH: 100 raise ValueError('Invalid object type: %s' % obj_type) 101 102 if PARTS_LENGTH[obj_type] > len(obj_parts): 103 raise ValueError('You cannot build children keys from its parent key.' 104 'Trying to build type "%s" from object key "%s".' % ( 105 obj_type, '/'.join(obj_parts))) 106 length = PARTS_LENGTH[obj_type] 107 return ('/'.join(obj_parts[:length]), unbound_permission) 108 109 110 def build_permissions_set(object_uri, unbound_permission, 111 inheritance_tree=None): 112 """Build a set of all permissions that can grant access to the given 113 object URI and unbound permission. 114 115 >>> build_required_permissions('/buckets/blog', 'write') 116 set(('/buckets/blog', 'write')) 117 118 """ 119 120 if inheritance_tree is None: 121 inheritance_tree = PERMISSIONS_INHERITANCE_TREE 122 123 obj_type = get_object_type(object_uri) 124 125 # Unknown object type, does not map the INHERITANCE_TREE. 126 # In that case, the set of related permissions is empty. 
127 if obj_type is None: 128 return set() 129 130 bound_permission = '%s:%s' % (obj_type, unbound_permission) 131 granters = set() 132 133 obj_parts = object_uri.split('/') 134 for obj, permission_list in inheritance_tree[bound_permission].items(): 135 for permission in permission_list: 136 granters.add(build_permission_tuple(obj, permission, obj_parts)) 137 138 return granters 139 140 141 @implementer(IAuthorizationPolicy) 142 class AuthorizationPolicy(core_authorization.AuthorizationPolicy): 143 def get_bound_permissions(self, *args, **kwargs): 144 return build_permissions_set(*args, **kwargs) 145 146 147 class RouteFactory(core_authorization.RouteFactory): 148 pass 149 150 151 class BucketRouteFactory(RouteFactory): 152 def fetch_shared_records(self, perm, principals, get_bound_permissions): 153 """Buckets list is authorized even if no object is accessible for 154 the current principals. 155 """ 156 shared = super(BucketRouteFactory, self).fetch_shared_records( 157 perm, principals, get_bound_permissions) 158 if shared is None and Authenticated in principals: 159 self.shared_ids = [] 160 return self.shared_ids 161 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kinto/authorization.py b/kinto/authorization.py --- a/kinto/authorization.py +++ b/kinto/authorization.py @@ -1,7 +1,9 @@ -from kinto.core import authorization as core_authorization +import re + from pyramid.security import IAuthorizationPolicy, Authenticated from zope.interface import implementer +from kinto.core import authorization as core_authorization # Vocab really matters when you deal with permissions. Let's do a quick recap # of the terms used here: @@ -69,23 +71,15 @@ def get_object_type(object_uri): """Return the type of an object from its id.""" - - obj_parts = object_uri.split('/') - if len(obj_parts) % 2 == 0: - object_uri = '/'.join(obj_parts[:-1]) - - # Order matters here. More precise is tested first. - if 'records' in object_uri: - obj_type = 'record' - elif 'collections' in object_uri: - obj_type = 'collection' - elif 'groups' in object_uri: - obj_type = 'group' - elif 'buckets' in object_uri: - obj_type = 'bucket' - else: - obj_type = None - return obj_type + if re.match(r'/buckets/(.+)/collections/(.+)/records/(.+)?', object_uri): + return 'record' + if re.match(r'/buckets/(.+)/collections/(.+)?', object_uri): + return 'collection' + if re.match(r'/buckets/(.+)/groups/(.+)?', object_uri): + return 'group' + if re.match(r'/buckets/(.+)?', object_uri): + return 'bucket' + return None def build_permission_tuple(obj_type, unbound_permission, obj_parts):
{"golden_diff": "diff --git a/kinto/authorization.py b/kinto/authorization.py\n--- a/kinto/authorization.py\n+++ b/kinto/authorization.py\n@@ -1,7 +1,9 @@\n-from kinto.core import authorization as core_authorization\n+import re\n+\n from pyramid.security import IAuthorizationPolicy, Authenticated\n from zope.interface import implementer\n \n+from kinto.core import authorization as core_authorization\n \n # Vocab really matters when you deal with permissions. Let's do a quick recap\n # of the terms used here:\n@@ -69,23 +71,15 @@\n \n def get_object_type(object_uri):\n \"\"\"Return the type of an object from its id.\"\"\"\n-\n- obj_parts = object_uri.split('/')\n- if len(obj_parts) % 2 == 0:\n- object_uri = '/'.join(obj_parts[:-1])\n-\n- # Order matters here. More precise is tested first.\n- if 'records' in object_uri:\n- obj_type = 'record'\n- elif 'collections' in object_uri:\n- obj_type = 'collection'\n- elif 'groups' in object_uri:\n- obj_type = 'group'\n- elif 'buckets' in object_uri:\n- obj_type = 'bucket'\n- else:\n- obj_type = None\n- return obj_type\n+ if re.match(r'/buckets/(.+)/collections/(.+)/records/(.+)?', object_uri):\n+ return 'record'\n+ if re.match(r'/buckets/(.+)/collections/(.+)?', object_uri):\n+ return 'collection'\n+ if re.match(r'/buckets/(.+)/groups/(.+)?', object_uri):\n+ return 'group'\n+ if re.match(r'/buckets/(.+)?', object_uri):\n+ return 'bucket'\n+ return None\n \n \n def build_permission_tuple(obj_type, unbound_permission, obj_parts):\n", "issue": "Crash when querying `GET /buckets/collections`\n<details>\n <summary>\n\n<code>ValueError: You cannot build children keys from its parent key.Trying to build type \"collection\" from object key \"/buckets/collections\"</code></summary>\n\n\n\n```\n return authz_policy.permits(context, principals, permission)\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/core/authorization.py\", line 76, in permits\n get_bound_permissions=self.get_bound_permissions)\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/core/authorization.py\", line 169, in check_permission\n return self._check_permission(self.permission_object_id, *args, **kw)\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/core/permission/postgresql/__init__.py\", line 237, in check_permission\n perms = get_bound_permissions(object_id, permission)\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/authorization.py\", line 144, in get_bound_permissions\n return build_permissions_set(*args, **kwargs)\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/authorization.py\", line 136, in build_permissions_set\n granters.add(build_permission_tuple(obj, permission, obj_parts))\n File \"/home/mathieu/Code/Mozilla/kinto/kinto/authorization.py\", line 105, in build_permission_tuple\n obj_type, '/'.join(obj_parts)))\nValueError: You cannot build children keys from its parent key.Trying to build type \"collection\" from object key \"/buckets/collections\". lang=None uid=7447fd40326a7840a5135b3fa2e1acef4c516ece4635b6ebad0522aaa9331c3a\n```\n\n</details>\n\n", "before_files": [{"content": "from kinto.core import authorization as core_authorization\nfrom pyramid.security import IAuthorizationPolicy, Authenticated\nfrom zope.interface import implementer\n\n\n# Vocab really matters when you deal with permissions. Let's do a quick recap\n# of the terms used here:\n#\n# Object URI:\n# An unique identifier for an object.\n# for instance, /buckets/blog/collections/articles/records/article1\n#\n# Object:\n# A common denomination of an object (e.g. 
\"collection\" or \"record\")\n#\n# Unbound permission:\n# A permission not bound to an object (e.g. \"create\")\n#\n# Bound permission:\n# A permission bound to an object (e.g. \"collection:create\")\n\n\n# Dictionary which list all permissions a given permission enables.\nPERMISSIONS_INHERITANCE_TREE = {\n 'bucket:write': {\n 'bucket': ['write']\n },\n 'bucket:read': {\n 'bucket': ['write', 'read']\n },\n 'bucket:group:create': {\n 'bucket': ['write', 'group:create']\n },\n 'bucket:collection:create': {\n 'bucket': ['write', 'collection:create']\n },\n 'group:write': {\n 'bucket': ['write'],\n 'group': ['write']\n },\n 'group:read': {\n 'bucket': ['write', 'read'],\n 'group': ['write', 'read']\n },\n 'collection:write': {\n 'bucket': ['write'],\n 'collection': ['write'],\n },\n 'collection:read': {\n 'bucket': ['write', 'read'],\n 'collection': ['write', 'read'],\n },\n 'collection:record:create': {\n 'bucket': ['write'],\n 'collection': ['write', 'record:create']\n },\n 'record:write': {\n 'bucket': ['write'],\n 'collection': ['write'],\n 'record': ['write']\n },\n 'record:read': {\n 'bucket': ['write', 'read'],\n 'collection': ['write', 'read'],\n 'record': ['write', 'read']\n }\n}\n\n\ndef get_object_type(object_uri):\n \"\"\"Return the type of an object from its id.\"\"\"\n\n obj_parts = object_uri.split('/')\n if len(obj_parts) % 2 == 0:\n object_uri = '/'.join(obj_parts[:-1])\n\n # Order matters here. More precise is tested first.\n if 'records' in object_uri:\n obj_type = 'record'\n elif 'collections' in object_uri:\n obj_type = 'collection'\n elif 'groups' in object_uri:\n obj_type = 'group'\n elif 'buckets' in object_uri:\n obj_type = 'bucket'\n else:\n obj_type = None\n return obj_type\n\n\ndef build_permission_tuple(obj_type, unbound_permission, obj_parts):\n \"\"\"Returns a tuple of (object_uri, unbound_permission)\"\"\"\n PARTS_LENGTH = {\n 'bucket': 3,\n 'collection': 5,\n 'group': 5,\n 'record': 7\n }\n if obj_type not in PARTS_LENGTH:\n raise ValueError('Invalid object type: %s' % obj_type)\n\n if PARTS_LENGTH[obj_type] > len(obj_parts):\n raise ValueError('You cannot build children keys from its parent key.'\n 'Trying to build type \"%s\" from object key \"%s\".' 
% (\n obj_type, '/'.join(obj_parts)))\n length = PARTS_LENGTH[obj_type]\n return ('/'.join(obj_parts[:length]), unbound_permission)\n\n\ndef build_permissions_set(object_uri, unbound_permission,\n inheritance_tree=None):\n \"\"\"Build a set of all permissions that can grant access to the given\n object URI and unbound permission.\n\n >>> build_required_permissions('/buckets/blog', 'write')\n set(('/buckets/blog', 'write'))\n\n \"\"\"\n\n if inheritance_tree is None:\n inheritance_tree = PERMISSIONS_INHERITANCE_TREE\n\n obj_type = get_object_type(object_uri)\n\n # Unknown object type, does not map the INHERITANCE_TREE.\n # In that case, the set of related permissions is empty.\n if obj_type is None:\n return set()\n\n bound_permission = '%s:%s' % (obj_type, unbound_permission)\n granters = set()\n\n obj_parts = object_uri.split('/')\n for obj, permission_list in inheritance_tree[bound_permission].items():\n for permission in permission_list:\n granters.add(build_permission_tuple(obj, permission, obj_parts))\n\n return granters\n\n\n@implementer(IAuthorizationPolicy)\nclass AuthorizationPolicy(core_authorization.AuthorizationPolicy):\n def get_bound_permissions(self, *args, **kwargs):\n return build_permissions_set(*args, **kwargs)\n\n\nclass RouteFactory(core_authorization.RouteFactory):\n pass\n\n\nclass BucketRouteFactory(RouteFactory):\n def fetch_shared_records(self, perm, principals, get_bound_permissions):\n \"\"\"Buckets list is authorized even if no object is accessible for\n the current principals.\n \"\"\"\n shared = super(BucketRouteFactory, self).fetch_shared_records(\n perm, principals, get_bound_permissions)\n if shared is None and Authenticated in principals:\n self.shared_ids = []\n return self.shared_ids\n", "path": "kinto/authorization.py"}], "after_files": [{"content": "import re\n\nfrom pyramid.security import IAuthorizationPolicy, Authenticated\nfrom zope.interface import implementer\n\nfrom kinto.core import authorization as core_authorization\n\n# Vocab really matters when you deal with permissions. Let's do a quick recap\n# of the terms used here:\n#\n# Object URI:\n# An unique identifier for an object.\n# for instance, /buckets/blog/collections/articles/records/article1\n#\n# Object:\n# A common denomination of an object (e.g. \"collection\" or \"record\")\n#\n# Unbound permission:\n# A permission not bound to an object (e.g. \"create\")\n#\n# Bound permission:\n# A permission bound to an object (e.g. 
\"collection:create\")\n\n\n# Dictionary which list all permissions a given permission enables.\nPERMISSIONS_INHERITANCE_TREE = {\n 'bucket:write': {\n 'bucket': ['write']\n },\n 'bucket:read': {\n 'bucket': ['write', 'read']\n },\n 'bucket:group:create': {\n 'bucket': ['write', 'group:create']\n },\n 'bucket:collection:create': {\n 'bucket': ['write', 'collection:create']\n },\n 'group:write': {\n 'bucket': ['write'],\n 'group': ['write']\n },\n 'group:read': {\n 'bucket': ['write', 'read'],\n 'group': ['write', 'read']\n },\n 'collection:write': {\n 'bucket': ['write'],\n 'collection': ['write'],\n },\n 'collection:read': {\n 'bucket': ['write', 'read'],\n 'collection': ['write', 'read'],\n },\n 'collection:record:create': {\n 'bucket': ['write'],\n 'collection': ['write', 'record:create']\n },\n 'record:write': {\n 'bucket': ['write'],\n 'collection': ['write'],\n 'record': ['write']\n },\n 'record:read': {\n 'bucket': ['write', 'read'],\n 'collection': ['write', 'read'],\n 'record': ['write', 'read']\n }\n}\n\n\ndef get_object_type(object_uri):\n \"\"\"Return the type of an object from its id.\"\"\"\n if re.match(r'/buckets/(.+)/collections/(.+)/records/(.+)?', object_uri):\n return 'record'\n if re.match(r'/buckets/(.+)/collections/(.+)?', object_uri):\n return 'collection'\n if re.match(r'/buckets/(.+)/groups/(.+)?', object_uri):\n return 'group'\n if re.match(r'/buckets/(.+)?', object_uri):\n return 'bucket'\n return None\n\n\ndef build_permission_tuple(obj_type, unbound_permission, obj_parts):\n \"\"\"Returns a tuple of (object_uri, unbound_permission)\"\"\"\n PARTS_LENGTH = {\n 'bucket': 3,\n 'collection': 5,\n 'group': 5,\n 'record': 7\n }\n if obj_type not in PARTS_LENGTH:\n raise ValueError('Invalid object type: %s' % obj_type)\n\n if PARTS_LENGTH[obj_type] > len(obj_parts):\n raise ValueError('You cannot build children keys from its parent key.'\n 'Trying to build type \"%s\" from object key \"%s\".' 
% (\n obj_type, '/'.join(obj_parts)))\n length = PARTS_LENGTH[obj_type]\n return ('/'.join(obj_parts[:length]), unbound_permission)\n\n\ndef build_permissions_set(object_uri, unbound_permission,\n inheritance_tree=None):\n \"\"\"Build a set of all permissions that can grant access to the given\n object URI and unbound permission.\n\n >>> build_required_permissions('/buckets/blog', 'write')\n set(('/buckets/blog', 'write'))\n\n \"\"\"\n\n if inheritance_tree is None:\n inheritance_tree = PERMISSIONS_INHERITANCE_TREE\n\n obj_type = get_object_type(object_uri)\n\n # Unknown object type, does not map the INHERITANCE_TREE.\n # In that case, the set of related permissions is empty.\n if obj_type is None:\n return set()\n\n bound_permission = '%s:%s' % (obj_type, unbound_permission)\n granters = set()\n\n obj_parts = object_uri.split('/')\n for obj, permission_list in inheritance_tree[bound_permission].items():\n for permission in permission_list:\n granters.add(build_permission_tuple(obj, permission, obj_parts))\n\n return granters\n\n\n@implementer(IAuthorizationPolicy)\nclass AuthorizationPolicy(core_authorization.AuthorizationPolicy):\n def get_bound_permissions(self, *args, **kwargs):\n return build_permissions_set(*args, **kwargs)\n\n\nclass RouteFactory(core_authorization.RouteFactory):\n pass\n\n\nclass BucketRouteFactory(RouteFactory):\n def fetch_shared_records(self, perm, principals, get_bound_permissions):\n \"\"\"Buckets list is authorized even if no object is accessible for\n the current principals.\n \"\"\"\n shared = super(BucketRouteFactory, self).fetch_shared_records(\n perm, principals, get_bound_permissions)\n if shared is None and Authenticated in principals:\n self.shared_ids = []\n return self.shared_ids\n", "path": "kinto/authorization.py"}]}
2,182
408
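The kinto fix recorded above replaces substring checks (`'collections' in object_uri`) with anchored regular expressions, so a bucket that merely happens to be named `collections` — the `/buckets/collections` URI from the traceback — is classified as a bucket again. A self-contained sketch of the corrected lookup follows; the function mirrors the golden diff, while the asserts are illustrative additions checking the failing URI.

```python
import re


def get_object_type(object_uri):
    """Return the resource type encoded in a kinto-style object URI."""
    if re.match(r'/buckets/(.+)/collections/(.+)/records/(.+)?', object_uri):
        return 'record'
    if re.match(r'/buckets/(.+)/collections/(.+)?', object_uri):
        return 'collection'
    if re.match(r'/buckets/(.+)/groups/(.+)?', object_uri):
        return 'group'
    if re.match(r'/buckets/(.+)?', object_uri):
        return 'bucket'
    return None


# The URI from the crash report is now read as a bucket, not a collection.
assert get_object_type('/buckets/collections') == 'bucket'
assert get_object_type('/buckets/blog/collections/articles') == 'collection'
```

Because the patterns require the literal `/collections/` segment to appear *after* a bucket id, the parent-key length check in `build_permission_tuple` can no longer be reached with a mismatched object type.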
gh_patches_debug_5763
rasdani/github-patches
git_diff
huggingface__diffusers-6410
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ValueError when `to('cuda')` on Value-guided planning pipeline ### Describe the bug When I run the Value-guided planning pipeline colab <https://colab.research.google.com/drive/1rXm8CX4ZdN5qivjJ2lhwhkOmt_m0CvU0> with `to('cuda')` to speed up the diffusion process. It gave me a ValueError. ### Reproduction <https://colab.research.google.com/drive/1SFl7daLQxd8QyHJP6ndSPznSFX2rppQ2> Code block 24 `pipeline.to(DEVICE)` ### Logs ```shell ValueError: ValueGuidedRLPipeline { "_class_name": "ValueGuidedRLPipeline", "_diffusers_version": "0.25.0.dev0", "_name_or_path": "bglick13/hopper-medium-v2-value-function-hor32" } has been incorrectly initialized or <class 'diffusers.experimental.rl.value_guided_sampling.ValueGuidedRLPipeline'> is incorrectly implemented. Expected {'env', 'unet', 'value_function', 'scheduler'} to be defined, but dict_keys([]) are defined. ``` ### System Info colab ### Who can help? @yiyixuxu ValueError when `to('cuda')` on Value-guided planning pipeline ### Describe the bug When I run the Value-guided planning pipeline colab <https://colab.research.google.com/drive/1rXm8CX4ZdN5qivjJ2lhwhkOmt_m0CvU0> with `to('cuda')` to speed up the diffusion process. It gave me a ValueError. ### Reproduction <https://colab.research.google.com/drive/1SFl7daLQxd8QyHJP6ndSPznSFX2rppQ2> Code block 24 `pipeline.to(DEVICE)` ### Logs ```shell ValueError: ValueGuidedRLPipeline { "_class_name": "ValueGuidedRLPipeline", "_diffusers_version": "0.25.0.dev0", "_name_or_path": "bglick13/hopper-medium-v2-value-function-hor32" } has been incorrectly initialized or <class 'diffusers.experimental.rl.value_guided_sampling.ValueGuidedRLPipeline'> is incorrectly implemented. Expected {'env', 'unet', 'value_function', 'scheduler'} to be defined, but dict_keys([]) are defined. ``` ### System Info colab ### Who can help? @yiyixuxu --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/diffusers/experimental/rl/value_guided_sampling.py` Content: ``` 1 # Copyright 2023 The HuggingFace Team. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import numpy as np 16 import torch 17 import tqdm 18 19 from ...models.unet_1d import UNet1DModel 20 from ...pipelines import DiffusionPipeline 21 from ...utils.dummy_pt_objects import DDPMScheduler 22 from ...utils.torch_utils import randn_tensor 23 24 25 class ValueGuidedRLPipeline(DiffusionPipeline): 26 r""" 27 Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states. 28 29 This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods 30 implemented for all pipelines (downloading, saving, running on a particular device, etc.). 31 32 Parameters: 33 value_function ([`UNet1DModel`]): 34 A specialized UNet for fine-tuning trajectories base on reward. 
35 unet ([`UNet1DModel`]): 36 UNet architecture to denoise the encoded trajectories. 37 scheduler ([`SchedulerMixin`]): 38 A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this 39 application is [`DDPMScheduler`]. 40 env (): 41 An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models. 42 """ 43 44 def __init__( 45 self, 46 value_function: UNet1DModel, 47 unet: UNet1DModel, 48 scheduler: DDPMScheduler, 49 env, 50 ): 51 super().__init__() 52 self.value_function = value_function 53 self.unet = unet 54 self.scheduler = scheduler 55 self.env = env 56 self.data = env.get_dataset() 57 self.means = {} 58 for key in self.data.keys(): 59 try: 60 self.means[key] = self.data[key].mean() 61 except: # noqa: E722 62 pass 63 self.stds = {} 64 for key in self.data.keys(): 65 try: 66 self.stds[key] = self.data[key].std() 67 except: # noqa: E722 68 pass 69 self.state_dim = env.observation_space.shape[0] 70 self.action_dim = env.action_space.shape[0] 71 72 def normalize(self, x_in, key): 73 return (x_in - self.means[key]) / self.stds[key] 74 75 def de_normalize(self, x_in, key): 76 return x_in * self.stds[key] + self.means[key] 77 78 def to_torch(self, x_in): 79 if isinstance(x_in, dict): 80 return {k: self.to_torch(v) for k, v in x_in.items()} 81 elif torch.is_tensor(x_in): 82 return x_in.to(self.unet.device) 83 return torch.tensor(x_in, device=self.unet.device) 84 85 def reset_x0(self, x_in, cond, act_dim): 86 for key, val in cond.items(): 87 x_in[:, key, act_dim:] = val.clone() 88 return x_in 89 90 def run_diffusion(self, x, conditions, n_guide_steps, scale): 91 batch_size = x.shape[0] 92 y = None 93 for i in tqdm.tqdm(self.scheduler.timesteps): 94 # create batch of timesteps to pass into model 95 timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long) 96 for _ in range(n_guide_steps): 97 with torch.enable_grad(): 98 x.requires_grad_() 99 100 # permute to match dimension for pre-trained models 101 y = self.value_function(x.permute(0, 2, 1), timesteps).sample 102 grad = torch.autograd.grad([y.sum()], [x])[0] 103 104 posterior_variance = self.scheduler._get_variance(i) 105 model_std = torch.exp(0.5 * posterior_variance) 106 grad = model_std * grad 107 108 grad[timesteps < 2] = 0 109 x = x.detach() 110 x = x + scale * grad 111 x = self.reset_x0(x, conditions, self.action_dim) 112 113 prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1) 114 115 # TODO: verify deprecation of this kwarg 116 x = self.scheduler.step(prev_x, i, x)["prev_sample"] 117 118 # apply conditions to the trajectory (set the initial state) 119 x = self.reset_x0(x, conditions, self.action_dim) 120 x = self.to_torch(x) 121 return x, y 122 123 def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1): 124 # normalize the observations and create batch dimension 125 obs = self.normalize(obs, "observations") 126 obs = obs[None].repeat(batch_size, axis=0) 127 128 conditions = {0: self.to_torch(obs)} 129 shape = (batch_size, planning_horizon, self.state_dim + self.action_dim) 130 131 # generate initial noise and apply our conditions (to make the trajectories start at current state) 132 x1 = randn_tensor(shape, device=self.unet.device) 133 x = self.reset_x0(x1, conditions, self.action_dim) 134 x = self.to_torch(x) 135 136 # run the diffusion process 137 x, y = self.run_diffusion(x, conditions, n_guide_steps, scale) 138 139 # sort output trajectories by value 140 sorted_idx = 
y.argsort(0, descending=True).squeeze() 141 sorted_values = x[sorted_idx] 142 actions = sorted_values[:, :, : self.action_dim] 143 actions = actions.detach().cpu().numpy() 144 denorm_actions = self.de_normalize(actions, key="actions") 145 146 # select the action with the highest value 147 if y is not None: 148 selected_index = 0 149 else: 150 # if we didn't run value guiding, select a random action 151 selected_index = np.random.randint(0, batch_size) 152 153 denorm_actions = denorm_actions[selected_index, 0] 154 return denorm_actions 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/diffusers/experimental/rl/value_guided_sampling.py b/src/diffusers/experimental/rl/value_guided_sampling.py --- a/src/diffusers/experimental/rl/value_guided_sampling.py +++ b/src/diffusers/experimental/rl/value_guided_sampling.py @@ -49,10 +49,9 @@ env, ): super().__init__() - self.value_function = value_function - self.unet = unet - self.scheduler = scheduler - self.env = env + + self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env) + self.data = env.get_dataset() self.means = {} for key in self.data.keys():
{"golden_diff": "diff --git a/src/diffusers/experimental/rl/value_guided_sampling.py b/src/diffusers/experimental/rl/value_guided_sampling.py\n--- a/src/diffusers/experimental/rl/value_guided_sampling.py\n+++ b/src/diffusers/experimental/rl/value_guided_sampling.py\n@@ -49,10 +49,9 @@\n env,\n ):\n super().__init__()\n- self.value_function = value_function\n- self.unet = unet\n- self.scheduler = scheduler\n- self.env = env\n+\n+ self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env)\n+\n self.data = env.get_dataset()\n self.means = {}\n for key in self.data.keys():\n", "issue": "ValueError when `to('cuda')` on Value-guided planning pipeline\n### Describe the bug\r\n\r\nWhen I run the Value-guided planning pipeline colab <https://colab.research.google.com/drive/1rXm8CX4ZdN5qivjJ2lhwhkOmt_m0CvU0> with `to('cuda')` to speed up the diffusion process. It gave me a ValueError.\r\n\r\n### Reproduction\r\n\r\n<https://colab.research.google.com/drive/1SFl7daLQxd8QyHJP6ndSPznSFX2rppQ2> Code block 24 `pipeline.to(DEVICE)`\r\n\r\n### Logs\r\n\r\n```shell\r\nValueError: ValueGuidedRLPipeline {\r\n \"_class_name\": \"ValueGuidedRLPipeline\",\r\n \"_diffusers_version\": \"0.25.0.dev0\",\r\n \"_name_or_path\": \"bglick13/hopper-medium-v2-value-function-hor32\"\r\n}\r\n has been incorrectly initialized or <class 'diffusers.experimental.rl.value_guided_sampling.ValueGuidedRLPipeline'> is incorrectly implemented. Expected {'env', 'unet', 'value_function', 'scheduler'} to be defined, but dict_keys([]) are defined.\r\n```\r\n\r\n### System Info\r\n\r\ncolab\r\n\r\n### Who can help?\r\n\r\n@yiyixuxu\nValueError when `to('cuda')` on Value-guided planning pipeline\n### Describe the bug\r\n\r\nWhen I run the Value-guided planning pipeline colab <https://colab.research.google.com/drive/1rXm8CX4ZdN5qivjJ2lhwhkOmt_m0CvU0> with `to('cuda')` to speed up the diffusion process. It gave me a ValueError.\r\n\r\n### Reproduction\r\n\r\n<https://colab.research.google.com/drive/1SFl7daLQxd8QyHJP6ndSPznSFX2rppQ2> Code block 24 `pipeline.to(DEVICE)`\r\n\r\n### Logs\r\n\r\n```shell\r\nValueError: ValueGuidedRLPipeline {\r\n \"_class_name\": \"ValueGuidedRLPipeline\",\r\n \"_diffusers_version\": \"0.25.0.dev0\",\r\n \"_name_or_path\": \"bglick13/hopper-medium-v2-value-function-hor32\"\r\n}\r\n has been incorrectly initialized or <class 'diffusers.experimental.rl.value_guided_sampling.ValueGuidedRLPipeline'> is incorrectly implemented. Expected {'env', 'unet', 'value_function', 'scheduler'} to be defined, but dict_keys([]) are defined.\r\n```\r\n\r\n### System Info\r\n\r\ncolab\r\n\r\n### Who can help?\r\n\r\n@yiyixuxu\n", "before_files": [{"content": "# Copyright 2023 The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport torch\nimport tqdm\n\nfrom ...models.unet_1d import UNet1DModel\nfrom ...pipelines import DiffusionPipeline\nfrom ...utils.dummy_pt_objects import DDPMScheduler\nfrom ...utils.torch_utils import randn_tensor\n\n\nclass ValueGuidedRLPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Parameters:\n value_function ([`UNet1DModel`]):\n A specialized UNet for fine-tuning trajectories base on reward.\n unet ([`UNet1DModel`]):\n UNet architecture to denoise the encoded trajectories.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this\n application is [`DDPMScheduler`].\n env ():\n An environment following the OpenAI gym API to act in. For now only Hopper has pretrained models.\n \"\"\"\n\n def __init__(\n self,\n value_function: UNet1DModel,\n unet: UNet1DModel,\n scheduler: DDPMScheduler,\n env,\n ):\n super().__init__()\n self.value_function = value_function\n self.unet = unet\n self.scheduler = scheduler\n self.env = env\n self.data = env.get_dataset()\n self.means = {}\n for key in self.data.keys():\n try:\n self.means[key] = self.data[key].mean()\n except: # noqa: E722\n pass\n self.stds = {}\n for key in self.data.keys():\n try:\n self.stds[key] = self.data[key].std()\n except: # noqa: E722\n pass\n self.state_dim = env.observation_space.shape[0]\n self.action_dim = env.action_space.shape[0]\n\n def normalize(self, x_in, key):\n return (x_in - self.means[key]) / self.stds[key]\n\n def de_normalize(self, x_in, key):\n return x_in * self.stds[key] + self.means[key]\n\n def to_torch(self, x_in):\n if isinstance(x_in, dict):\n return {k: self.to_torch(v) for k, v in x_in.items()}\n elif torch.is_tensor(x_in):\n return x_in.to(self.unet.device)\n return torch.tensor(x_in, device=self.unet.device)\n\n def reset_x0(self, x_in, cond, act_dim):\n for key, val in cond.items():\n x_in[:, key, act_dim:] = val.clone()\n return x_in\n\n def run_diffusion(self, x, conditions, n_guide_steps, scale):\n batch_size = x.shape[0]\n y = None\n for i in tqdm.tqdm(self.scheduler.timesteps):\n # create batch of timesteps to pass into model\n timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)\n for _ in range(n_guide_steps):\n with torch.enable_grad():\n x.requires_grad_()\n\n # permute to match dimension for pre-trained models\n y = self.value_function(x.permute(0, 2, 1), timesteps).sample\n grad = torch.autograd.grad([y.sum()], [x])[0]\n\n posterior_variance = self.scheduler._get_variance(i)\n model_std = torch.exp(0.5 * posterior_variance)\n grad = model_std * grad\n\n grad[timesteps < 2] = 0\n x = 
x.detach()\n x = x + scale * grad\n x = self.reset_x0(x, conditions, self.action_dim)\n\n prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)\n\n # TODO: verify deprecation of this kwarg\n x = self.scheduler.step(prev_x, i, x)[\"prev_sample\"]\n\n # apply conditions to the trajectory (set the initial state)\n x = self.reset_x0(x, conditions, self.action_dim)\n x = self.to_torch(x)\n return x, y\n\n def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):\n # normalize the observations and create batch dimension\n obs = self.normalize(obs, \"observations\")\n obs = obs[None].repeat(batch_size, axis=0)\n\n conditions = {0: self.to_torch(obs)}\n shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)\n\n # generate initial noise and apply our conditions (to make the trajectories start at current state)\n x1 = randn_tensor(shape, device=self.unet.device)\n x = self.reset_x0(x1, conditions, self.action_dim)\n x = self.to_torch(x)\n\n # run the diffusion process\n x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)\n\n # sort output trajectories by value\n sorted_idx = y.argsort(0, descending=True).squeeze()\n sorted_values = x[sorted_idx]\n actions = sorted_values[:, :, : self.action_dim]\n actions = actions.detach().cpu().numpy()\n denorm_actions = self.de_normalize(actions, key=\"actions\")\n\n # select the action with the highest value\n if y is not None:\n selected_index = 0\n else:\n # if we didn't run value guiding, select a random action\n selected_index = np.random.randint(0, batch_size)\n\n denorm_actions = denorm_actions[selected_index, 0]\n return denorm_actions\n", "path": "src/diffusers/experimental/rl/value_guided_sampling.py"}], "after_files": [{"content": "# Copyright 2023 The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport torch\nimport tqdm\n\nfrom ...models.unet_1d import UNet1DModel\nfrom ...pipelines import DiffusionPipeline\nfrom ...utils.dummy_pt_objects import DDPMScheduler\nfrom ...utils.torch_utils import randn_tensor\n\n\nclass ValueGuidedRLPipeline(DiffusionPipeline):\n r\"\"\"\n Pipeline for value-guided sampling from a diffusion model trained to predict sequences of states.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods\n implemented for all pipelines (downloading, saving, running on a particular device, etc.).\n\n Parameters:\n value_function ([`UNet1DModel`]):\n A specialized UNet for fine-tuning trajectories base on reward.\n unet ([`UNet1DModel`]):\n UNet architecture to denoise the encoded trajectories.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded trajectories. Default for this\n application is [`DDPMScheduler`].\n env ():\n An environment following the OpenAI gym API to act in. 
For now only Hopper has pretrained models.\n \"\"\"\n\n def __init__(\n self,\n value_function: UNet1DModel,\n unet: UNet1DModel,\n scheduler: DDPMScheduler,\n env,\n ):\n super().__init__()\n\n self.register_modules(value_function=value_function, unet=unet, scheduler=scheduler, env=env)\n\n self.data = env.get_dataset()\n self.means = {}\n for key in self.data.keys():\n try:\n self.means[key] = self.data[key].mean()\n except: # noqa: E722\n pass\n self.stds = {}\n for key in self.data.keys():\n try:\n self.stds[key] = self.data[key].std()\n except: # noqa: E722\n pass\n self.state_dim = env.observation_space.shape[0]\n self.action_dim = env.action_space.shape[0]\n\n def normalize(self, x_in, key):\n return (x_in - self.means[key]) / self.stds[key]\n\n def de_normalize(self, x_in, key):\n return x_in * self.stds[key] + self.means[key]\n\n def to_torch(self, x_in):\n if isinstance(x_in, dict):\n return {k: self.to_torch(v) for k, v in x_in.items()}\n elif torch.is_tensor(x_in):\n return x_in.to(self.unet.device)\n return torch.tensor(x_in, device=self.unet.device)\n\n def reset_x0(self, x_in, cond, act_dim):\n for key, val in cond.items():\n x_in[:, key, act_dim:] = val.clone()\n return x_in\n\n def run_diffusion(self, x, conditions, n_guide_steps, scale):\n batch_size = x.shape[0]\n y = None\n for i in tqdm.tqdm(self.scheduler.timesteps):\n # create batch of timesteps to pass into model\n timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)\n for _ in range(n_guide_steps):\n with torch.enable_grad():\n x.requires_grad_()\n\n # permute to match dimension for pre-trained models\n y = self.value_function(x.permute(0, 2, 1), timesteps).sample\n grad = torch.autograd.grad([y.sum()], [x])[0]\n\n posterior_variance = self.scheduler._get_variance(i)\n model_std = torch.exp(0.5 * posterior_variance)\n grad = model_std * grad\n\n grad[timesteps < 2] = 0\n x = x.detach()\n x = x + scale * grad\n x = self.reset_x0(x, conditions, self.action_dim)\n\n prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)\n\n # TODO: verify deprecation of this kwarg\n x = self.scheduler.step(prev_x, i, x)[\"prev_sample\"]\n\n # apply conditions to the trajectory (set the initial state)\n x = self.reset_x0(x, conditions, self.action_dim)\n x = self.to_torch(x)\n return x, y\n\n def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):\n # normalize the observations and create batch dimension\n obs = self.normalize(obs, \"observations\")\n obs = obs[None].repeat(batch_size, axis=0)\n\n conditions = {0: self.to_torch(obs)}\n shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)\n\n # generate initial noise and apply our conditions (to make the trajectories start at current state)\n x1 = randn_tensor(shape, device=self.unet.device)\n x = self.reset_x0(x1, conditions, self.action_dim)\n x = self.to_torch(x)\n\n # run the diffusion process\n x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)\n\n # sort output trajectories by value\n sorted_idx = y.argsort(0, descending=True).squeeze()\n sorted_values = x[sorted_idx]\n actions = sorted_values[:, :, : self.action_dim]\n actions = actions.detach().cpu().numpy()\n denorm_actions = self.de_normalize(actions, key=\"actions\")\n\n # select the action with the highest value\n if y is not None:\n selected_index = 0\n else:\n # if we didn't run value guiding, select a random action\n selected_index = np.random.randint(0, batch_size)\n\n denorm_actions = 
denorm_actions[selected_index, 0]\n return denorm_actions\n", "path": "src/diffusers/experimental/rl/value_guided_sampling.py"}]}
2,632
167
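The diffusers patch above works because `DiffusionPipeline.to()` — and the expected-modules check that raised the `ValueError` — only knows about components registered through `register_modules`; assigning them as plain attributes leaves the pipeline config empty. A hedged sketch of the corrected constructor shape is shown below; the component names follow the record, and the rest of the class body is elided.

```python
from diffusers import DDPMScheduler, DiffusionPipeline, UNet1DModel


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function: UNet1DModel, unet: UNet1DModel,
                 scheduler: DDPMScheduler, env):
        super().__init__()
        # register_modules records each component in the pipeline config, so
        # pipeline.to("cuda") and save_pretrained() can locate and move them.
        # Plain attribute assignment (self.unet = unet, ...) skips that
        # bookkeeping and leaves the expected-modules check with an empty dict.
        self.register_modules(
            value_function=value_function, unet=unet, scheduler=scheduler, env=env
        )
        # ... remaining setup (dataset statistics, state/action dims) omitted here.
```

With the modules registered, `pipeline.to(DEVICE)` from the reproduction notebook moves `unet` and `value_function` onto the GPU as expected.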
gh_patches_debug_23702
rasdani/github-patches
git_diff
sunpy__sunpy-1409
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Download of sample data is repeated for each server Why do we have `sunpy.data.download_sample_data()` download all of the sample data files twice, once for each of the two servers (data.sunpy.org and hesperia.gsfc.nasa.gov)? This seems silly. Lines 60–61 in `sunpy/data/_sample.py`: ``` python for base_url in _base_urls: for file_name in _files.itervalues(): ``` Output: ``` >>> import sunpy.data >>> sunpy.data.download_sample_data() Downloading sample files to c:/Users/Albert\sunpy\data/sample_data Downloading http://data.sunpy.org/sample-data/BIR_20110922_103000_01.fit |===========================================| 760k/760k (100.00%) 4s Downloading http://data.sunpy.org/sample-data/swap_lv1_20120101_001607.fits |===========================================| 2.1M/2.1M (100.00%) 4s Downloading http://data.sunpy.org/sample-data/eit_l1_20020625_100011.fits |===========================================| 8.3M/8.3M (100.00%) 10s Downloading http://data.sunpy.org/sample-data/aia.lev1.193A_2013-09-21T16_00_06.84Z.image_ lev1.fits.zip |===========================================| 12M/ 12M (100.00%) 22s Unpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits Downloading http://data.sunpy.org/sample-data/hsi_calib_ev_20020220_1106_20020220_1106_25_ 40.fits |===========================================| 207k/207k (100.00%) 0s Downloading http://data.sunpy.org/sample-data/AIA20110319_105400_0171.fits |===========================================| 4.2M/4.2M (100.00%) 6s Downloading http://data.sunpy.org/sample-data/hsi_image_20101016_191218.fits |===========================================| 95k/ 95k (100.00%) 0s Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/BIR_20110922_103000_ 01.fit |===========================================| 760k/760k (100.00%) 0s Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/swap_lv1_20120101_00 1607.fits |===========================================| 2.1M/2.1M (100.00%) 2s Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/eit_l1_20020625_1000 11.fits |===========================================| 8.3M/8.3M (100.00%) 6s Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/aia.lev1.193A_2013-0 9-21T16_00_06.84Z.image_lev1.fits.zip |===========================================| 12M/ 12M (100.00%) 10s Unpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_calib_ev_2002022 0_1106_20020220_1106_25_40.fits |===========================================| 207k/207k (100.00%) 0s Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/AIA20110319_105400_0 171.fits |===========================================| 4.2M/4.2M (100.00%) 3s Downloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_image_20101016_1 91218.fits |===========================================| 95k/ 95k (100.00%) 0s ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `sunpy/data/_sample.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """SunPy sample data files""" 3 from __future__ import absolute_import 4 5 from os import remove 6 import os.path 7 from zipfile import ZipFile 8 from urllib2 import URLError 9 from shutil import move 10 11 from astropy.utils.data import download_file 12 13 from sunpy.util.net import url_exists 14 from sunpy import config 15 16 __author__ = "Steven Christe" 17 __email__ = "[email protected]" 18 19 20 sampledata_dir = config.get("downloads", "sample_dir") 21 22 # urls to search for the sample data 23 _base_urls = ( 24 'http://data.sunpy.org/sample-data/', 25 'http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/') 26 27 # keys are file shortcuts 28 # values consist of filename as well as optional file extension if files are 29 # hosted compressed. This extension is removed after download. 30 _files = { 31 "AIA_171_IMAGE": ("AIA20110319_105400_0171.fits", ""), 32 "RHESSI_IMAGE": ("hsi_image_20101016_191218.fits", ""), 33 "EIT_195_IMAGE": ("eit_l1_20020625_100011.fits", ""), 34 "CALLISTO_IMAGE": ("BIR_20110922_103000_01.fit", ""), 35 "RHESSI_EVENT_LIST": ("hsi_calib_ev_20020220_1106_20020220_1106_25_40.fits", ""), 36 "SWAP_LEVEL1_IMAGE": ("swap_lv1_20120101_001607.fits", ""), 37 "AIA_193_IMAGE": ("aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits", ".zip") 38 } 39 40 sample_files = {} 41 for key in _files: 42 sample_files[key] = os.path.abspath(os.path.join(sampledata_dir, _files[key][0])) 43 44 45 def download_sample_data(progress=True): 46 """ 47 Download the sample data. 48 49 Parameters 50 ---------- 51 progress: bool 52 Show a progress bar during download 53 54 Returns 55 ------- 56 None 57 """ 58 number_of_files_fetched = 0 59 print("Downloading sample files to " + sampledata_dir) 60 for base_url in _base_urls: 61 for file_name in _files.itervalues(): 62 full_file_name = file_name[0] + file_name[1] 63 if url_exists(os.path.join(base_url, full_file_name)): 64 f = download_file(os.path.join(base_url, full_file_name)) 65 real_name, ext = os.path.splitext(full_file_name) 66 67 if file_name[1] == '.zip': 68 print("Unpacking: %s" % real_name) 69 with ZipFile(f, 'r') as zip_file: 70 zip_file.extract(real_name, sampledata_dir) 71 remove(f) 72 else: 73 # move files to the data directory 74 move(f, os.path.join(sampledata_dir, file_name[0])) 75 # increment the number of files obtained to check later 76 number_of_files_fetched += 1 77 78 if number_of_files_fetched < len(_files.keys()): 79 raise URLError("Could not download all samples files. Problem with accessing sample data servers.") 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sunpy/data/_sample.py b/sunpy/data/_sample.py --- a/sunpy/data/_sample.py +++ b/sunpy/data/_sample.py @@ -57,8 +57,8 @@ """ number_of_files_fetched = 0 print("Downloading sample files to " + sampledata_dir) - for base_url in _base_urls: - for file_name in _files.itervalues(): + for file_name in _files.itervalues(): + for base_url in _base_urls: full_file_name = file_name[0] + file_name[1] if url_exists(os.path.join(base_url, full_file_name)): f = download_file(os.path.join(base_url, full_file_name)) @@ -74,6 +74,7 @@ move(f, os.path.join(sampledata_dir, file_name[0])) # increment the number of files obtained to check later number_of_files_fetched += 1 + break if number_of_files_fetched < len(_files.keys()): raise URLError("Could not download all samples files. Problem with accessing sample data servers.")
{"golden_diff": "diff --git a/sunpy/data/_sample.py b/sunpy/data/_sample.py\n--- a/sunpy/data/_sample.py\n+++ b/sunpy/data/_sample.py\n@@ -57,8 +57,8 @@\n \"\"\"\n number_of_files_fetched = 0\n print(\"Downloading sample files to \" + sampledata_dir)\n- for base_url in _base_urls:\n- for file_name in _files.itervalues():\n+ for file_name in _files.itervalues():\n+ for base_url in _base_urls:\n full_file_name = file_name[0] + file_name[1]\n if url_exists(os.path.join(base_url, full_file_name)):\n f = download_file(os.path.join(base_url, full_file_name))\n@@ -74,6 +74,7 @@\n move(f, os.path.join(sampledata_dir, file_name[0]))\n # increment the number of files obtained to check later\n number_of_files_fetched += 1\n+ break\n \n if number_of_files_fetched < len(_files.keys()):\n raise URLError(\"Could not download all samples files. Problem with accessing sample data servers.\")\n", "issue": "Download of sample data is repeated for each server\nWhy do we have `sunpy.data.download_sample_data()` download all of the sample data files twice, once for each of the two servers (data.sunpy.org and hesperia.gsfc.nasa.gov)? This seems silly.\n\nLines 60\u201361 in `sunpy/data/_sample.py`:\n\n``` python\n for base_url in _base_urls:\n for file_name in _files.itervalues():\n```\n\nOutput:\n\n```\n>>> import sunpy.data\n>>> sunpy.data.download_sample_data()\nDownloading sample files to c:/Users/Albert\\sunpy\\data/sample_data\nDownloading http://data.sunpy.org/sample-data/BIR_20110922_103000_01.fit\n|===========================================| 760k/760k (100.00%) 4s\nDownloading http://data.sunpy.org/sample-data/swap_lv1_20120101_001607.fits\n|===========================================| 2.1M/2.1M (100.00%) 4s\nDownloading http://data.sunpy.org/sample-data/eit_l1_20020625_100011.fits\n|===========================================| 8.3M/8.3M (100.00%) 10s\nDownloading http://data.sunpy.org/sample-data/aia.lev1.193A_2013-09-21T16_00_06.84Z.image_\nlev1.fits.zip\n|===========================================| 12M/ 12M (100.00%) 22s\nUnpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\nDownloading http://data.sunpy.org/sample-data/hsi_calib_ev_20020220_1106_20020220_1106_25_\n40.fits\n|===========================================| 207k/207k (100.00%) 0s\nDownloading http://data.sunpy.org/sample-data/AIA20110319_105400_0171.fits\n|===========================================| 4.2M/4.2M (100.00%) 6s\nDownloading http://data.sunpy.org/sample-data/hsi_image_20101016_191218.fits\n|===========================================| 95k/ 95k (100.00%) 0s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/BIR_20110922_103000_\n01.fit\n|===========================================| 760k/760k (100.00%) 0s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/swap_lv1_20120101_00\n1607.fits\n|===========================================| 2.1M/2.1M (100.00%) 2s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/eit_l1_20020625_1000\n11.fits\n|===========================================| 8.3M/8.3M (100.00%) 6s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/aia.lev1.193A_2013-0\n9-21T16_00_06.84Z.image_lev1.fits.zip\n|===========================================| 12M/ 12M (100.00%) 10s\nUnpacking: aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_calib_ev_2002022\n0_1106_20020220_1106_25_40.fits\n|===========================================| 207k/207k (100.00%) 
0s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/AIA20110319_105400_0\n171.fits\n|===========================================| 4.2M/4.2M (100.00%) 3s\nDownloading http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/hsi_image_20101016_1\n91218.fits\n|===========================================| 95k/ 95k (100.00%) 0s\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"SunPy sample data files\"\"\"\nfrom __future__ import absolute_import\n\nfrom os import remove\nimport os.path\nfrom zipfile import ZipFile\nfrom urllib2 import URLError\nfrom shutil import move\n\nfrom astropy.utils.data import download_file\n\nfrom sunpy.util.net import url_exists\nfrom sunpy import config\n\n__author__ = \"Steven Christe\"\n__email__ = \"[email protected]\"\n\n\nsampledata_dir = config.get(\"downloads\", \"sample_dir\")\n\n# urls to search for the sample data\n_base_urls = (\n 'http://data.sunpy.org/sample-data/',\n 'http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/')\n\n# keys are file shortcuts\n# values consist of filename as well as optional file extension if files are\n# hosted compressed. This extension is removed after download.\n_files = {\n \"AIA_171_IMAGE\": (\"AIA20110319_105400_0171.fits\", \"\"),\n \"RHESSI_IMAGE\": (\"hsi_image_20101016_191218.fits\", \"\"),\n \"EIT_195_IMAGE\": (\"eit_l1_20020625_100011.fits\", \"\"),\n \"CALLISTO_IMAGE\": (\"BIR_20110922_103000_01.fit\", \"\"),\n \"RHESSI_EVENT_LIST\": (\"hsi_calib_ev_20020220_1106_20020220_1106_25_40.fits\", \"\"),\n \"SWAP_LEVEL1_IMAGE\": (\"swap_lv1_20120101_001607.fits\", \"\"),\n \"AIA_193_IMAGE\": (\"aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\", \".zip\")\n}\n\nsample_files = {}\nfor key in _files:\n sample_files[key] = os.path.abspath(os.path.join(sampledata_dir, _files[key][0]))\n\n\ndef download_sample_data(progress=True):\n \"\"\"\n Download the sample data.\n\n Parameters\n ----------\n progress: bool\n Show a progress bar during download\n\n Returns\n -------\n None\n \"\"\"\n number_of_files_fetched = 0\n print(\"Downloading sample files to \" + sampledata_dir)\n for base_url in _base_urls:\n for file_name in _files.itervalues():\n full_file_name = file_name[0] + file_name[1]\n if url_exists(os.path.join(base_url, full_file_name)):\n f = download_file(os.path.join(base_url, full_file_name))\n real_name, ext = os.path.splitext(full_file_name)\n\n if file_name[1] == '.zip':\n print(\"Unpacking: %s\" % real_name)\n with ZipFile(f, 'r') as zip_file:\n zip_file.extract(real_name, sampledata_dir)\n remove(f)\n else:\n # move files to the data directory\n move(f, os.path.join(sampledata_dir, file_name[0]))\n # increment the number of files obtained to check later\n number_of_files_fetched += 1\n\n if number_of_files_fetched < len(_files.keys()):\n raise URLError(\"Could not download all samples files. 
Problem with accessing sample data servers.\")\n", "path": "sunpy/data/_sample.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"SunPy sample data files\"\"\"\nfrom __future__ import absolute_import\n\nfrom os import remove\nimport os.path\nfrom zipfile import ZipFile\nfrom urllib2 import URLError\nfrom shutil import move\n\nfrom astropy.utils.data import download_file\n\nfrom sunpy.util.net import url_exists\nfrom sunpy import config\n\n__author__ = \"Steven Christe\"\n__email__ = \"[email protected]\"\n\n\nsampledata_dir = config.get(\"downloads\", \"sample_dir\")\n\n# urls to search for the sample data\n_base_urls = (\n 'http://data.sunpy.org/sample-data/',\n 'http://hesperia.gsfc.nasa.gov/~schriste/sunpy-sample-data/')\n\n# keys are file shortcuts\n# values consist of filename as well as optional file extension if files are\n# hosted compressed. This extension is removed after download.\n_files = {\n \"AIA_171_IMAGE\": (\"AIA20110319_105400_0171.fits\", \"\"),\n \"RHESSI_IMAGE\": (\"hsi_image_20101016_191218.fits\", \"\"),\n \"EIT_195_IMAGE\": (\"eit_l1_20020625_100011.fits\", \"\"),\n \"CALLISTO_IMAGE\": (\"BIR_20110922_103000_01.fit\", \"\"),\n \"RHESSI_EVENT_LIST\": (\"hsi_calib_ev_20020220_1106_20020220_1106_25_40.fits\", \"\"),\n \"SWAP_LEVEL1_IMAGE\": (\"swap_lv1_20120101_001607.fits\", \"\"),\n \"AIA_193_IMAGE\": (\"aia.lev1.193A_2013-09-21T16_00_06.84Z.image_lev1.fits\", \".zip\")\n}\n\nsample_files = {}\nfor key in _files:\n sample_files[key] = os.path.abspath(os.path.join(sampledata_dir, _files[key][0]))\n\n\ndef download_sample_data(progress=True):\n \"\"\"\n Download the sample data.\n\n Parameters\n ----------\n progress: bool\n Show a progress bar during download\n\n Returns\n -------\n None\n \"\"\"\n number_of_files_fetched = 0\n print(\"Downloading sample files to \" + sampledata_dir)\n for file_name in _files.itervalues():\n for base_url in _base_urls:\n full_file_name = file_name[0] + file_name[1]\n if url_exists(os.path.join(base_url, full_file_name)):\n f = download_file(os.path.join(base_url, full_file_name))\n real_name, ext = os.path.splitext(full_file_name)\n\n if file_name[1] == '.zip':\n print(\"Unpacking: %s\" % real_name)\n with ZipFile(f, 'r') as zip_file:\n zip_file.extract(real_name, sampledata_dir)\n remove(f)\n else:\n # move files to the data directory\n move(f, os.path.join(sampledata_dir, file_name[0]))\n # increment the number of files obtained to check later\n number_of_files_fetched += 1\n break\n\n if number_of_files_fetched < len(_files.keys()):\n raise URLError(\"Could not download all samples files. Problem with accessing sample data servers.\")\n", "path": "sunpy/data/_sample.py"}]}
2,379
252
gh_patches_debug_22897
rasdani/github-patches
git_diff
NVIDIA__apex-490
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docker build fails on pytorch master since Aug 15, 2019 with "AssertionError: Found no NVIDIA driver on your system" Pytorch recently added a CUDA architecture check when building with torch.utils.cpp_extension. From the [commit notes](https://github.com/pytorch/pytorch/commit/cd207737017db8c81584763207df20bc6110ed75): > The old behavior was to always use `sm_30`. The new behavior is: > - For building via a setup.py, check if `'arch'` is in `extra_compile_args`. If so, don't change anything. > - If `TORCH_CUDA_ARCH_LIST` is set, respect that (can be 1 or more arches) > - Otherwise, query device capability and use that. Following this commit, when attempting to install apex via nvidia-docker, pytorch reverts to querying device capability and fails. The issue probably should be addressed in apex's setup.py, if not nvidia-docker/pytorch themselves. Currently, I work around the issue by setting the environment variable in the dockerfile, eg: `ENV TORCH_CUDA_ARCH_LIST=Volta;Turing;Kepler+Tesla` `RUN pip install -v --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" apex/` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 import torch 2 from setuptools import setup, find_packages 3 import subprocess 4 5 from pip._internal import main as pipmain 6 import sys 7 import warnings 8 9 if not torch.cuda.is_available(): 10 print("\nWarning: Torch did not find available GPUs on this system.\n", 11 "If your intention is to cross-compile, this is not an error.\n") 12 13 print("torch.__version__ = ", torch.__version__) 14 TORCH_MAJOR = int(torch.__version__.split('.')[0]) 15 TORCH_MINOR = int(torch.__version__.split('.')[1]) 16 17 if TORCH_MAJOR == 0 and TORCH_MINOR < 4: 18 raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" + 19 "The latest stable release can be obtained from https://pytorch.org/") 20 21 cmdclass = {} 22 ext_modules = [] 23 24 if "--pyprof" in sys.argv: 25 with open('requirements.txt') as f: 26 required_packages = f.read().splitlines() 27 pipmain(["install"] + required_packages) 28 try: 29 sys.argv.remove("--pyprof") 30 except: 31 pass 32 else: 33 warnings.warn("Option --pyprof not specified. 
Not installing PyProf dependencies!") 34 35 if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv: 36 if TORCH_MAJOR == 0: 37 raise RuntimeError("--cpp_ext requires Pytorch 1.0 or later, " 38 "found torch.__version__ = {}".format(torch.__version__)) 39 from torch.utils.cpp_extension import BuildExtension 40 cmdclass['build_ext'] = BuildExtension 41 42 if "--cpp_ext" in sys.argv: 43 from torch.utils.cpp_extension import CppExtension 44 sys.argv.remove("--cpp_ext") 45 ext_modules.append( 46 CppExtension('apex_C', 47 ['csrc/flatten_unflatten.cpp',])) 48 49 def check_cuda_torch_binary_vs_bare_metal(cuda_dir): 50 raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True) 51 output = raw_output.split() 52 release_idx = output.index("release") + 1 53 release = output[release_idx].split(".") 54 bare_metal_major = release[0] 55 bare_metal_minor = release[1][0] 56 torch_binary_major = torch.version.cuda.split(".")[0] 57 torch_binary_minor = torch.version.cuda.split(".")[1] 58 59 print("\nCompiling cuda extensions with") 60 print(raw_output + "from " + cuda_dir + "/bin\n") 61 62 if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor): 63 raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " + 64 "not match the version used to compile Pytorch binaries. " + 65 "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) + 66 "In some cases, a minor-version mismatch will not cause later errors: " + 67 "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. " 68 "You can try commenting out this check (at your own risk).") 69 70 # Set up macros for forward/backward compatibility hack around 71 # https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e 72 # and 73 # https://github.com/NVIDIA/apex/issues/456 74 # https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac 75 version_ge_1_1 = [] 76 if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): 77 version_ge_1_1 = ['-DVERSION_GE_1_1'] 78 version_ge_1_3 = [] 79 if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2): 80 version_ge_1_3 = ['-DVERSION_GE_1_3'] 81 version_dependent_macros = version_ge_1_1 + version_ge_1_3 82 83 if "--cuda_ext" in sys.argv: 84 from torch.utils.cpp_extension import CUDAExtension 85 sys.argv.remove("--cuda_ext") 86 87 if torch.utils.cpp_extension.CUDA_HOME is None: 88 raise RuntimeError("--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? 
If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.") 89 else: 90 check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME) 91 92 ext_modules.append( 93 CUDAExtension(name='amp_C', 94 sources=['csrc/amp_C_frontend.cpp', 95 'csrc/multi_tensor_sgd_kernel.cu', 96 'csrc/multi_tensor_scale_kernel.cu', 97 'csrc/multi_tensor_axpby_kernel.cu', 98 'csrc/multi_tensor_l2norm_kernel.cu', 99 'csrc/multi_tensor_lamb_stage_1.cu', 100 'csrc/multi_tensor_lamb_stage_2.cu', 101 'csrc/multi_tensor_adam.cu', 102 'csrc/multi_tensor_novograd.cu', 103 'csrc/multi_tensor_lamb.cu'], 104 extra_compile_args={'cxx': ['-O3'] + version_dependent_macros, 105 'nvcc':['-lineinfo', 106 '-O3', 107 # '--resource-usage', 108 '--use_fast_math'] + version_dependent_macros})) 109 ext_modules.append( 110 CUDAExtension(name='fused_adam_cuda', 111 sources=['csrc/fused_adam_cuda.cpp', 112 'csrc/fused_adam_cuda_kernel.cu'], 113 extra_compile_args={'cxx': ['-O3',] + version_dependent_macros, 114 'nvcc':['-O3', 115 '--use_fast_math'] + version_dependent_macros})) 116 ext_modules.append( 117 CUDAExtension(name='syncbn', 118 sources=['csrc/syncbn.cpp', 119 'csrc/welford.cu'], 120 extra_compile_args={'cxx': ['-O3'] + version_dependent_macros, 121 'nvcc':['-O3'] + version_dependent_macros})) 122 123 ext_modules.append( 124 CUDAExtension(name='fused_layer_norm_cuda', 125 sources=['csrc/layer_norm_cuda.cpp', 126 'csrc/layer_norm_cuda_kernel.cu'], 127 extra_compile_args={'cxx': ['-O3'] + version_dependent_macros, 128 'nvcc':['-maxrregcount=50', 129 '-O3', 130 '--use_fast_math'] + version_dependent_macros})) 131 132 if "--bnp" in sys.argv: 133 from torch.utils.cpp_extension import CUDAExtension 134 sys.argv.remove("--bnp") 135 136 from torch.utils.cpp_extension import BuildExtension 137 cmdclass['build_ext'] = BuildExtension 138 139 if torch.utils.cpp_extension.CUDA_HOME is None: 140 raise RuntimeError("--bnp was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.") 141 else: 142 ext_modules.append( 143 CUDAExtension(name='bnp', 144 sources=['apex/contrib/csrc/groupbn/batch_norm.cu', 145 'apex/contrib/csrc/groupbn/ipc.cu', 146 'apex/contrib/csrc/groupbn/interface.cpp', 147 'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'], 148 include_dirs=['csrc'], 149 extra_compile_args={'cxx': [] + version_dependent_macros, 150 'nvcc':['-DCUDA_HAS_FP16=1', 151 '-D__CUDA_NO_HALF_OPERATORS__', 152 '-D__CUDA_NO_HALF_CONVERSIONS__', 153 '-D__CUDA_NO_HALF2_OPERATORS__', 154 '-gencode', 155 'arch=compute_70,code=sm_70'] + version_dependent_macros})) 156 157 if "--xentropy" in sys.argv: 158 from torch.utils.cpp_extension import CUDAExtension 159 sys.argv.remove("--xentropy") 160 161 from torch.utils.cpp_extension import BuildExtension 162 cmdclass['build_ext'] = BuildExtension 163 164 if torch.utils.cpp_extension.CUDA_HOME is None: 165 raise RuntimeError("--xentropy was requested, but nvcc was not found. Are you sure your environment has nvcc available? 
If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.") 166 else: 167 ext_modules.append( 168 CUDAExtension(name='xentropy_cuda', 169 sources=['apex/contrib/csrc/xentropy/interface.cpp', 170 'apex/contrib/csrc/xentropy/xentropy_kernel.cu'], 171 include_dirs=['csrc'], 172 extra_compile_args={'cxx': ['-O3'] + version_dependent_macros, 173 'nvcc':['-O3'] + version_dependent_macros})) 174 175 setup( 176 name='apex', 177 version='0.1', 178 packages=find_packages(exclude=('build', 179 'csrc', 180 'include', 181 'tests', 182 'dist', 183 'docs', 184 'tests', 185 'examples', 186 'apex.egg-info',)), 187 description='PyTorch Extensions written by NVIDIA', 188 ext_modules=ext_modules, 189 cmdclass=cmdclass, 190 ) 191 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -5,10 +5,20 @@ from pip._internal import main as pipmain import sys import warnings +import os if not torch.cuda.is_available(): - print("\nWarning: Torch did not find available GPUs on this system.\n", - "If your intention is to cross-compile, this is not an error.\n") + # https://github.com/NVIDIA/apex/issues/486 + # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(), + # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command). + print('\nWarning: Torch did not find available GPUs on this system.\n', + 'If your intention is to cross-compile, this is not an error.\n' + 'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n' + 'Volta (compute capability 7.0), and Turing (compute capability 7.5).\n' + 'If you wish to cross-compile for a single specific architecture,\n' + 'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n') + if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None: + os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5" print("torch.__version__ = ", torch.__version__) TORCH_MAJOR = int(torch.__version__.split('.')[0])
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,10 +5,20 @@\n from pip._internal import main as pipmain\n import sys\n import warnings\n+import os\n \n if not torch.cuda.is_available():\n- print(\"\\nWarning: Torch did not find available GPUs on this system.\\n\",\n- \"If your intention is to cross-compile, this is not an error.\\n\")\n+ # https://github.com/NVIDIA/apex/issues/486\n+ # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),\n+ # which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).\n+ print('\\nWarning: Torch did not find available GPUs on this system.\\n',\n+ 'If your intention is to cross-compile, this is not an error.\\n'\n+ 'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\\n'\n+ 'Volta (compute capability 7.0), and Turing (compute capability 7.5).\\n'\n+ 'If you wish to cross-compile for a single specific architecture,\\n'\n+ 'export TORCH_CUDA_ARCH_LIST=\"compute capability\" before running setup.py.\\n')\n+ if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n+ os.environ[\"TORCH_CUDA_ARCH_LIST\"] = \"6.0;6.1;6.2;7.0;7.5\"\n \n print(\"torch.__version__ = \", torch.__version__)\n TORCH_MAJOR = int(torch.__version__.split('.')[0])\n", "issue": "Docker build fails on pytorch master since Aug 15, 2019 with \"AssertionError: Found no NVIDIA driver on your system\"\nPytorch recently added a CUDA architecture check when building with torch.utils.cpp_extension. From the [commit notes](https://github.com/pytorch/pytorch/commit/cd207737017db8c81584763207df20bc6110ed75):\r\n\r\n> The old behavior was to always use `sm_30`. The new behavior is:\r\n> - For building via a setup.py, check if `'arch'` is in `extra_compile_args`. If so, don't change anything.\r\n> - If `TORCH_CUDA_ARCH_LIST` is set, respect that (can be 1 or more arches)\r\n> - Otherwise, query device capability and use that.\r\n\r\nFollowing this commit, when attempting to install apex via nvidia-docker, pytorch reverts to querying device capability and fails. The issue probably should be addressed in apex's setup.py, if not nvidia-docker/pytorch themselves. 
Currently, I work around the issue by setting the environment variable in the dockerfile, eg:\r\n\r\n`ENV TORCH_CUDA_ARCH_LIST=Volta;Turing;Kepler+Tesla`\r\n`RUN pip install -v --no-cache-dir --global-option=\"--cpp_ext\" --global-option=\"--cuda_ext\" apex/` \r\n\r\n\n", "before_files": [{"content": "import torch\nfrom setuptools import setup, find_packages\nimport subprocess\n\nfrom pip._internal import main as pipmain\nimport sys\nimport warnings\n\nif not torch.cuda.is_available():\n print(\"\\nWarning: Torch did not find available GPUs on this system.\\n\",\n \"If your intention is to cross-compile, this is not an error.\\n\")\n\nprint(\"torch.__version__ = \", torch.__version__)\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0 and TORCH_MINOR < 4:\n raise RuntimeError(\"Apex requires Pytorch 0.4 or newer.\\n\" +\n \"The latest stable release can be obtained from https://pytorch.org/\")\n\ncmdclass = {}\next_modules = []\n\nif \"--pyprof\" in sys.argv:\n with open('requirements.txt') as f:\n required_packages = f.read().splitlines()\n pipmain([\"install\"] + required_packages)\n try:\n sys.argv.remove(\"--pyprof\")\n except:\n pass\nelse:\n warnings.warn(\"Option --pyprof not specified. Not installing PyProf dependencies!\")\n\nif \"--cpp_ext\" in sys.argv or \"--cuda_ext\" in sys.argv:\n if TORCH_MAJOR == 0:\n raise RuntimeError(\"--cpp_ext requires Pytorch 1.0 or later, \"\n \"found torch.__version__ = {}\".format(torch.__version__))\n from torch.utils.cpp_extension import BuildExtension\n cmdclass['build_ext'] = BuildExtension\n\nif \"--cpp_ext\" in sys.argv:\n from torch.utils.cpp_extension import CppExtension\n sys.argv.remove(\"--cpp_ext\")\n ext_modules.append(\n CppExtension('apex_C',\n ['csrc/flatten_unflatten.cpp',]))\n\ndef check_cuda_torch_binary_vs_bare_metal(cuda_dir):\n raw_output = subprocess.check_output([cuda_dir + \"/bin/nvcc\", \"-V\"], universal_newlines=True)\n output = raw_output.split()\n release_idx = output.index(\"release\") + 1\n release = output[release_idx].split(\".\")\n bare_metal_major = release[0]\n bare_metal_minor = release[1][0]\n torch_binary_major = torch.version.cuda.split(\".\")[0]\n torch_binary_minor = torch.version.cuda.split(\".\")[1]\n\n print(\"\\nCompiling cuda extensions with\")\n print(raw_output + \"from \" + cuda_dir + \"/bin\\n\")\n\n if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):\n raise RuntimeError(\"Cuda extensions are being compiled with a version of Cuda that does \" +\n \"not match the version used to compile Pytorch binaries. \" +\n \"Pytorch binaries were compiled with Cuda {}.\\n\".format(torch.version.cuda) +\n \"In some cases, a minor-version mismatch will not cause later errors: \" +\n \"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. 
\"\n \"You can try commenting out this check (at your own risk).\")\n\n# Set up macros for forward/backward compatibility hack around\n# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e\n# and\n# https://github.com/NVIDIA/apex/issues/456\n# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac\nversion_ge_1_1 = []\nif (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):\n version_ge_1_1 = ['-DVERSION_GE_1_1']\nversion_ge_1_3 = []\nif (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):\n version_ge_1_3 = ['-DVERSION_GE_1_3']\nversion_dependent_macros = version_ge_1_1 + version_ge_1_3\n\nif \"--cuda_ext\" in sys.argv:\n from torch.utils.cpp_extension import CUDAExtension\n sys.argv.remove(\"--cuda_ext\")\n\n if torch.utils.cpp_extension.CUDA_HOME is None:\n raise RuntimeError(\"--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n else:\n check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)\n\n ext_modules.append(\n CUDAExtension(name='amp_C',\n sources=['csrc/amp_C_frontend.cpp',\n 'csrc/multi_tensor_sgd_kernel.cu',\n 'csrc/multi_tensor_scale_kernel.cu',\n 'csrc/multi_tensor_axpby_kernel.cu',\n 'csrc/multi_tensor_l2norm_kernel.cu',\n 'csrc/multi_tensor_lamb_stage_1.cu',\n 'csrc/multi_tensor_lamb_stage_2.cu',\n 'csrc/multi_tensor_adam.cu',\n 'csrc/multi_tensor_novograd.cu',\n 'csrc/multi_tensor_lamb.cu'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-lineinfo',\n '-O3',\n # '--resource-usage',\n '--use_fast_math'] + version_dependent_macros}))\n ext_modules.append(\n CUDAExtension(name='fused_adam_cuda',\n sources=['csrc/fused_adam_cuda.cpp',\n 'csrc/fused_adam_cuda_kernel.cu'],\n extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,\n 'nvcc':['-O3',\n '--use_fast_math'] + version_dependent_macros}))\n ext_modules.append(\n CUDAExtension(name='syncbn',\n sources=['csrc/syncbn.cpp',\n 'csrc/welford.cu'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-O3'] + version_dependent_macros}))\n\n ext_modules.append(\n CUDAExtension(name='fused_layer_norm_cuda',\n sources=['csrc/layer_norm_cuda.cpp',\n 'csrc/layer_norm_cuda_kernel.cu'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-maxrregcount=50',\n '-O3',\n '--use_fast_math'] + version_dependent_macros}))\n\nif \"--bnp\" in sys.argv:\n from torch.utils.cpp_extension import CUDAExtension\n sys.argv.remove(\"--bnp\")\n\n from torch.utils.cpp_extension import BuildExtension\n cmdclass['build_ext'] = BuildExtension\n\n if torch.utils.cpp_extension.CUDA_HOME is None:\n raise RuntimeError(\"--bnp was requested, but nvcc was not found. Are you sure your environment has nvcc available? 
If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n else:\n ext_modules.append(\n CUDAExtension(name='bnp',\n sources=['apex/contrib/csrc/groupbn/batch_norm.cu',\n 'apex/contrib/csrc/groupbn/ipc.cu',\n 'apex/contrib/csrc/groupbn/interface.cpp',\n 'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'],\n include_dirs=['csrc'],\n extra_compile_args={'cxx': [] + version_dependent_macros,\n 'nvcc':['-DCUDA_HAS_FP16=1',\n '-D__CUDA_NO_HALF_OPERATORS__',\n '-D__CUDA_NO_HALF_CONVERSIONS__',\n '-D__CUDA_NO_HALF2_OPERATORS__',\n '-gencode',\n 'arch=compute_70,code=sm_70'] + version_dependent_macros}))\n\nif \"--xentropy\" in sys.argv:\n from torch.utils.cpp_extension import CUDAExtension\n sys.argv.remove(\"--xentropy\")\n\n from torch.utils.cpp_extension import BuildExtension\n cmdclass['build_ext'] = BuildExtension\n\n if torch.utils.cpp_extension.CUDA_HOME is None:\n raise RuntimeError(\"--xentropy was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n else:\n ext_modules.append(\n CUDAExtension(name='xentropy_cuda',\n sources=['apex/contrib/csrc/xentropy/interface.cpp',\n 'apex/contrib/csrc/xentropy/xentropy_kernel.cu'],\n include_dirs=['csrc'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-O3'] + version_dependent_macros}))\n\nsetup(\n name='apex',\n version='0.1',\n packages=find_packages(exclude=('build',\n 'csrc',\n 'include',\n 'tests',\n 'dist',\n 'docs',\n 'tests',\n 'examples',\n 'apex.egg-info',)),\n description='PyTorch Extensions written by NVIDIA',\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n)\n", "path": "setup.py"}], "after_files": [{"content": "import torch\nfrom setuptools import setup, find_packages\nimport subprocess\n\nfrom pip._internal import main as pipmain\nimport sys\nimport warnings\nimport os\n\nif not torch.cuda.is_available():\n # https://github.com/NVIDIA/apex/issues/486\n # Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),\n # which will fail if you are compiling in an environment without visible GPUs (e.g. 
during an nvidia-docker build command).\n print('\\nWarning: Torch did not find available GPUs on this system.\\n',\n 'If your intention is to cross-compile, this is not an error.\\n'\n 'By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\\n'\n 'Volta (compute capability 7.0), and Turing (compute capability 7.5).\\n'\n 'If you wish to cross-compile for a single specific architecture,\\n'\n 'export TORCH_CUDA_ARCH_LIST=\"compute capability\" before running setup.py.\\n')\n if os.environ.get(\"TORCH_CUDA_ARCH_LIST\", None) is None:\n os.environ[\"TORCH_CUDA_ARCH_LIST\"] = \"6.0;6.1;6.2;7.0;7.5\"\n\nprint(\"torch.__version__ = \", torch.__version__)\nTORCH_MAJOR = int(torch.__version__.split('.')[0])\nTORCH_MINOR = int(torch.__version__.split('.')[1])\n\nif TORCH_MAJOR == 0 and TORCH_MINOR < 4:\n raise RuntimeError(\"Apex requires Pytorch 0.4 or newer.\\n\" +\n \"The latest stable release can be obtained from https://pytorch.org/\")\n\ncmdclass = {}\next_modules = []\n\nif \"--pyprof\" in sys.argv:\n with open('requirements.txt') as f:\n required_packages = f.read().splitlines()\n pipmain([\"install\"] + required_packages)\n try:\n sys.argv.remove(\"--pyprof\")\n except:\n pass\nelse:\n warnings.warn(\"Option --pyprof not specified. Not installing PyProf dependencies!\")\n\nif \"--cpp_ext\" in sys.argv or \"--cuda_ext\" in sys.argv:\n if TORCH_MAJOR == 0:\n raise RuntimeError(\"--cpp_ext requires Pytorch 1.0 or later, \"\n \"found torch.__version__ = {}\".format(torch.__version__))\n from torch.utils.cpp_extension import BuildExtension\n cmdclass['build_ext'] = BuildExtension\n\nif \"--cpp_ext\" in sys.argv:\n from torch.utils.cpp_extension import CppExtension\n sys.argv.remove(\"--cpp_ext\")\n ext_modules.append(\n CppExtension('apex_C',\n ['csrc/flatten_unflatten.cpp',]))\n\ndef check_cuda_torch_binary_vs_bare_metal(cuda_dir):\n raw_output = subprocess.check_output([cuda_dir + \"/bin/nvcc\", \"-V\"], universal_newlines=True)\n output = raw_output.split()\n release_idx = output.index(\"release\") + 1\n release = output[release_idx].split(\".\")\n bare_metal_major = release[0]\n bare_metal_minor = release[1][0]\n torch_binary_major = torch.version.cuda.split(\".\")[0]\n torch_binary_minor = torch.version.cuda.split(\".\")[1]\n\n print(\"\\nCompiling cuda extensions with\")\n print(raw_output + \"from \" + cuda_dir + \"/bin\\n\")\n\n if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):\n raise RuntimeError(\"Cuda extensions are being compiled with a version of Cuda that does \" +\n \"not match the version used to compile Pytorch binaries. \" +\n \"Pytorch binaries were compiled with Cuda {}.\\n\".format(torch.version.cuda) +\n \"In some cases, a minor-version mismatch will not cause later errors: \" +\n \"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. 
\"\n \"You can try commenting out this check (at your own risk).\")\n\n# Set up macros for forward/backward compatibility hack around\n# https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e\n# and\n# https://github.com/NVIDIA/apex/issues/456\n# https://github.com/pytorch/pytorch/commit/eb7b39e02f7d75c26d8a795ea8c7fd911334da7e#diff-4632522f237f1e4e728cb824300403ac\nversion_ge_1_1 = []\nif (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):\n version_ge_1_1 = ['-DVERSION_GE_1_1']\nversion_ge_1_3 = []\nif (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):\n version_ge_1_3 = ['-DVERSION_GE_1_3']\nversion_dependent_macros = version_ge_1_1 + version_ge_1_3\n\nif \"--cuda_ext\" in sys.argv:\n from torch.utils.cpp_extension import CUDAExtension\n sys.argv.remove(\"--cuda_ext\")\n\n if torch.utils.cpp_extension.CUDA_HOME is None:\n raise RuntimeError(\"--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n else:\n check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)\n\n ext_modules.append(\n CUDAExtension(name='amp_C',\n sources=['csrc/amp_C_frontend.cpp',\n 'csrc/multi_tensor_sgd_kernel.cu',\n 'csrc/multi_tensor_scale_kernel.cu',\n 'csrc/multi_tensor_axpby_kernel.cu',\n 'csrc/multi_tensor_l2norm_kernel.cu',\n 'csrc/multi_tensor_lamb_stage_1.cu',\n 'csrc/multi_tensor_lamb_stage_2.cu',\n 'csrc/multi_tensor_adam.cu',\n 'csrc/multi_tensor_novograd.cu',\n 'csrc/multi_tensor_lamb.cu'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-lineinfo',\n '-O3',\n # '--resource-usage',\n '--use_fast_math'] + version_dependent_macros}))\n ext_modules.append(\n CUDAExtension(name='fused_adam_cuda',\n sources=['csrc/fused_adam_cuda.cpp',\n 'csrc/fused_adam_cuda_kernel.cu'],\n extra_compile_args={'cxx': ['-O3',] + version_dependent_macros,\n 'nvcc':['-O3',\n '--use_fast_math'] + version_dependent_macros}))\n ext_modules.append(\n CUDAExtension(name='syncbn',\n sources=['csrc/syncbn.cpp',\n 'csrc/welford.cu'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-O3'] + version_dependent_macros}))\n\n ext_modules.append(\n CUDAExtension(name='fused_layer_norm_cuda',\n sources=['csrc/layer_norm_cuda.cpp',\n 'csrc/layer_norm_cuda_kernel.cu'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-maxrregcount=50',\n '-O3',\n '--use_fast_math'] + version_dependent_macros}))\n\nif \"--bnp\" in sys.argv:\n from torch.utils.cpp_extension import CUDAExtension\n sys.argv.remove(\"--bnp\")\n\n from torch.utils.cpp_extension import BuildExtension\n cmdclass['build_ext'] = BuildExtension\n\n if torch.utils.cpp_extension.CUDA_HOME is None:\n raise RuntimeError(\"--bnp was requested, but nvcc was not found. Are you sure your environment has nvcc available? 
If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n else:\n ext_modules.append(\n CUDAExtension(name='bnp',\n sources=['apex/contrib/csrc/groupbn/batch_norm.cu',\n 'apex/contrib/csrc/groupbn/ipc.cu',\n 'apex/contrib/csrc/groupbn/interface.cpp',\n 'apex/contrib/csrc/groupbn/batch_norm_add_relu.cu'],\n include_dirs=['csrc'],\n extra_compile_args={'cxx': [] + version_dependent_macros,\n 'nvcc':['-DCUDA_HAS_FP16=1',\n '-D__CUDA_NO_HALF_OPERATORS__',\n '-D__CUDA_NO_HALF_CONVERSIONS__',\n '-D__CUDA_NO_HALF2_OPERATORS__',\n '-gencode',\n 'arch=compute_70,code=sm_70'] + version_dependent_macros}))\n\nif \"--xentropy\" in sys.argv:\n from torch.utils.cpp_extension import CUDAExtension\n sys.argv.remove(\"--xentropy\")\n\n from torch.utils.cpp_extension import BuildExtension\n cmdclass['build_ext'] = BuildExtension\n\n if torch.utils.cpp_extension.CUDA_HOME is None:\n raise RuntimeError(\"--xentropy was requested, but nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.\")\n else:\n ext_modules.append(\n CUDAExtension(name='xentropy_cuda',\n sources=['apex/contrib/csrc/xentropy/interface.cpp',\n 'apex/contrib/csrc/xentropy/xentropy_kernel.cu'],\n include_dirs=['csrc'],\n extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,\n 'nvcc':['-O3'] + version_dependent_macros}))\n\nsetup(\n name='apex',\n version='0.1',\n packages=find_packages(exclude=('build',\n 'csrc',\n 'include',\n 'tests',\n 'dist',\n 'docs',\n 'tests',\n 'examples',\n 'apex.egg-info',)),\n description='PyTorch Extensions written by NVIDIA',\n ext_modules=ext_modules,\n cmdclass=cmdclass,\n)\n", "path": "setup.py"}]}
3,135
389
gh_patches_debug_1951
rasdani/github-patches
git_diff
googleapis__python-bigquery-974
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python to construct CASE WHEN update SQL statement I try to update 2K rows in BQ ``` def update_bq_ads_status_failed(self, update_ads): affected_rows = 0 for update_ads_chunk in split(update_ads, _UPDATE_CHUNK_SIZE): ad_ids = [item["ad_id"] for item in update_ads_chunk] removal_errors = [item["removal_error"] for item in update_ads_chunk] update_removal_error = "" for ad_id, removal_error in zip(ad_ids, removal_errors): update_removal_error = update_removal_error + \ f''' WHEN ad_id = '{ad_id}' Then '{removal_error}' ''' affected_rows += self.update_bq_ads_status(f""" UPDATE '{table_full_name}' SET status = 'Failed Removing' SET removal_error = CASE {update_removal_error} END WHERE ad_id IN {str(ad_ids)} """) return affected_rows ``` I'm getting this error. I know it's too vague and not possible to debug like this. > timeout=300.0, headers={'X-Server-Timeout': '300.0', > 'Accept-Encoding': 'gzip', 'Content-Type': 'application/json', > 'X-Goog-API-Client': 'gl-python/3.8.10 grpc/1.39.0 gax/2.0.0 > gapic/2.26.0 gccl/2.26.0', 'User-Agent': 'gl-python/3.8.10 grpc/1.39.0 > gax/2.0.0 gapic/2.26.0 gccl/2.26.0'})), last exception: ('Connection > aborted.', RemoteDisconnected('Remote end closed connection without > response')) I'm trying to eliminate errors. Is my BQ update syntactically correct? What's the BQ update timeout? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `google/cloud/bigquery/retry.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from google.api_core import exceptions 16 from google.api_core import retry 17 from google.auth import exceptions as auth_exceptions 18 import requests.exceptions 19 20 21 _RETRYABLE_REASONS = frozenset( 22 ["rateLimitExceeded", "backendError", "internalError", "badGateway"] 23 ) 24 25 _UNSTRUCTURED_RETRYABLE_TYPES = ( 26 ConnectionError, 27 exceptions.TooManyRequests, 28 exceptions.InternalServerError, 29 exceptions.BadGateway, 30 requests.exceptions.ChunkedEncodingError, 31 requests.exceptions.ConnectionError, 32 requests.exceptions.Timeout, 33 auth_exceptions.TransportError, 34 ) 35 36 _DEFAULT_JOB_DEADLINE = 60.0 * 10.0 # seconds 37 38 39 def _should_retry(exc): 40 """Predicate for determining when to retry. 41 42 We retry if and only if the 'reason' is 'backendError' 43 or 'rateLimitExceeded'. 44 """ 45 if not hasattr(exc, "errors") or len(exc.errors) == 0: 46 # Check for unstructured error returns, e.g. from GFE 47 return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES) 48 49 reason = exc.errors[0]["reason"] 50 return reason in _RETRYABLE_REASONS 51 52 53 DEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0) 54 """The default retry object. 55 56 Any method with a ``retry`` parameter will be retried automatically, 57 with reasonable defaults. 
To disable retry, pass ``retry=None``. 58 To modify the default retry behavior, call a ``with_XXX`` method 59 on ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds, 60 pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``. 61 """ 62 63 DEFAULT_TIMEOUT = 5.0 * 60.0 64 """The default API timeout. 65 66 This is the time to wait per request. To adjust the total wait time, set a 67 deadline on the retry object. 68 """ 69 70 job_retry_reasons = "rateLimitExceeded", "backendError" 71 72 73 def _job_should_retry(exc): 74 if not hasattr(exc, "errors") or len(exc.errors) == 0: 75 return False 76 77 reason = exc.errors[0]["reason"] 78 return reason in job_retry_reasons 79 80 81 DEFAULT_JOB_RETRY = retry.Retry( 82 predicate=_job_should_retry, deadline=_DEFAULT_JOB_DEADLINE 83 ) 84 """ 85 The default job retry object. 86 """ 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py --- a/google/cloud/bigquery/retry.py +++ b/google/cloud/bigquery/retry.py @@ -60,7 +60,7 @@ pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``. """ -DEFAULT_TIMEOUT = 5.0 * 60.0 +DEFAULT_TIMEOUT = None """The default API timeout. This is the time to wait per request. To adjust the total wait time, set a
{"golden_diff": "diff --git a/google/cloud/bigquery/retry.py b/google/cloud/bigquery/retry.py\n--- a/google/cloud/bigquery/retry.py\n+++ b/google/cloud/bigquery/retry.py\n@@ -60,7 +60,7 @@\n pass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n \"\"\"\n \n-DEFAULT_TIMEOUT = 5.0 * 60.0\n+DEFAULT_TIMEOUT = None\n \"\"\"The default API timeout.\n \n This is the time to wait per request. To adjust the total wait time, set a\n", "issue": "Python to construct CASE WHEN update SQL statement\nI try to update 2K rows in BQ \r\n\r\n\r\n\r\n\r\n```\r\ndef update_bq_ads_status_failed(self, update_ads):\r\n affected_rows = 0\r\n for update_ads_chunk in split(update_ads, _UPDATE_CHUNK_SIZE):\r\n ad_ids = [item[\"ad_id\"] for item in update_ads_chunk]\r\n removal_errors = [item[\"removal_error\"] for item in update_ads_chunk]\r\n\r\n update_removal_error = \"\"\r\n for ad_id, removal_error in zip(ad_ids, removal_errors):\r\n update_removal_error = update_removal_error + \\\r\n f''' WHEN ad_id = '{ad_id}' Then '{removal_error}' '''\r\n affected_rows += self.update_bq_ads_status(f\"\"\"\r\n UPDATE '{table_full_name}' \r\n SET status = 'Failed Removing' \r\n SET removal_error = CASE {update_removal_error} END \r\n WHERE ad_id IN {str(ad_ids)}\r\n \"\"\")\r\n return affected_rows\r\n```\r\n\r\n\r\nI'm getting this error. I know it's too vague and not possible to debug like this.\r\n\r\n> timeout=300.0, headers={'X-Server-Timeout': '300.0',\r\n> 'Accept-Encoding': 'gzip', 'Content-Type': 'application/json',\r\n> 'X-Goog-API-Client': 'gl-python/3.8.10 grpc/1.39.0 gax/2.0.0\r\n> gapic/2.26.0 gccl/2.26.0', 'User-Agent': 'gl-python/3.8.10 grpc/1.39.0\r\n> gax/2.0.0 gapic/2.26.0 gccl/2.26.0'})), last exception: ('Connection\r\n> aborted.', RemoteDisconnected('Remote end closed connection without\r\n> response'))\r\n\r\n\r\nI'm trying to eliminate errors. Is my BQ update syntactically correct? \r\n\r\nWhat's the BQ update timeout?\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n requests.exceptions.Timeout,\n auth_exceptions.TransportError,\n)\n\n_DEFAULT_JOB_DEADLINE = 60.0 * 10.0 # seconds\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. 
from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n\nDEFAULT_TIMEOUT = 5.0 * 60.0\n\"\"\"The default API timeout.\n\nThis is the time to wait per request. To adjust the total wait time, set a\ndeadline on the retry object.\n\"\"\"\n\njob_retry_reasons = \"rateLimitExceeded\", \"backendError\"\n\n\ndef _job_should_retry(exc):\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n return False\n\n reason = exc.errors[0][\"reason\"]\n return reason in job_retry_reasons\n\n\nDEFAULT_JOB_RETRY = retry.Retry(\n predicate=_job_should_retry, deadline=_DEFAULT_JOB_DEADLINE\n)\n\"\"\"\nThe default job retry object.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom google.api_core import exceptions\nfrom google.api_core import retry\nfrom google.auth import exceptions as auth_exceptions\nimport requests.exceptions\n\n\n_RETRYABLE_REASONS = frozenset(\n [\"rateLimitExceeded\", \"backendError\", \"internalError\", \"badGateway\"]\n)\n\n_UNSTRUCTURED_RETRYABLE_TYPES = (\n ConnectionError,\n exceptions.TooManyRequests,\n exceptions.InternalServerError,\n exceptions.BadGateway,\n requests.exceptions.ChunkedEncodingError,\n requests.exceptions.ConnectionError,\n requests.exceptions.Timeout,\n auth_exceptions.TransportError,\n)\n\n_DEFAULT_JOB_DEADLINE = 60.0 * 10.0 # seconds\n\n\ndef _should_retry(exc):\n \"\"\"Predicate for determining when to retry.\n\n We retry if and only if the 'reason' is 'backendError'\n or 'rateLimitExceeded'.\n \"\"\"\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n # Check for unstructured error returns, e.g. from GFE\n return isinstance(exc, _UNSTRUCTURED_RETRYABLE_TYPES)\n\n reason = exc.errors[0][\"reason\"]\n return reason in _RETRYABLE_REASONS\n\n\nDEFAULT_RETRY = retry.Retry(predicate=_should_retry, deadline=600.0)\n\"\"\"The default retry object.\n\nAny method with a ``retry`` parameter will be retried automatically,\nwith reasonable defaults. To disable retry, pass ``retry=None``.\nTo modify the default retry behavior, call a ``with_XXX`` method\non ``DEFAULT_RETRY``. For example, to change the deadline to 30 seconds,\npass ``retry=bigquery.DEFAULT_RETRY.with_deadline(30)``.\n\"\"\"\n\nDEFAULT_TIMEOUT = None\n\"\"\"The default API timeout.\n\nThis is the time to wait per request. 
To adjust the total wait time, set a\ndeadline on the retry object.\n\"\"\"\n\njob_retry_reasons = \"rateLimitExceeded\", \"backendError\"\n\n\ndef _job_should_retry(exc):\n if not hasattr(exc, \"errors\") or len(exc.errors) == 0:\n return False\n\n reason = exc.errors[0][\"reason\"]\n return reason in job_retry_reasons\n\n\nDEFAULT_JOB_RETRY = retry.Retry(\n predicate=_job_should_retry, deadline=_DEFAULT_JOB_DEADLINE\n)\n\"\"\"\nThe default job retry object.\n\"\"\"\n", "path": "google/cloud/bigquery/retry.py"}]}
1,498
118
gh_patches_debug_28122
rasdani/github-patches
git_diff
bridgecrewio__checkov-1985
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Checkov failure on F driver windows **Describe the bug** When running checkov from the jetbrains plugin using this command: ```checkov -s --bc-api-key **-**-**-** --repo-id jetbrains/extension -f F:\Code\modules\test2\s3.tf -o json ``` We are getting this error: File "c:\Python39\Scripts\checkov.cmd", line 53, in <module> run() File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\main.py", line 232, in run repo_root_for_plan_enrichment=config.repo_root_for_plan_enrichment) File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\common\runners\runner_registry.py", line 66, in run for scan_report in reports: File "C:\Python37\lib\concurrent\futures\_base.py", line 598, in result_iterator yield fs.pop().result() File "C:\Python37\lib\concurrent\futures\_base.py", line 428, in result return self.__get_result() File "C:\Python37\lib\concurrent\futures\_base.py", line 384, in __get_result raise self._exception File "C:\Python37\lib\concurrent\futures\thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\common\runners\runner_registry.py", line 63, in <lambda> runner_filter=self.runner_filter, collect_skip_comments=collect_skip_comments), File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\terraform\runner.py", line 102, in run self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments) File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\terraform\runner.py", line 195, in check_tf_definition scanned_file, runner_filter, abs_referrer) File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\terraform\runner.py", line 206, in run_all_blocks scanned_file, block_type, runner_filter, None, module_referrer) File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\terraform\runner.py", line 289, in run_block caller_file_line_range=caller_file_line_range) File "C:\Users\MESH User\AppData\Roaming\Python\Python37\site-packages\checkov\common\output\record.py", line 44, in __init__ self.repo_file_path = convert_to_unix_path(f'/{os.path.relpath(file_abs_path)}') # matches file paths given in the BC platform and should always be a unix path File "C:\Python37\lib\ntpath.py", line 562, in relpath path_drive, start_drive)) ValueError: path is on mount 'F:', start on mount 'C:' **To Reproduce** Steps to reproduce the behavior: 1. Run checkov on a driver which is not the driver checkov is installed on windows. **Additional context** Add any other context about the problem here (e.g. code snippets). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `checkov/common/output/record.py` Content: ``` 1 import os 2 import re 3 4 from colorama import init, Fore, Style 5 from termcolor import colored 6 7 from checkov.common.models.enums import CheckResult 8 from checkov.common.util.file_utils import convert_to_unix_path 9 10 init(autoreset=True) 11 12 13 class Record: 14 check_id = "" 15 check_name = "" 16 check_result = None 17 check_class = "" 18 code_block = "" 19 file_path = "" 20 file_line_range = [] 21 caller_file_path = None # When created from a module 22 caller_file_line_range = None # 23 resource = "" 24 guideline = None 25 fixed_definition = None 26 entity_tags = None 27 28 def __init__(self, check_id, check_name, check_result, code_block, file_path, file_line_range, resource, 29 evaluations, check_class, file_abs_path, entity_tags=None, 30 caller_file_path=None, caller_file_line_range=None, bc_check_id=None): 31 """ 32 :param evaluations: A dict with the key being the variable name, value being a dict containing: 33 - 'var_file' 34 - 'value' 35 - 'definitions', a list of dicts which contain 'definition_expression' 36 """ 37 self.check_id = check_id 38 self.bc_check_id = bc_check_id 39 self.check_name = check_name 40 self.check_result = check_result 41 self.code_block = code_block 42 self.file_path = file_path 43 self.file_abs_path = file_abs_path 44 self.repo_file_path = convert_to_unix_path(f'/{os.path.relpath(file_abs_path)}') # matches file paths given in the BC platform and should always be a unix path 45 self.file_line_range = file_line_range 46 self.resource = resource 47 self.evaluations = evaluations 48 self.check_class = check_class 49 self.fixed_definition = None 50 self.entity_tags = entity_tags 51 self.caller_file_path = caller_file_path 52 self.caller_file_line_range = caller_file_line_range 53 54 def set_guideline(self, guideline): 55 self.guideline = guideline 56 57 @staticmethod 58 def _trim_special_chars(expression): 59 return "".join(re.findall(r'[^ ${\}]+', expression)) 60 61 def _is_expression_in_code_lines(self, expression): 62 stripped_expression = self._trim_special_chars(expression) 63 return any(stripped_expression in self._trim_special_chars(line) for (_, line) in self.code_block) 64 65 @staticmethod 66 def _code_line_string(code_block): 67 string_block = '' 68 last_line_number, _ = code_block[-1] 69 70 for (line_num, line) in code_block: 71 spaces = ' ' * (len(str(last_line_number)) - len(str(line_num))) 72 if line.lstrip().startswith('#'): 73 string_block += "\t\t" + Fore.WHITE + str(line_num) + spaces + ' | ' + line 74 else: 75 string_block += "\t\t" + Fore.WHITE + str(line_num) + spaces + ' | ' + Fore.YELLOW + line 76 return string_block 77 78 def to_string(self, compact=False, use_bc_ids=False): 79 status = '' 80 evaluation_message = f'' 81 status_color = "white" 82 if self.check_result['result'] == CheckResult.PASSED: 83 status = CheckResult.PASSED.name 84 status_color = "green" 85 elif self.check_result['result'] == CheckResult.FAILED: 86 status = CheckResult.FAILED.name 87 status_color = "red" 88 elif self.check_result['result'] == CheckResult.SKIPPED: 89 status = CheckResult.SKIPPED.name 90 status_color = 'blue' 91 suppress_comment = "\tSuppress comment: {}\n".format(self.check_result['suppress_comment']) 92 93 check_message = colored("Check: {}: \"{}\"\n".format(self.get_output_id(use_bc_ids), self.check_name), "white") 94 guideline_message = '' 95 if self.guideline: 96 guideline_message = "\tGuide: " + Style.BRIGHT + colored(f"{self.guideline}\n", 'blue', 
attrs=['underline']) + Style.RESET_ALL 97 file_details = colored( 98 "\tFile: {}:{}\n".format(self.file_path, "-".join([str(x) for x in self.file_line_range])), 99 "magenta") 100 code_lines = "" 101 if self.code_block: 102 code_lines = "\n{}\n".format("".join( 103 [self._code_line_string(self.code_block)])) 104 caller_file_details = "" 105 if self.caller_file_path and self.caller_file_line_range: 106 caller_file_details = colored( 107 "\tCalling File: {}:{}\n".format(self.caller_file_path, 108 "-".join([str(x) for x in self.caller_file_line_range])), 109 "magenta") 110 if self.evaluations: 111 for (var_name, var_evaluations) in self.evaluations.items(): 112 var_file = var_evaluations['var_file'] 113 var_definitions = var_evaluations['definitions'] 114 for definition_obj in var_definitions: 115 definition_expression = definition_obj["definition_expression"] 116 if self._is_expression_in_code_lines(definition_expression): 117 evaluation_message = evaluation_message + colored( 118 f'\tVariable {colored(var_name, "yellow")} (of {var_file}) evaluated to value "{colored(var_evaluations["value"], "yellow")}" ' 119 f'in expression: {colored(definition_obj["definition_name"] + " = ", "yellow")}{colored(definition_obj["definition_expression"], "yellow")}\n', 120 'white') 121 status_message = colored("\t{} for resource: {}\n".format(status, self.resource), status_color) 122 if self.check_result['result'] == CheckResult.FAILED and code_lines and not compact: 123 return check_message + status_message + file_details + caller_file_details + guideline_message + code_lines + evaluation_message 124 125 if self.check_result['result'] == CheckResult.SKIPPED: 126 return check_message + status_message + suppress_comment + file_details + caller_file_details + guideline_message 127 else: 128 return check_message + status_message + file_details + caller_file_details + evaluation_message + guideline_message 129 130 def __str__(self): 131 return self.to_string() 132 133 def get_output_id(self, use_bc_ids: bool) -> str: 134 return self.bc_check_id if self.bc_check_id and use_bc_ids else self.check_id 135 136 def get_unique_string(self): 137 return f"{self.check_id}.{self.check_result}.{self.file_abs_path}.{self.file_line_range}.{self.resource}" 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/common/output/record.py b/checkov/common/output/record.py --- a/checkov/common/output/record.py +++ b/checkov/common/output/record.py @@ -1,5 +1,7 @@ import os import re +from pathlib import Path +from typing import Union from colorama import init, Fore, Style from termcolor import colored @@ -41,7 +43,7 @@ self.code_block = code_block self.file_path = file_path self.file_abs_path = file_abs_path - self.repo_file_path = convert_to_unix_path(f'/{os.path.relpath(file_abs_path)}') # matches file paths given in the BC platform and should always be a unix path + self.repo_file_path = self._determine_repo_file_path(file_abs_path) self.file_line_range = file_line_range self.resource = resource self.evaluations = evaluations @@ -51,6 +53,15 @@ self.caller_file_path = caller_file_path self.caller_file_line_range = caller_file_line_range + @staticmethod + def _determine_repo_file_path(file_path: Union[str, "os.PathLike[str]"]) -> str: + # matches file paths given in the BC platform and should always be a unix path + repo_file_path = Path(file_path) + if Path.cwd().drive == repo_file_path.drive: + return convert_to_unix_path(f"/{os.path.relpath(repo_file_path)}").replace("/..", "") + + return f"/{'/'.join(repo_file_path.parts[1:])}" + def set_guideline(self, guideline): self.guideline = guideline
{"golden_diff": "diff --git a/checkov/common/output/record.py b/checkov/common/output/record.py\n--- a/checkov/common/output/record.py\n+++ b/checkov/common/output/record.py\n@@ -1,5 +1,7 @@\n import os\n import re\n+from pathlib import Path\n+from typing import Union\n \n from colorama import init, Fore, Style\n from termcolor import colored\n@@ -41,7 +43,7 @@\n self.code_block = code_block\n self.file_path = file_path\n self.file_abs_path = file_abs_path\n- self.repo_file_path = convert_to_unix_path(f'/{os.path.relpath(file_abs_path)}') # matches file paths given in the BC platform and should always be a unix path\n+ self.repo_file_path = self._determine_repo_file_path(file_abs_path)\n self.file_line_range = file_line_range\n self.resource = resource\n self.evaluations = evaluations\n@@ -51,6 +53,15 @@\n self.caller_file_path = caller_file_path\n self.caller_file_line_range = caller_file_line_range\n \n+ @staticmethod\n+ def _determine_repo_file_path(file_path: Union[str, \"os.PathLike[str]\"]) -> str:\n+ # matches file paths given in the BC platform and should always be a unix path\n+ repo_file_path = Path(file_path)\n+ if Path.cwd().drive == repo_file_path.drive:\n+ return convert_to_unix_path(f\"/{os.path.relpath(repo_file_path)}\").replace(\"/..\", \"\")\n+\n+ return f\"/{'/'.join(repo_file_path.parts[1:])}\"\n+\n def set_guideline(self, guideline):\n self.guideline = guideline\n", "issue": "Checkov failure on F driver windows\n**Describe the bug**\r\nWhen running checkov from the jetbrains plugin using this command:\r\n```checkov -s --bc-api-key **-**-**-** --repo-id jetbrains/extension -f F:\\Code\\modules\\test2\\s3.tf -o json ```\r\nWe are getting this error:\r\n File \"c:\\Python39\\Scripts\\checkov.cmd\", line 53, in <module>\r\n run()\r\n File \"C:\\Users\\MESH User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\main.py\", line 232, in run\r\n repo_root_for_plan_enrichment=config.repo_root_for_plan_enrichment)\r\n File \"C:\\Users\\MESH User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\common\\runners\\runner_registry.py\", line 66, in run\r\n for scan_report in reports:\r\n File \"C:\\Python37\\lib\\concurrent\\futures\\_base.py\", line 598, in result_iterator\r\n yield fs.pop().result()\r\n File \"C:\\Python37\\lib\\concurrent\\futures\\_base.py\", line 428, in result\r\n return self.__get_result()\r\n File \"C:\\Python37\\lib\\concurrent\\futures\\_base.py\", line 384, in __get_result\r\n raise self._exception\r\n File \"C:\\Python37\\lib\\concurrent\\futures\\thread.py\", line 57, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n File \"C:\\Users\\MESH User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\common\\runners\\runner_registry.py\", line 63, in <lambda>\r\n runner_filter=self.runner_filter, collect_skip_comments=collect_skip_comments),\r\n File \"C:\\Users\\MESH User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\terraform\\runner.py\", line 102, in run\r\n self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)\r\n File \"C:\\Users\\MESH User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\terraform\\runner.py\", line 195, in check_tf_definition\r\n scanned_file, runner_filter, abs_referrer)\r\n File \"C:\\Users\\MESH User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\terraform\\runner.py\", line 206, in run_all_blocks\r\n scanned_file, block_type, runner_filter, None, module_referrer)\r\n File \"C:\\Users\\MESH 
User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\terraform\\runner.py\", line 289, in run_block\r\n caller_file_line_range=caller_file_line_range)\r\n File \"C:\\Users\\MESH User\\AppData\\Roaming\\Python\\Python37\\site-packages\\checkov\\common\\output\\record.py\", line 44, in __init__\r\n self.repo_file_path = convert_to_unix_path(f'/{os.path.relpath(file_abs_path)}') # matches file paths given in the BC platform and should always be a unix path\r\n File \"C:\\Python37\\lib\\ntpath.py\", line 562, in relpath\r\n path_drive, start_drive))\r\nValueError: path is on mount 'F:', start on mount 'C:'\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. Run checkov on a driver which is not the driver checkov is installed on windows.\r\n\r\n\r\n**Additional context**\r\nAdd any other context about the problem here (e.g. code snippets).\r\n\n", "before_files": [{"content": "import os\nimport re\n\nfrom colorama import init, Fore, Style\nfrom termcolor import colored\n\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.util.file_utils import convert_to_unix_path\n\ninit(autoreset=True)\n\n\nclass Record:\n check_id = \"\"\n check_name = \"\"\n check_result = None\n check_class = \"\"\n code_block = \"\"\n file_path = \"\"\n file_line_range = []\n caller_file_path = None # When created from a module\n caller_file_line_range = None #\n resource = \"\"\n guideline = None\n fixed_definition = None\n entity_tags = None\n\n def __init__(self, check_id, check_name, check_result, code_block, file_path, file_line_range, resource,\n evaluations, check_class, file_abs_path, entity_tags=None,\n caller_file_path=None, caller_file_line_range=None, bc_check_id=None):\n \"\"\"\n :param evaluations: A dict with the key being the variable name, value being a dict containing:\n - 'var_file'\n - 'value'\n - 'definitions', a list of dicts which contain 'definition_expression'\n \"\"\"\n self.check_id = check_id\n self.bc_check_id = bc_check_id\n self.check_name = check_name\n self.check_result = check_result\n self.code_block = code_block\n self.file_path = file_path\n self.file_abs_path = file_abs_path\n self.repo_file_path = convert_to_unix_path(f'/{os.path.relpath(file_abs_path)}') # matches file paths given in the BC platform and should always be a unix path\n self.file_line_range = file_line_range\n self.resource = resource\n self.evaluations = evaluations\n self.check_class = check_class\n self.fixed_definition = None\n self.entity_tags = entity_tags\n self.caller_file_path = caller_file_path\n self.caller_file_line_range = caller_file_line_range\n\n def set_guideline(self, guideline):\n self.guideline = guideline\n\n @staticmethod\n def _trim_special_chars(expression):\n return \"\".join(re.findall(r'[^ ${\\}]+', expression))\n\n def _is_expression_in_code_lines(self, expression):\n stripped_expression = self._trim_special_chars(expression)\n return any(stripped_expression in self._trim_special_chars(line) for (_, line) in self.code_block)\n\n @staticmethod\n def _code_line_string(code_block):\n string_block = ''\n last_line_number, _ = code_block[-1]\n\n for (line_num, line) in code_block:\n spaces = ' ' * (len(str(last_line_number)) - len(str(line_num)))\n if line.lstrip().startswith('#'):\n string_block += \"\\t\\t\" + Fore.WHITE + str(line_num) + spaces + ' | ' + line\n else:\n string_block += \"\\t\\t\" + Fore.WHITE + str(line_num) + spaces + ' | ' + Fore.YELLOW + line\n return string_block\n\n def to_string(self, compact=False, use_bc_ids=False):\n 
status = ''\n evaluation_message = f''\n status_color = \"white\"\n if self.check_result['result'] == CheckResult.PASSED:\n status = CheckResult.PASSED.name\n status_color = \"green\"\n elif self.check_result['result'] == CheckResult.FAILED:\n status = CheckResult.FAILED.name\n status_color = \"red\"\n elif self.check_result['result'] == CheckResult.SKIPPED:\n status = CheckResult.SKIPPED.name\n status_color = 'blue'\n suppress_comment = \"\\tSuppress comment: {}\\n\".format(self.check_result['suppress_comment'])\n\n check_message = colored(\"Check: {}: \\\"{}\\\"\\n\".format(self.get_output_id(use_bc_ids), self.check_name), \"white\")\n guideline_message = ''\n if self.guideline:\n guideline_message = \"\\tGuide: \" + Style.BRIGHT + colored(f\"{self.guideline}\\n\", 'blue', attrs=['underline']) + Style.RESET_ALL\n file_details = colored(\n \"\\tFile: {}:{}\\n\".format(self.file_path, \"-\".join([str(x) for x in self.file_line_range])),\n \"magenta\")\n code_lines = \"\"\n if self.code_block:\n code_lines = \"\\n{}\\n\".format(\"\".join(\n [self._code_line_string(self.code_block)]))\n caller_file_details = \"\"\n if self.caller_file_path and self.caller_file_line_range:\n caller_file_details = colored(\n \"\\tCalling File: {}:{}\\n\".format(self.caller_file_path,\n \"-\".join([str(x) for x in self.caller_file_line_range])),\n \"magenta\")\n if self.evaluations:\n for (var_name, var_evaluations) in self.evaluations.items():\n var_file = var_evaluations['var_file']\n var_definitions = var_evaluations['definitions']\n for definition_obj in var_definitions:\n definition_expression = definition_obj[\"definition_expression\"]\n if self._is_expression_in_code_lines(definition_expression):\n evaluation_message = evaluation_message + colored(\n f'\\tVariable {colored(var_name, \"yellow\")} (of {var_file}) evaluated to value \"{colored(var_evaluations[\"value\"], \"yellow\")}\" '\n f'in expression: {colored(definition_obj[\"definition_name\"] + \" = \", \"yellow\")}{colored(definition_obj[\"definition_expression\"], \"yellow\")}\\n',\n 'white')\n status_message = colored(\"\\t{} for resource: {}\\n\".format(status, self.resource), status_color)\n if self.check_result['result'] == CheckResult.FAILED and code_lines and not compact:\n return check_message + status_message + file_details + caller_file_details + guideline_message + code_lines + evaluation_message\n\n if self.check_result['result'] == CheckResult.SKIPPED:\n return check_message + status_message + suppress_comment + file_details + caller_file_details + guideline_message\n else:\n return check_message + status_message + file_details + caller_file_details + evaluation_message + guideline_message\n\n def __str__(self):\n return self.to_string()\n\n def get_output_id(self, use_bc_ids: bool) -> str:\n return self.bc_check_id if self.bc_check_id and use_bc_ids else self.check_id\n\n def get_unique_string(self):\n return f\"{self.check_id}.{self.check_result}.{self.file_abs_path}.{self.file_line_range}.{self.resource}\"\n", "path": "checkov/common/output/record.py"}], "after_files": [{"content": "import os\nimport re\nfrom pathlib import Path\nfrom typing import Union\n\nfrom colorama import init, Fore, Style\nfrom termcolor import colored\n\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.util.file_utils import convert_to_unix_path\n\ninit(autoreset=True)\n\n\nclass Record:\n check_id = \"\"\n check_name = \"\"\n check_result = None\n check_class = \"\"\n code_block = \"\"\n file_path = \"\"\n file_line_range = []\n 
caller_file_path = None # When created from a module\n caller_file_line_range = None #\n resource = \"\"\n guideline = None\n fixed_definition = None\n entity_tags = None\n\n def __init__(self, check_id, check_name, check_result, code_block, file_path, file_line_range, resource,\n evaluations, check_class, file_abs_path, entity_tags=None,\n caller_file_path=None, caller_file_line_range=None, bc_check_id=None):\n \"\"\"\n :param evaluations: A dict with the key being the variable name, value being a dict containing:\n - 'var_file'\n - 'value'\n - 'definitions', a list of dicts which contain 'definition_expression'\n \"\"\"\n self.check_id = check_id\n self.bc_check_id = bc_check_id\n self.check_name = check_name\n self.check_result = check_result\n self.code_block = code_block\n self.file_path = file_path\n self.file_abs_path = file_abs_path\n self.repo_file_path = self._determine_repo_file_path(file_abs_path)\n self.file_line_range = file_line_range\n self.resource = resource\n self.evaluations = evaluations\n self.check_class = check_class\n self.fixed_definition = None\n self.entity_tags = entity_tags\n self.caller_file_path = caller_file_path\n self.caller_file_line_range = caller_file_line_range\n\n @staticmethod\n def _determine_repo_file_path(file_path: Union[str, \"os.PathLike[str]\"]) -> str:\n # matches file paths given in the BC platform and should always be a unix path\n repo_file_path = Path(file_path)\n if Path.cwd().drive == repo_file_path.drive:\n return convert_to_unix_path(f\"/{os.path.relpath(repo_file_path)}\").replace(\"/..\", \"\")\n\n return f\"/{'/'.join(repo_file_path.parts[1:])}\"\n\n def set_guideline(self, guideline):\n self.guideline = guideline\n\n @staticmethod\n def _trim_special_chars(expression):\n return \"\".join(re.findall(r'[^ ${\\}]+', expression))\n\n def _is_expression_in_code_lines(self, expression):\n stripped_expression = self._trim_special_chars(expression)\n return any(stripped_expression in self._trim_special_chars(line) for (_, line) in self.code_block)\n\n @staticmethod\n def _code_line_string(code_block):\n string_block = ''\n last_line_number, _ = code_block[-1]\n\n for (line_num, line) in code_block:\n spaces = ' ' * (len(str(last_line_number)) - len(str(line_num)))\n if line.lstrip().startswith('#'):\n string_block += \"\\t\\t\" + Fore.WHITE + str(line_num) + spaces + ' | ' + line\n else:\n string_block += \"\\t\\t\" + Fore.WHITE + str(line_num) + spaces + ' | ' + Fore.YELLOW + line\n return string_block\n\n def to_string(self, compact=False, use_bc_ids=False):\n status = ''\n evaluation_message = f''\n status_color = \"white\"\n if self.check_result['result'] == CheckResult.PASSED:\n status = CheckResult.PASSED.name\n status_color = \"green\"\n elif self.check_result['result'] == CheckResult.FAILED:\n status = CheckResult.FAILED.name\n status_color = \"red\"\n elif self.check_result['result'] == CheckResult.SKIPPED:\n status = CheckResult.SKIPPED.name\n status_color = 'blue'\n suppress_comment = \"\\tSuppress comment: {}\\n\".format(self.check_result['suppress_comment'])\n\n check_message = colored(\"Check: {}: \\\"{}\\\"\\n\".format(self.get_output_id(use_bc_ids), self.check_name), \"white\")\n guideline_message = ''\n if self.guideline:\n guideline_message = \"\\tGuide: \" + Style.BRIGHT + colored(f\"{self.guideline}\\n\", 'blue', attrs=['underline']) + Style.RESET_ALL\n file_details = colored(\n \"\\tFile: {}:{}\\n\".format(self.file_path, \"-\".join([str(x) for x in self.file_line_range])),\n \"magenta\")\n code_lines = \"\"\n if 
self.code_block:\n code_lines = \"\\n{}\\n\".format(\"\".join(\n [self._code_line_string(self.code_block)]))\n caller_file_details = \"\"\n if self.caller_file_path and self.caller_file_line_range:\n caller_file_details = colored(\n \"\\tCalling File: {}:{}\\n\".format(self.caller_file_path,\n \"-\".join([str(x) for x in self.caller_file_line_range])),\n \"magenta\")\n if self.evaluations:\n for (var_name, var_evaluations) in self.evaluations.items():\n var_file = var_evaluations['var_file']\n var_definitions = var_evaluations['definitions']\n for definition_obj in var_definitions:\n definition_expression = definition_obj[\"definition_expression\"]\n if self._is_expression_in_code_lines(definition_expression):\n evaluation_message = evaluation_message + colored(\n f'\\tVariable {colored(var_name, \"yellow\")} (of {var_file}) evaluated to value \"{colored(var_evaluations[\"value\"], \"yellow\")}\" '\n f'in expression: {colored(definition_obj[\"definition_name\"] + \" = \", \"yellow\")}{colored(definition_obj[\"definition_expression\"], \"yellow\")}\\n',\n 'white')\n status_message = colored(\"\\t{} for resource: {}\\n\".format(status, self.resource), status_color)\n if self.check_result['result'] == CheckResult.FAILED and code_lines and not compact:\n return check_message + status_message + file_details + caller_file_details + guideline_message + code_lines + evaluation_message\n\n if self.check_result['result'] == CheckResult.SKIPPED:\n return check_message + status_message + suppress_comment + file_details + caller_file_details + guideline_message\n else:\n return check_message + status_message + file_details + caller_file_details + evaluation_message + guideline_message\n\n def __str__(self):\n return self.to_string()\n\n def get_output_id(self, use_bc_ids: bool) -> str:\n return self.bc_check_id if self.bc_check_id and use_bc_ids else self.check_id\n\n def get_unique_string(self):\n return f\"{self.check_id}.{self.check_result}.{self.file_abs_path}.{self.file_line_range}.{self.resource}\"\n", "path": "checkov/common/output/record.py"}]}
2,774
372
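The crash in the checkov record above comes from `os.path.relpath`, which raises `ValueError` on Windows whenever the target file sits on a different drive than the current working directory; the golden diff sidesteps this by comparing drive letters before falling back to a relative path. Below is a minimal standalone sketch of that pattern — the helper name `to_repo_file_path` and the simplified unix-path conversion are illustrative stand-ins, not checkov's actual utilities.

```python
import os
from pathlib import Path


def to_repo_file_path(file_abs_path: str) -> str:
    """Drive-aware sketch of the path normalisation used in the fix above."""
    path = Path(file_abs_path)
    if Path.cwd().drive == path.drive:
        # Same drive (always true on POSIX, where both drives are ""):
        # os.path.relpath is safe to call here.
        return "/" + os.path.relpath(path).replace(os.sep, "/")
    # Different drive (e.g. the file is on F:\ while the tool runs from C:\):
    # os.path.relpath would raise ValueError, so keep the drive-less remainder.
    return "/" + "/".join(path.parts[1:])


print(to_repo_file_path(os.path.abspath("s3.tf")))  # e.g. "/s3.tf" when run from that folder
```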
gh_patches_debug_49194
rasdani/github-patches
git_diff
plone__Products.CMFPlone-3093
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- sorting in control panel The items of the control panel are completely unsorted (should be sorted in alphabetical order (depending on the current language in Plone). ![screenshot-dev1 veit-schiele de 8080 2015-07-18 13-35-44](https://cloud.githubusercontent.com/assets/594239/8761438/07d76bf4-2d52-11e5-8540-82fd53932690.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Products/CMFPlone/PloneControlPanel.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from AccessControl import ClassSecurityInfo 3 from AccessControl.class_init import InitializeClass 4 from App.special_dtml import DTMLFile 5 from OFS.Folder import Folder 6 from OFS.PropertyManager import PropertyManager 7 from Products.CMFCore.ActionInformation import ActionInformation 8 from Products.CMFCore.ActionProviderBase import ActionProviderBase 9 from Products.CMFCore.Expression import Expression, createExprContext 10 from Products.CMFCore.permissions import ManagePortal, View 11 from Products.CMFCore.utils import _checkPermission 12 from Products.CMFCore.utils import getToolByName 13 from Products.CMFCore.utils import registerToolInterface 14 from Products.CMFCore.utils import UniqueObject 15 from Products.CMFPlone import PloneMessageFactory as _ 16 from Products.CMFPlone.interfaces import IControlPanel 17 from Products.CMFPlone.PloneBaseTool import PloneBaseTool 18 from zope.component.hooks import getSite 19 from zope.i18n import translate 20 from zope.i18nmessageid import Message 21 from zope.interface import implementer 22 23 import six 24 25 26 class PloneConfiglet(ActionInformation): 27 28 def __init__(self, appId, **kwargs): 29 self.appId = appId 30 ActionInformation.__init__(self, **kwargs) 31 32 def getAppId(self): 33 return self.appId 34 35 def getDescription(self): 36 return self.description 37 38 def clone(self): 39 return self.__class__(**self.__dict__) 40 41 def getAction(self, ec): 42 res = ActionInformation.getAction(self, ec) 43 res['description'] = self.getDescription() 44 return res 45 46 47 @implementer(IControlPanel) 48 class PloneControlPanel(PloneBaseTool, UniqueObject, 49 Folder, ActionProviderBase, PropertyManager): 50 """Weave together the various sources of "actions" which 51 are apropos to the current user and context. 
52 """ 53 54 security = ClassSecurityInfo() 55 56 id = 'portal_controlpanel' 57 title = 'Control Panel' 58 toolicon = 'skins/plone_images/site_icon.png' 59 meta_type = 'Plone Control Panel Tool' 60 _actions_form = DTMLFile('www/editPloneConfiglets', globals()) 61 62 manage_options = (ActionProviderBase.manage_options + 63 PropertyManager.manage_options) 64 65 group = dict( 66 member=[ 67 ('Member', _(u'My Preferences')), 68 ], 69 site=[ 70 ('plone-general', _(u'General')), 71 ('plone-content', _(u'Content')), 72 ('plone-users', _(u'Users')), 73 ('plone-security', _(u'Security')), 74 ('plone-advanced', _(u'Advanced')), 75 ('Plone', _(u'Plone Configuration')), 76 ('Products', _(u'Add-on Configuration')), 77 ] 78 ) 79 80 def __init__(self, **kw): 81 if kw: 82 self.__dict__.update(**kw) 83 84 security.declareProtected(ManagePortal, 'registerConfiglets') 85 86 def registerConfiglets(self, configlets): 87 for conf in configlets: 88 self.registerConfiglet(**conf) 89 90 security.declareProtected(ManagePortal, 'getGroupIds') 91 92 def getGroupIds(self, category='site'): 93 groups = self.group.get(category, []) 94 return [g[0] for g in groups if g] 95 96 security.declareProtected(View, 'getGroups') 97 98 def getGroups(self, category='site'): 99 groups = self.group.get(category, []) 100 return [{'id': g[0], 'title': g[1]} for g in groups if g] 101 102 security.declarePrivate('listActions') 103 104 def listActions(self, info=None, object=None): 105 # This exists here to shut up a deprecation warning about old-style 106 # actions in CMFCore's ActionProviderBase. It was decided not to 107 # move configlets to be based on action tool categories for Plone 4 108 # (see PLIP #8804), but that (or an alternative) will have to happen 109 # before CMF 2.4 when support for old-style actions is removed. 
110 return self._actions or () 111 112 security.declarePublic('maySeeSomeConfiglets') 113 114 def maySeeSomeConfiglets(self): 115 groups = self.getGroups('site') 116 117 all = [] 118 for group in groups: 119 all.extend(self.enumConfiglets(group=group['id'])) 120 all = [item for item in all if item['visible']] 121 return len(all) != 0 122 123 security.declarePublic('enumConfiglets') 124 125 def enumConfiglets(self, group=None): 126 portal = getToolByName(self, 'portal_url').getPortalObject() 127 context = createExprContext(self, portal, self) 128 res = [] 129 for a in self.listActions(): 130 verified = 0 131 for permission in a.permissions: 132 if _checkPermission(permission, portal): 133 verified = 1 134 if verified and a.category == group and a.testCondition(context) \ 135 and a.visible: 136 res.append(a.getAction(context)) 137 # Translate the title for sorting 138 if getattr(self, 'REQUEST', None) is not None: 139 for a in res: 140 title = a['title'] 141 if not isinstance(title, Message): 142 title = Message(title, domain='plone') 143 a['title'] = translate(title, 144 context=self.REQUEST) 145 146 def _id(v): 147 return v['id'] 148 res.sort(key=_id) 149 return res 150 151 security.declareProtected(ManagePortal, 'unregisterConfiglet') 152 153 def unregisterConfiglet(self, id): 154 actids = [o.id for o in self.listActions()] 155 selection = [actids.index(a) for a in actids if a == id] 156 if not selection: 157 return 158 self.deleteActions(selection) 159 160 security.declareProtected(ManagePortal, 'unregisterApplication') 161 162 def unregisterApplication(self, appId): 163 acts = list(self.listActions()) 164 selection = [acts.index(a) for a in acts if a.appId == appId] 165 if not selection: 166 return 167 self.deleteActions(selection) 168 169 def _extractAction(self, properties, index): 170 # Extract an ActionInformation from the funky form properties. 171 id = str(properties.get('id_%d' % index, '')) 172 name = str(properties.get('name_%d' % index, '')) 173 action = str(properties.get('action_%d' % index, '')) 174 condition = str(properties.get('condition_%d' % index, '')) 175 category = str(properties.get('category_%d' % index, '')) 176 visible = properties.get('visible_%d' % index, 0) 177 permissions = properties.get('permission_%d' % index, ()) 178 appId = properties.get('appId_%d' % index, '') 179 description = properties.get('description_%d' % index, '') 180 icon_expr = properties.get('icon_expr_%d' % index, '') 181 182 if not name: 183 raise ValueError('A name is required.') 184 185 if action != '': 186 action = Expression(text=action) 187 188 if condition != '': 189 condition = Expression(text=condition) 190 191 if category == '': 192 category = 'object' 193 194 if not isinstance(visible, int): 195 try: 196 visible = int(visible) 197 except ValueError: 198 visible = 0 199 200 if isinstance(permissions, six.string_types): 201 permissions = (permissions, ) 202 203 return PloneConfiglet(id=id, 204 title=name, 205 action=action, 206 condition=condition, 207 permissions=permissions, 208 category=category, 209 visible=visible, 210 appId=appId, 211 description=description, 212 icon_expr=icon_expr, 213 ) 214 215 security.declareProtected(ManagePortal, 'addAction') 216 217 def addAction(self, 218 id, 219 name, 220 action, 221 condition='', 222 permission='', 223 category='Plone', 224 visible=1, 225 appId=None, 226 icon_expr='', 227 description='', 228 REQUEST=None, 229 ): 230 # Add an action to our list. 
231 if not name: 232 raise ValueError('A name is required.') 233 234 a_expr = action and Expression(text=str(action)) or '' 235 c_expr = condition and Expression(text=str(condition)) or '' 236 237 if not isinstance(permission, tuple): 238 permission = permission and (str(permission), ) or () 239 240 new_actions = self._cloneActions() 241 242 new_action = PloneConfiglet(id=str(id), 243 title=name, 244 action=a_expr, 245 condition=c_expr, 246 permissions=permission, 247 category=str(category), 248 visible=int(visible), 249 appId=appId, 250 description=description, 251 icon_expr=icon_expr, 252 ) 253 254 new_actions.append(new_action) 255 self._actions = tuple(new_actions) 256 257 if REQUEST is not None: 258 return self.manage_editActionsForm( 259 REQUEST, manage_tabs_message='Added.') 260 261 security.declareProtected(ManagePortal, 'registerConfiglet') 262 registerConfiglet = addAction 263 264 security.declareProtected(ManagePortal, 'manage_editActionsForm') 265 266 def manage_editActionsForm(self, REQUEST, manage_tabs_message=None): 267 """ Show the 'Actions' management tab. 268 """ 269 actions = [] 270 271 for a in self.listActions(): 272 273 a1 = {} 274 a1['id'] = a.getId() 275 a1['name'] = a.Title() 276 p = a.getPermissions() 277 if p: 278 a1['permission'] = p[0] 279 else: 280 a1['permission'] = '' 281 a1['category'] = a.getCategory() or 'object' 282 a1['visible'] = a.getVisibility() 283 a1['action'] = a.getActionExpression() 284 a1['condition'] = a.getCondition() 285 a1['appId'] = a.getAppId() 286 a1['description'] = a.getDescription() 287 a1['icon_expr'] = a.getIconExpression() 288 actions.append(a1) 289 290 # possible_permissions is in OFS.role.RoleManager. 291 pp = self.possible_permissions() 292 return self._actions_form( 293 self, 294 REQUEST, 295 actions=actions, 296 possible_permissions=pp, 297 management_view='Actions', 298 manage_tabs_message=manage_tabs_message, 299 ) 300 301 @property 302 def site_url(self): 303 """Return the absolute URL to the current site, which is likely not 304 necessarily the portal root. 305 Used by ``portlet_prefs`` to construct the URL to 306 ``@@overview-controlpanel``. 307 """ 308 return getSite().absolute_url() 309 310 311 InitializeClass(PloneControlPanel) 312 registerToolInterface('portal_controlpanel', IControlPanel) 313 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Products/CMFPlone/PloneControlPanel.py b/Products/CMFPlone/PloneControlPanel.py --- a/Products/CMFPlone/PloneControlPanel.py +++ b/Products/CMFPlone/PloneControlPanel.py @@ -143,9 +143,10 @@ a['title'] = translate(title, context=self.REQUEST) - def _id(v): - return v['id'] - res.sort(key=_id) + def _title(v): + return v['title'] + + res.sort(key=_title) return res security.declareProtected(ManagePortal, 'unregisterConfiglet')
{"golden_diff": "diff --git a/Products/CMFPlone/PloneControlPanel.py b/Products/CMFPlone/PloneControlPanel.py\n--- a/Products/CMFPlone/PloneControlPanel.py\n+++ b/Products/CMFPlone/PloneControlPanel.py\n@@ -143,9 +143,10 @@\n a['title'] = translate(title,\n context=self.REQUEST)\n \n- def _id(v):\n- return v['id']\n- res.sort(key=_id)\n+ def _title(v):\n+ return v['title']\n+\n+ res.sort(key=_title)\n return res\n \n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n", "issue": "sorting in control panel\nThe items of the control panel are completely unsorted (should be sorted in alphabetical order (depending on the current language in Plone).\n![screenshot-dev1 veit-schiele de 8080 2015-07-18 13-35-44](https://cloud.githubusercontent.com/assets/594239/8761438/07d76bf4-2d52-11e5-8540-82fd53932690.png)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl.class_init import InitializeClass\nfrom App.special_dtml import DTMLFile\nfrom OFS.Folder import Folder\nfrom OFS.PropertyManager import PropertyManager\nfrom Products.CMFCore.ActionInformation import ActionInformation\nfrom Products.CMFCore.ActionProviderBase import ActionProviderBase\nfrom Products.CMFCore.Expression import Expression, createExprContext\nfrom Products.CMFCore.permissions import ManagePortal, View\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.utils import registerToolInterface\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IControlPanel\nfrom Products.CMFPlone.PloneBaseTool import PloneBaseTool\nfrom zope.component.hooks import getSite\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\nfrom zope.interface import implementer\n\nimport six\n\n\nclass PloneConfiglet(ActionInformation):\n\n def __init__(self, appId, **kwargs):\n self.appId = appId\n ActionInformation.__init__(self, **kwargs)\n\n def getAppId(self):\n return self.appId\n\n def getDescription(self):\n return self.description\n\n def clone(self):\n return self.__class__(**self.__dict__)\n\n def getAction(self, ec):\n res = ActionInformation.getAction(self, ec)\n res['description'] = self.getDescription()\n return res\n\n\n@implementer(IControlPanel)\nclass PloneControlPanel(PloneBaseTool, UniqueObject,\n Folder, ActionProviderBase, PropertyManager):\n \"\"\"Weave together the various sources of \"actions\" which\n are apropos to the current user and context.\n \"\"\"\n\n security = ClassSecurityInfo()\n\n id = 'portal_controlpanel'\n title = 'Control Panel'\n toolicon = 'skins/plone_images/site_icon.png'\n meta_type = 'Plone Control Panel Tool'\n _actions_form = DTMLFile('www/editPloneConfiglets', globals())\n\n manage_options = (ActionProviderBase.manage_options +\n PropertyManager.manage_options)\n\n group = dict(\n member=[\n ('Member', _(u'My Preferences')),\n ],\n site=[\n ('plone-general', _(u'General')),\n ('plone-content', _(u'Content')),\n ('plone-users', _(u'Users')),\n ('plone-security', _(u'Security')),\n ('plone-advanced', _(u'Advanced')),\n ('Plone', _(u'Plone Configuration')),\n ('Products', _(u'Add-on Configuration')),\n ]\n )\n\n def __init__(self, **kw):\n if kw:\n self.__dict__.update(**kw)\n\n security.declareProtected(ManagePortal, 'registerConfiglets')\n\n def registerConfiglets(self, configlets):\n for conf in configlets:\n self.registerConfiglet(**conf)\n\n 
security.declareProtected(ManagePortal, 'getGroupIds')\n\n def getGroupIds(self, category='site'):\n groups = self.group.get(category, [])\n return [g[0] for g in groups if g]\n\n security.declareProtected(View, 'getGroups')\n\n def getGroups(self, category='site'):\n groups = self.group.get(category, [])\n return [{'id': g[0], 'title': g[1]} for g in groups if g]\n\n security.declarePrivate('listActions')\n\n def listActions(self, info=None, object=None):\n # This exists here to shut up a deprecation warning about old-style\n # actions in CMFCore's ActionProviderBase. It was decided not to\n # move configlets to be based on action tool categories for Plone 4\n # (see PLIP #8804), but that (or an alternative) will have to happen\n # before CMF 2.4 when support for old-style actions is removed.\n return self._actions or ()\n\n security.declarePublic('maySeeSomeConfiglets')\n\n def maySeeSomeConfiglets(self):\n groups = self.getGroups('site')\n\n all = []\n for group in groups:\n all.extend(self.enumConfiglets(group=group['id']))\n all = [item for item in all if item['visible']]\n return len(all) != 0\n\n security.declarePublic('enumConfiglets')\n\n def enumConfiglets(self, group=None):\n portal = getToolByName(self, 'portal_url').getPortalObject()\n context = createExprContext(self, portal, self)\n res = []\n for a in self.listActions():\n verified = 0\n for permission in a.permissions:\n if _checkPermission(permission, portal):\n verified = 1\n if verified and a.category == group and a.testCondition(context) \\\n and a.visible:\n res.append(a.getAction(context))\n # Translate the title for sorting\n if getattr(self, 'REQUEST', None) is not None:\n for a in res:\n title = a['title']\n if not isinstance(title, Message):\n title = Message(title, domain='plone')\n a['title'] = translate(title,\n context=self.REQUEST)\n\n def _id(v):\n return v['id']\n res.sort(key=_id)\n return res\n\n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n\n def unregisterConfiglet(self, id):\n actids = [o.id for o in self.listActions()]\n selection = [actids.index(a) for a in actids if a == id]\n if not selection:\n return\n self.deleteActions(selection)\n\n security.declareProtected(ManagePortal, 'unregisterApplication')\n\n def unregisterApplication(self, appId):\n acts = list(self.listActions())\n selection = [acts.index(a) for a in acts if a.appId == appId]\n if not selection:\n return\n self.deleteActions(selection)\n\n def _extractAction(self, properties, index):\n # Extract an ActionInformation from the funky form properties.\n id = str(properties.get('id_%d' % index, ''))\n name = str(properties.get('name_%d' % index, ''))\n action = str(properties.get('action_%d' % index, ''))\n condition = str(properties.get('condition_%d' % index, ''))\n category = str(properties.get('category_%d' % index, ''))\n visible = properties.get('visible_%d' % index, 0)\n permissions = properties.get('permission_%d' % index, ())\n appId = properties.get('appId_%d' % index, '')\n description = properties.get('description_%d' % index, '')\n icon_expr = properties.get('icon_expr_%d' % index, '')\n\n if not name:\n raise ValueError('A name is required.')\n\n if action != '':\n action = Expression(text=action)\n\n if condition != '':\n condition = Expression(text=condition)\n\n if category == '':\n category = 'object'\n\n if not isinstance(visible, int):\n try:\n visible = int(visible)\n except ValueError:\n visible = 0\n\n if isinstance(permissions, six.string_types):\n permissions = (permissions, )\n\n return 
PloneConfiglet(id=id,\n title=name,\n action=action,\n condition=condition,\n permissions=permissions,\n category=category,\n visible=visible,\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n security.declareProtected(ManagePortal, 'addAction')\n\n def addAction(self,\n id,\n name,\n action,\n condition='',\n permission='',\n category='Plone',\n visible=1,\n appId=None,\n icon_expr='',\n description='',\n REQUEST=None,\n ):\n # Add an action to our list.\n if not name:\n raise ValueError('A name is required.')\n\n a_expr = action and Expression(text=str(action)) or ''\n c_expr = condition and Expression(text=str(condition)) or ''\n\n if not isinstance(permission, tuple):\n permission = permission and (str(permission), ) or ()\n\n new_actions = self._cloneActions()\n\n new_action = PloneConfiglet(id=str(id),\n title=name,\n action=a_expr,\n condition=c_expr,\n permissions=permission,\n category=str(category),\n visible=int(visible),\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n new_actions.append(new_action)\n self._actions = tuple(new_actions)\n\n if REQUEST is not None:\n return self.manage_editActionsForm(\n REQUEST, manage_tabs_message='Added.')\n\n security.declareProtected(ManagePortal, 'registerConfiglet')\n registerConfiglet = addAction\n\n security.declareProtected(ManagePortal, 'manage_editActionsForm')\n\n def manage_editActionsForm(self, REQUEST, manage_tabs_message=None):\n \"\"\" Show the 'Actions' management tab.\n \"\"\"\n actions = []\n\n for a in self.listActions():\n\n a1 = {}\n a1['id'] = a.getId()\n a1['name'] = a.Title()\n p = a.getPermissions()\n if p:\n a1['permission'] = p[0]\n else:\n a1['permission'] = ''\n a1['category'] = a.getCategory() or 'object'\n a1['visible'] = a.getVisibility()\n a1['action'] = a.getActionExpression()\n a1['condition'] = a.getCondition()\n a1['appId'] = a.getAppId()\n a1['description'] = a.getDescription()\n a1['icon_expr'] = a.getIconExpression()\n actions.append(a1)\n\n # possible_permissions is in OFS.role.RoleManager.\n pp = self.possible_permissions()\n return self._actions_form(\n self,\n REQUEST,\n actions=actions,\n possible_permissions=pp,\n management_view='Actions',\n manage_tabs_message=manage_tabs_message,\n )\n\n @property\n def site_url(self):\n \"\"\"Return the absolute URL to the current site, which is likely not\n necessarily the portal root.\n Used by ``portlet_prefs`` to construct the URL to\n ``@@overview-controlpanel``.\n \"\"\"\n return getSite().absolute_url()\n\n\nInitializeClass(PloneControlPanel)\nregisterToolInterface('portal_controlpanel', IControlPanel)\n", "path": "Products/CMFPlone/PloneControlPanel.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom AccessControl import ClassSecurityInfo\nfrom AccessControl.class_init import InitializeClass\nfrom App.special_dtml import DTMLFile\nfrom OFS.Folder import Folder\nfrom OFS.PropertyManager import PropertyManager\nfrom Products.CMFCore.ActionInformation import ActionInformation\nfrom Products.CMFCore.ActionProviderBase import ActionProviderBase\nfrom Products.CMFCore.Expression import Expression, createExprContext\nfrom Products.CMFCore.permissions import ManagePortal, View\nfrom Products.CMFCore.utils import _checkPermission\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFCore.utils import registerToolInterface\nfrom Products.CMFCore.utils import UniqueObject\nfrom Products.CMFPlone import PloneMessageFactory as _\nfrom Products.CMFPlone.interfaces import IControlPanel\nfrom 
Products.CMFPlone.PloneBaseTool import PloneBaseTool\nfrom zope.component.hooks import getSite\nfrom zope.i18n import translate\nfrom zope.i18nmessageid import Message\nfrom zope.interface import implementer\n\nimport six\n\n\nclass PloneConfiglet(ActionInformation):\n\n def __init__(self, appId, **kwargs):\n self.appId = appId\n ActionInformation.__init__(self, **kwargs)\n\n def getAppId(self):\n return self.appId\n\n def getDescription(self):\n return self.description\n\n def clone(self):\n return self.__class__(**self.__dict__)\n\n def getAction(self, ec):\n res = ActionInformation.getAction(self, ec)\n res['description'] = self.getDescription()\n return res\n\n\n@implementer(IControlPanel)\nclass PloneControlPanel(PloneBaseTool, UniqueObject,\n Folder, ActionProviderBase, PropertyManager):\n \"\"\"Weave together the various sources of \"actions\" which\n are apropos to the current user and context.\n \"\"\"\n\n security = ClassSecurityInfo()\n\n id = 'portal_controlpanel'\n title = 'Control Panel'\n toolicon = 'skins/plone_images/site_icon.png'\n meta_type = 'Plone Control Panel Tool'\n _actions_form = DTMLFile('www/editPloneConfiglets', globals())\n\n manage_options = (ActionProviderBase.manage_options +\n PropertyManager.manage_options)\n\n group = dict(\n member=[\n ('Member', _(u'My Preferences')),\n ],\n site=[\n ('plone-general', _(u'General')),\n ('plone-content', _(u'Content')),\n ('plone-users', _(u'Users')),\n ('plone-security', _(u'Security')),\n ('plone-advanced', _(u'Advanced')),\n ('Plone', _(u'Plone Configuration')),\n ('Products', _(u'Add-on Configuration')),\n ]\n )\n\n def __init__(self, **kw):\n if kw:\n self.__dict__.update(**kw)\n\n security.declareProtected(ManagePortal, 'registerConfiglets')\n\n def registerConfiglets(self, configlets):\n for conf in configlets:\n self.registerConfiglet(**conf)\n\n security.declareProtected(ManagePortal, 'getGroupIds')\n\n def getGroupIds(self, category='site'):\n groups = self.group.get(category, [])\n return [g[0] for g in groups if g]\n\n security.declareProtected(View, 'getGroups')\n\n def getGroups(self, category='site'):\n groups = self.group.get(category, [])\n return [{'id': g[0], 'title': g[1]} for g in groups if g]\n\n security.declarePrivate('listActions')\n\n def listActions(self, info=None, object=None):\n # This exists here to shut up a deprecation warning about old-style\n # actions in CMFCore's ActionProviderBase. 
It was decided not to\n # move configlets to be based on action tool categories for Plone 4\n # (see PLIP #8804), but that (or an alternative) will have to happen\n # before CMF 2.4 when support for old-style actions is removed.\n return self._actions or ()\n\n security.declarePublic('maySeeSomeConfiglets')\n\n def maySeeSomeConfiglets(self):\n groups = self.getGroups('site')\n\n all = []\n for group in groups:\n all.extend(self.enumConfiglets(group=group['id']))\n all = [item for item in all if item['visible']]\n return len(all) != 0\n\n security.declarePublic('enumConfiglets')\n\n def enumConfiglets(self, group=None):\n portal = getToolByName(self, 'portal_url').getPortalObject()\n context = createExprContext(self, portal, self)\n res = []\n for a in self.listActions():\n verified = 0\n for permission in a.permissions:\n if _checkPermission(permission, portal):\n verified = 1\n if verified and a.category == group and a.testCondition(context) \\\n and a.visible:\n res.append(a.getAction(context))\n # Translate the title for sorting\n if getattr(self, 'REQUEST', None) is not None:\n for a in res:\n title = a['title']\n if not isinstance(title, Message):\n title = Message(title, domain='plone')\n a['title'] = translate(title,\n context=self.REQUEST)\n\n def _title(v):\n return v['title']\n\n res.sort(key=_title)\n return res\n\n security.declareProtected(ManagePortal, 'unregisterConfiglet')\n\n def unregisterConfiglet(self, id):\n actids = [o.id for o in self.listActions()]\n selection = [actids.index(a) for a in actids if a == id]\n if not selection:\n return\n self.deleteActions(selection)\n\n security.declareProtected(ManagePortal, 'unregisterApplication')\n\n def unregisterApplication(self, appId):\n acts = list(self.listActions())\n selection = [acts.index(a) for a in acts if a.appId == appId]\n if not selection:\n return\n self.deleteActions(selection)\n\n def _extractAction(self, properties, index):\n # Extract an ActionInformation from the funky form properties.\n id = str(properties.get('id_%d' % index, ''))\n name = str(properties.get('name_%d' % index, ''))\n action = str(properties.get('action_%d' % index, ''))\n condition = str(properties.get('condition_%d' % index, ''))\n category = str(properties.get('category_%d' % index, ''))\n visible = properties.get('visible_%d' % index, 0)\n permissions = properties.get('permission_%d' % index, ())\n appId = properties.get('appId_%d' % index, '')\n description = properties.get('description_%d' % index, '')\n icon_expr = properties.get('icon_expr_%d' % index, '')\n\n if not name:\n raise ValueError('A name is required.')\n\n if action != '':\n action = Expression(text=action)\n\n if condition != '':\n condition = Expression(text=condition)\n\n if category == '':\n category = 'object'\n\n if not isinstance(visible, int):\n try:\n visible = int(visible)\n except ValueError:\n visible = 0\n\n if isinstance(permissions, six.string_types):\n permissions = (permissions, )\n\n return PloneConfiglet(id=id,\n title=name,\n action=action,\n condition=condition,\n permissions=permissions,\n category=category,\n visible=visible,\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n security.declareProtected(ManagePortal, 'addAction')\n\n def addAction(self,\n id,\n name,\n action,\n condition='',\n permission='',\n category='Plone',\n visible=1,\n appId=None,\n icon_expr='',\n description='',\n REQUEST=None,\n ):\n # Add an action to our list.\n if not name:\n raise ValueError('A name is required.')\n\n a_expr = action and 
Expression(text=str(action)) or ''\n c_expr = condition and Expression(text=str(condition)) or ''\n\n if not isinstance(permission, tuple):\n permission = permission and (str(permission), ) or ()\n\n new_actions = self._cloneActions()\n\n new_action = PloneConfiglet(id=str(id),\n title=name,\n action=a_expr,\n condition=c_expr,\n permissions=permission,\n category=str(category),\n visible=int(visible),\n appId=appId,\n description=description,\n icon_expr=icon_expr,\n )\n\n new_actions.append(new_action)\n self._actions = tuple(new_actions)\n\n if REQUEST is not None:\n return self.manage_editActionsForm(\n REQUEST, manage_tabs_message='Added.')\n\n security.declareProtected(ManagePortal, 'registerConfiglet')\n registerConfiglet = addAction\n\n security.declareProtected(ManagePortal, 'manage_editActionsForm')\n\n def manage_editActionsForm(self, REQUEST, manage_tabs_message=None):\n \"\"\" Show the 'Actions' management tab.\n \"\"\"\n actions = []\n\n for a in self.listActions():\n\n a1 = {}\n a1['id'] = a.getId()\n a1['name'] = a.Title()\n p = a.getPermissions()\n if p:\n a1['permission'] = p[0]\n else:\n a1['permission'] = ''\n a1['category'] = a.getCategory() or 'object'\n a1['visible'] = a.getVisibility()\n a1['action'] = a.getActionExpression()\n a1['condition'] = a.getCondition()\n a1['appId'] = a.getAppId()\n a1['description'] = a.getDescription()\n a1['icon_expr'] = a.getIconExpression()\n actions.append(a1)\n\n # possible_permissions is in OFS.role.RoleManager.\n pp = self.possible_permissions()\n return self._actions_form(\n self,\n REQUEST,\n actions=actions,\n possible_permissions=pp,\n management_view='Actions',\n manage_tabs_message=manage_tabs_message,\n )\n\n @property\n def site_url(self):\n \"\"\"Return the absolute URL to the current site, which is likely not\n necessarily the portal root.\n Used by ``portlet_prefs`` to construct the URL to\n ``@@overview-controlpanel``.\n \"\"\"\n return getSite().absolute_url()\n\n\nInitializeClass(PloneControlPanel)\nregisterToolInterface('portal_controlpanel', IControlPanel)\n", "path": "Products/CMFPlone/PloneControlPanel.py"}]}
3,496
158
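The one-line change in the Plone record above swaps the sort key from the configlet `id` to its already-translated `title`, so the control panel lists entries alphabetically in the user's language. The idea reduces to an ordinary `sorted(..., key=...)` call; the sample titles below are made-up German strings used purely to show that the visible order now follows the translation rather than the internal id.

```python
configlets = [
    {"id": "plone-security", "title": "Sicherheit"},
    {"id": "plone-content", "title": "Inhalte"},
    {"id": "plone-general", "title": "Allgemein"},
]

by_id = sorted(configlets, key=lambda v: v["id"])        # old behaviour
by_title = sorted(configlets, key=lambda v: v["title"])  # behaviour after the fix

assert [c["id"] for c in by_id] == ["plone-content", "plone-general", "plone-security"]
assert [c["id"] for c in by_title] == ["plone-general", "plone-content", "plone-security"]
```

For fully locale-correct collation one could additionally wrap the key with `locale.strxfrm`, but plain string ordering of the translated titles already matches what the diff does.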
gh_patches_debug_7040
rasdani/github-patches
git_diff
saleor__saleor-1208
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Superuser can change his status In `StaffForm`: https://github.com/mirumee/saleor/blob/master/saleor/dashboard/staff/forms.py#L12-L13 Widget is disabled `self.fields['is_staff'].widget.attrs['disabled'] = True` But it won't prevent you from changing values on POST. We should disable field instead, like `self.fields['is_active'].disabled = True` Test covering this would be nice as well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/dashboard/staff/forms.py` Content: ``` 1 from django import forms 2 3 from ...userprofile.models import User 4 5 6 class StaffForm(forms.ModelForm): 7 def __init__(self, *args, **kwargs): 8 self.user = kwargs.pop('user', None) 9 kwargs.update(initial={'is_staff': True}) 10 super(StaffForm, self).__init__(*args, **kwargs) 11 if self.user == self.instance: 12 self.fields['is_staff'].widget.attrs['disabled'] = True 13 self.fields['is_active'].widget.attrs['disabled'] = True 14 15 class Meta: 16 model = User 17 fields = ['email', 'groups', 'is_staff', 'is_active'] 18 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/dashboard/staff/forms.py b/saleor/dashboard/staff/forms.py --- a/saleor/dashboard/staff/forms.py +++ b/saleor/dashboard/staff/forms.py @@ -9,8 +9,8 @@ kwargs.update(initial={'is_staff': True}) super(StaffForm, self).__init__(*args, **kwargs) if self.user == self.instance: - self.fields['is_staff'].widget.attrs['disabled'] = True - self.fields['is_active'].widget.attrs['disabled'] = True + self.fields['is_staff'].disabled = True + self.fields['is_active'].disabled = True class Meta: model = User
{"golden_diff": "diff --git a/saleor/dashboard/staff/forms.py b/saleor/dashboard/staff/forms.py\n--- a/saleor/dashboard/staff/forms.py\n+++ b/saleor/dashboard/staff/forms.py\n@@ -9,8 +9,8 @@\n kwargs.update(initial={'is_staff': True})\n super(StaffForm, self).__init__(*args, **kwargs)\n if self.user == self.instance:\n- self.fields['is_staff'].widget.attrs['disabled'] = True\n- self.fields['is_active'].widget.attrs['disabled'] = True\n+ self.fields['is_staff'].disabled = True\n+ self.fields['is_active'].disabled = True\n \n class Meta:\n model = User\n", "issue": "Superuser can change his status\nIn `StaffForm`:\r\nhttps://github.com/mirumee/saleor/blob/master/saleor/dashboard/staff/forms.py#L12-L13\r\nWidget is disabled `self.fields['is_staff'].widget.attrs['disabled'] = True` \r\nBut it won't prevent you from changing values on POST.\r\nWe should disable field instead, like `self.fields['is_active'].disabled = True` \r\nTest covering this would be nice as well.\r\n\n", "before_files": [{"content": "from django import forms\n\nfrom ...userprofile.models import User\n\n\nclass StaffForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n kwargs.update(initial={'is_staff': True})\n super(StaffForm, self).__init__(*args, **kwargs)\n if self.user == self.instance:\n self.fields['is_staff'].widget.attrs['disabled'] = True\n self.fields['is_active'].widget.attrs['disabled'] = True\n\n class Meta:\n model = User\n fields = ['email', 'groups', 'is_staff', 'is_active']\n", "path": "saleor/dashboard/staff/forms.py"}], "after_files": [{"content": "from django import forms\n\nfrom ...userprofile.models import User\n\n\nclass StaffForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n self.user = kwargs.pop('user', None)\n kwargs.update(initial={'is_staff': True})\n super(StaffForm, self).__init__(*args, **kwargs)\n if self.user == self.instance:\n self.fields['is_staff'].disabled = True\n self.fields['is_active'].disabled = True\n\n class Meta:\n model = User\n fields = ['email', 'groups', 'is_staff', 'is_active']\n", "path": "saleor/dashboard/staff/forms.py"}]}
527
152
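The saleor fix above hinges on a Django form feature: setting `disabled = True` on a *field* makes bound forms ignore whatever value arrives in the POST data and fall back to the initial value, whereas only adding `disabled` to the *widget's* HTML attributes is cosmetic and trivially bypassed. The snippet below is a self-contained illustration of that mechanism — a plain `forms.Form` with a throwaway settings configuration, not saleor's actual `StaffForm` or test suite — in the spirit of the regression test the issue asks for.

```python
import django
from django import forms
from django.conf import settings

if not settings.configured:
    settings.configure()  # minimal standalone setup, enough for form handling
    django.setup()


class SelfEditForm(forms.Form):
    is_staff = forms.BooleanField(required=False, initial=True)

    def __init__(self, *args, editing_self=False, **kwargs):
        super().__init__(*args, **kwargs)
        if editing_self:
            # Server-side guard: submitted values for this field are ignored.
            self.fields["is_staff"].disabled = True


# A user editing their own account tries to POST the checkbox as unchecked.
form = SelfEditForm(data={"is_staff": ""}, editing_self=True)
assert form.is_valid()
assert form.cleaned_data["is_staff"] is True  # tampering has no effect
```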
gh_patches_debug_39158
rasdani/github-patches
git_diff
quantumlib__Cirq-1668
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Class methods on GridQubit to create common lattices Some obvious shapes ```GridQubit.square(3, 3)``` ```GirdQubit.rect(2, 4)``` returning list of list with row and column. Default to 0,0 as one corner, but option to set corner. Ability to construction from an ASCII diagram like xmon devices ```GridQubit.from_pic``` which operations on maps like ``` ...A... ..AAA.. .AAAAA. AAAAAAAA ```` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/devices/grid_qubit.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 from typing import Dict 17 18 from cirq import ops 19 20 21 class GridQubit(ops.Qid): 22 """A qubit on a 2d square lattice. 23 24 GridQubits use row-major ordering: 25 26 GridQubit(0, 0) < GridQubit(0, 1) < GridQubit(1, 0) < GridQubit(1, 1) 27 """ 28 29 def __init__(self, row: int, col: int): 30 self.row = row 31 self.col = col 32 33 def _comparison_key(self): 34 return self.row, self.col 35 36 def is_adjacent(self, other: ops.Qid) -> bool: 37 """Determines if two qubits are adjacent qubits.""" 38 return (isinstance(other, GridQubit) and 39 abs(self.row - other.row) + abs(self.col - other.col) == 1) 40 41 def __repr__(self): 42 return 'cirq.GridQubit({}, {})'.format(self.row, self.col) 43 44 def __str__(self): 45 return '({}, {})'.format(self.row, self.col) 46 47 def to_proto_dict(self) -> Dict: 48 """Return the proto in dictionary form.""" 49 return { 50 'row': self.row, 51 'col': self.col, 52 } 53 54 @staticmethod 55 def from_proto_dict(proto_dict: Dict) -> 'GridQubit': 56 """Proto dict must have 'row' and 'col' keys.""" 57 if 'row' not in proto_dict or 'col' not in proto_dict: 58 raise ValueError( 59 'Proto dict does not contain row or col: {}'.format(proto_dict)) 60 return GridQubit(row=proto_dict['row'], col=proto_dict['col']) 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/devices/grid_qubit.py b/cirq/devices/grid_qubit.py --- a/cirq/devices/grid_qubit.py +++ b/cirq/devices/grid_qubit.py @@ -13,7 +13,7 @@ # limitations under the License. -from typing import Dict +from typing import Dict, List from cirq import ops @@ -38,6 +38,93 @@ return (isinstance(other, GridQubit) and abs(self.row - other.row) + abs(self.col - other.col) == 1) + @staticmethod + def square(diameter: int, top: int = 0, left: int = 0) -> List['GridQubit']: + """Returns a square of GridQubits. + + Args: + diameter: Length of a side of the square + top: Row number of the topmost row + left: Column number of the leftmost row + + Returns: + A list of GridQubits filling in a square grid + """ + return GridQubit.rect(diameter, diameter, top=top, left=left) + + @staticmethod + def rect(rows: int, cols: int, top: int = 0, + left: int = 0) -> List['GridQubit']: + """Returns a rectangle of GridQubits. + + Args: + rows: Number of rows in the rectangle + cols: Number of columns in the rectangle + top: Row number of the topmost row + left: Column number of the leftmost row + + Returns: + A list of GridQubits filling in a rectangular grid + """ + return [ + GridQubit(row, col) + for row in range(top, top + rows) + for col in range(left, left + cols) + ] + + @staticmethod + def from_diagram(diagram: str) -> List['GridQubit']: + """Parse ASCII art device layout into info about qubits and + connectivity. As an example, the below diagram will create a list of + GridQubits in a pyramid structure. + ---A--- + --AAA-- + -AAAAA- + AAAAAAA + + You can use any character other than a hyphen to mark a qubit. As an + example, the qubits for the Bristlecone device could be represented by + the below diagram. This produces a diamond-shaped grid of qubits, and + qubits with the same letter correspond to the same readout line. + + .....AB..... + ....ABCD.... + ...ABCDEF... + ..ABCDEFGH.. + .ABCDEFGHIJ. + ABCDEFGHIJKL + .CDEFGHIJKL. + ..EFGHIJKL.. + ...GHIJKL... + ....IJKL.... + .....KL..... + + Args: + diagram: String representing the qubit layout. Each line represents + a row. Alphanumeric characters are assigned as qubits. + Dots ('.'), dashes ('-'), and spaces (' ') are treated as + empty locations in the grid. If diagram has characters other + than alphanumerics, spacers, and newlines ('\n'), an error will + be thrown. The top-left corner of the diagram will be have + coordinate (0,0). + + Returns: + A list of GridQubits corresponding to the provided diagram + + Raises: + ValueError: If the input string contains an invalid character. + """ + lines = diagram.strip().split('\n') + no_qubit_characters = ['.', '-', ' '] + qubits = [] + for row, line in enumerate(lines): + for col, c in enumerate(line.strip()): + if c not in no_qubit_characters: + if not c.isalnum(): + raise ValueError("Input string has invalid character") + qubits.append(GridQubit(row, col)) + return qubits + def __repr__(self): return 'cirq.GridQubit({}, {})'.format(self.row, self.col)
{"golden_diff": "diff --git a/cirq/devices/grid_qubit.py b/cirq/devices/grid_qubit.py\n--- a/cirq/devices/grid_qubit.py\n+++ b/cirq/devices/grid_qubit.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n \n-from typing import Dict\n+from typing import Dict, List\n \n from cirq import ops\n \n@@ -38,6 +38,93 @@\n return (isinstance(other, GridQubit) and\n abs(self.row - other.row) + abs(self.col - other.col) == 1)\n \n+ @staticmethod\n+ def square(diameter: int, top: int = 0, left: int = 0) -> List['GridQubit']:\n+ \"\"\"Returns a square of GridQubits.\n+\n+ Args:\n+ diameter: Length of a side of the square\n+ top: Row number of the topmost row\n+ left: Column number of the leftmost row\n+\n+ Returns:\n+ A list of GridQubits filling in a square grid\n+ \"\"\"\n+ return GridQubit.rect(diameter, diameter, top=top, left=left)\n+\n+ @staticmethod\n+ def rect(rows: int, cols: int, top: int = 0,\n+ left: int = 0) -> List['GridQubit']:\n+ \"\"\"Returns a rectangle of GridQubits.\n+\n+ Args:\n+ rows: Number of rows in the rectangle\n+ cols: Number of columns in the rectangle\n+ top: Row number of the topmost row\n+ left: Column number of the leftmost row\n+\n+ Returns:\n+ A list of GridQubits filling in a rectangular grid\n+ \"\"\"\n+ return [\n+ GridQubit(row, col)\n+ for row in range(top, top + rows)\n+ for col in range(left, left + cols)\n+ ]\n+\n+ @staticmethod\n+ def from_diagram(diagram: str) -> List['GridQubit']:\n+ \"\"\"Parse ASCII art device layout into info about qubits and\n+ connectivity. As an example, the below diagram will create a list of\n+ GridQubits in a pyramid structure.\n+ ---A---\n+ --AAA--\n+ -AAAAA-\n+ AAAAAAA\n+\n+ You can use any character other than a hyphen to mark a qubit. As an\n+ example, the qubits for the Bristlecone device could be represented by\n+ the below diagram. This produces a diamond-shaped grid of qubits, and\n+ qubits with the same letter correspond to the same readout line.\n+\n+ .....AB.....\n+ ....ABCD....\n+ ...ABCDEF...\n+ ..ABCDEFGH..\n+ .ABCDEFGHIJ.\n+ ABCDEFGHIJKL\n+ .CDEFGHIJKL.\n+ ..EFGHIJKL..\n+ ...GHIJKL...\n+ ....IJKL....\n+ .....KL.....\n+\n+ Args:\n+ diagram: String representing the qubit layout. Each line represents\n+ a row. Alphanumeric characters are assigned as qubits.\n+ Dots ('.'), dashes ('-'), and spaces (' ') are treated as\n+ empty locations in the grid. If diagram has characters other\n+ than alphanumerics, spacers, and newlines ('\\n'), an error will\n+ be thrown. The top-left corner of the diagram will be have\n+ coordinate (0,0).\n+\n+ Returns:\n+ A list of GridQubits corresponding to the provided diagram\n+\n+ Raises:\n+ ValueError: If the input string contains an invalid character.\n+ \"\"\"\n+ lines = diagram.strip().split('\\n')\n+ no_qubit_characters = ['.', '-', ' ']\n+ qubits = []\n+ for row, line in enumerate(lines):\n+ for col, c in enumerate(line.strip()):\n+ if c not in no_qubit_characters:\n+ if not c.isalnum():\n+ raise ValueError(\"Input string has invalid character\")\n+ qubits.append(GridQubit(row, col))\n+ return qubits\n+\n def __repr__(self):\n return 'cirq.GridQubit({}, {})'.format(self.row, self.col)\n", "issue": "Class methods on GridQubit to create common lattices\nSome obvious shapes\r\n```GridQubit.square(3, 3)```\r\n```GirdQubit.rect(2, 4)```\r\nreturning list of list with row and column. 
Default to 0,0 as one corner, but option to set corner.\r\n\r\nAbility to construction from an ASCII diagram like xmon devices\r\n```GridQubit.from_pic```\r\nwhich operations on maps like\r\n```\r\n...A...\r\n..AAA..\r\n.AAAAA.\r\nAAAAAAAA\r\n````\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import Dict\n\nfrom cirq import ops\n\n\nclass GridQubit(ops.Qid):\n \"\"\"A qubit on a 2d square lattice.\n\n GridQubits use row-major ordering:\n\n GridQubit(0, 0) < GridQubit(0, 1) < GridQubit(1, 0) < GridQubit(1, 1)\n \"\"\"\n\n def __init__(self, row: int, col: int):\n self.row = row\n self.col = col\n\n def _comparison_key(self):\n return self.row, self.col\n\n def is_adjacent(self, other: ops.Qid) -> bool:\n \"\"\"Determines if two qubits are adjacent qubits.\"\"\"\n return (isinstance(other, GridQubit) and\n abs(self.row - other.row) + abs(self.col - other.col) == 1)\n\n def __repr__(self):\n return 'cirq.GridQubit({}, {})'.format(self.row, self.col)\n\n def __str__(self):\n return '({}, {})'.format(self.row, self.col)\n\n def to_proto_dict(self) -> Dict:\n \"\"\"Return the proto in dictionary form.\"\"\"\n return {\n 'row': self.row,\n 'col': self.col,\n }\n\n @staticmethod\n def from_proto_dict(proto_dict: Dict) -> 'GridQubit':\n \"\"\"Proto dict must have 'row' and 'col' keys.\"\"\"\n if 'row' not in proto_dict or 'col' not in proto_dict:\n raise ValueError(\n 'Proto dict does not contain row or col: {}'.format(proto_dict))\n return GridQubit(row=proto_dict['row'], col=proto_dict['col'])\n", "path": "cirq/devices/grid_qubit.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom typing import Dict, List\n\nfrom cirq import ops\n\n\nclass GridQubit(ops.Qid):\n \"\"\"A qubit on a 2d square lattice.\n\n GridQubits use row-major ordering:\n\n GridQubit(0, 0) < GridQubit(0, 1) < GridQubit(1, 0) < GridQubit(1, 1)\n \"\"\"\n\n def __init__(self, row: int, col: int):\n self.row = row\n self.col = col\n\n def _comparison_key(self):\n return self.row, self.col\n\n def is_adjacent(self, other: ops.Qid) -> bool:\n \"\"\"Determines if two qubits are adjacent qubits.\"\"\"\n return (isinstance(other, GridQubit) and\n abs(self.row - other.row) + abs(self.col - other.col) == 1)\n\n @staticmethod\n def square(diameter: int, top: int = 0, left: int = 0) -> List['GridQubit']:\n \"\"\"Returns a square of GridQubits.\n\n Args:\n diameter: 
Length of a side of the square\n top: Row number of the topmost row\n left: Column number of the leftmost row\n\n Returns:\n A list of GridQubits filling in a square grid\n \"\"\"\n return GridQubit.rect(diameter, diameter, top=top, left=left)\n\n @staticmethod\n def rect(rows: int, cols: int, top: int = 0,\n left: int = 0) -> List['GridQubit']:\n \"\"\"Returns a rectangle of GridQubits.\n\n Args:\n rows: Number of rows in the rectangle\n cols: Number of columns in the rectangle\n top: Row number of the topmost row\n left: Column number of the leftmost row\n\n Returns:\n A list of GridQubits filling in a rectangular grid\n \"\"\"\n return [\n GridQubit(row, col)\n for row in range(top, top + rows)\n for col in range(left, left + cols)\n ]\n\n @staticmethod\n def from_diagram(diagram: str) -> List['GridQubit']:\n \"\"\"Parse ASCII art device layout into info about qubits and\n connectivity. As an example, the below diagram will create a list of\n GridQubits in a pyramid structure.\n ---A---\n --AAA--\n -AAAAA-\n AAAAAAA\n\n You can use any character other than a hyphen to mark a qubit. As an\n example, the qubits for the Bristlecone device could be represented by\n the below diagram. This produces a diamond-shaped grid of qubits, and\n qubits with the same letter correspond to the same readout line.\n\n .....AB.....\n ....ABCD....\n ...ABCDEF...\n ..ABCDEFGH..\n .ABCDEFGHIJ.\n ABCDEFGHIJKL\n .CDEFGHIJKL.\n ..EFGHIJKL..\n ...GHIJKL...\n ....IJKL....\n .....KL.....\n\n Args:\n diagram: String representing the qubit layout. Each line represents\n a row. Alphanumeric characters are assigned as qubits.\n Dots ('.'), dashes ('-'), and spaces (' ') are treated as\n empty locations in the grid. If diagram has characters other\n than alphanumerics, spacers, and newlines ('\\n'), an error will\n be thrown. The top-left corner of the diagram will be have\n coordinate (0,0).\n\n Returns:\n A list of GridQubits corresponding to the provided diagram\n\n Raises:\n ValueError: If the input string contains an invalid character.\n \"\"\"\n lines = diagram.strip().split('\\n')\n no_qubit_characters = ['.', '-', ' ']\n qubits = []\n for row, line in enumerate(lines):\n for col, c in enumerate(line.strip()):\n if c not in no_qubit_characters:\n if not c.isalnum():\n raise ValueError(\"Input string has invalid character\")\n qubits.append(GridQubit(row, col))\n return qubits\n\n def __repr__(self):\n return 'cirq.GridQubit({}, {})'.format(self.row, self.col)\n\n def __str__(self):\n return '({}, {})'.format(self.row, self.col)\n\n def to_proto_dict(self) -> Dict:\n \"\"\"Return the proto in dictionary form.\"\"\"\n return {\n 'row': self.row,\n 'col': self.col,\n }\n\n @staticmethod\n def from_proto_dict(proto_dict: Dict) -> 'GridQubit':\n \"\"\"Proto dict must have 'row' and 'col' keys.\"\"\"\n if 'row' not in proto_dict or 'col' not in proto_dict:\n raise ValueError(\n 'Proto dict does not contain row or col: {}'.format(proto_dict))\n return GridQubit(row=proto_dict['row'], col=proto_dict['col'])\n", "path": "cirq/devices/grid_qubit.py"}]}
998
938
gh_patches_debug_2317
rasdani/github-patches
git_diff
ivy-llc__ivy-23306
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- rfftfreq --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/fft.py` Content: ``` 1 # global 2 import ivy 3 from ivy.func_wrapper import with_supported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 ) 7 8 9 @with_supported_dtypes( 10 {"2.5.1 and below": ("complex64", "complex128")}, 11 "paddle", 12 ) 13 @to_ivy_arrays_and_back 14 def fft(x, n=None, axis=-1.0, norm="backward", name=None): 15 ret = ivy.fft(ivy.astype(x, "complex128"), axis, norm=norm, n=n) 16 return ivy.astype(ret, x.dtype) 17 18 19 @with_supported_dtypes( 20 { 21 "2.5.1 and below": ( 22 "int32", 23 "int64", 24 "float32", 25 "float64", 26 "complex64", 27 "complex128", 28 ) 29 }, 30 "paddle", 31 ) 32 @to_ivy_arrays_and_back 33 def fftshift(x, axes=None, name=None): 34 shape = x.shape 35 36 if axes is None: 37 axes = tuple(range(x.ndim)) 38 shifts = [(dim // 2) for dim in shape] 39 elif isinstance(axes, int): 40 shifts = shape[axes] // 2 41 else: 42 shifts = ivy.concat([shape[ax] // 2 for ax in axes]) 43 44 roll = ivy.roll(x, shifts, axis=axes) 45 46 return roll 47 48 49 @with_supported_dtypes( 50 {"2.5.1 and below": ("complex64", "complex128")}, 51 "paddle", 52 ) 53 @to_ivy_arrays_and_back 54 def hfft(x, n=None, axis=-1, norm="backward", name=None): 55 """Compute the FFT of a signal that has Hermitian symmetry, resulting in a real 56 spectrum.""" 57 # Determine the input shape and axis length 58 input_shape = x.shape 59 input_len = input_shape[axis] 60 61 # Calculate n if not provided 62 if n is None: 63 n = 2 * (input_len - 1) 64 65 # Perform the FFT along the specified axis 66 result = ivy.fft(x, axis, n=n, norm=norm) 67 68 return ivy.real(result) 69 70 71 @with_supported_dtypes( 72 {"2.5.1 and below": ("complex64", "complex128")}, 73 "paddle", 74 ) 75 @to_ivy_arrays_and_back 76 def ifft(x, n=None, axis=-1.0, norm="backward", name=None): 77 ret = ivy.ifft(ivy.astype(x, "complex128"), axis, norm=norm, n=n) 78 return ivy.astype(ret, x.dtype) 79 80 81 @with_supported_dtypes( 82 { 83 "2.5.1 and below": ( 84 "int32", 85 "int64", 86 "float32", 87 "float64", 88 ) 89 }, 90 "paddle", 91 ) 92 @to_ivy_arrays_and_back 93 def ifftshift(x, axes=None, name=None): 94 shape = x.shape 95 96 if axes is None: 97 axes = tuple(range(x.ndim)) 98 shifts = [-(dim // 2) for dim in shape] 99 elif isinstance(axes, int): 100 shifts = -(shape[axes] // 2) 101 else: 102 shifts = ivy.concat([-shape[ax] // 2 for ax in axes]) 103 104 roll = ivy.roll(x, shifts, axis=axes) 105 106 return roll 107 108 109 @with_supported_dtypes( 110 {"2.5.1 and below": ("complex64", "complex128")}, 111 "paddle", 112 ) 113 @to_ivy_arrays_and_back 114 def irfft(x, n=None, axis=-1.0, norm="backward", name=None): 115 if n is None: 116 n = 2 * (x.shape[axis] - 1) 117 118 pos_freq_terms = ivy.take_along_axis(x, range(n // 2 + 1), axis) 119 neg_freq_terms = ivy.conj(pos_freq_terms[1:-1][::-1]) 120 combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=axis) 121 time_domain = ivy.ifft(combined_freq_terms, axis, norm=norm, n=n) 122 if ivy.isreal(x): 123 time_domain = ivy.real(time_domain) 124 return time_domain 125 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/fft.py b/ivy/functional/frontends/paddle/fft.py --- a/ivy/functional/frontends/paddle/fft.py +++ b/ivy/functional/frontends/paddle/fft.py @@ -122,3 +122,12 @@ if ivy.isreal(x): time_domain = ivy.real(time_domain) return time_domain + + +@to_ivy_arrays_and_back +def rfftfreq(n, d=1.0, dtype=None, name=None): + dtype = ivy.default_dtype() + val = 1.0 / (n * d) + pos_max = n // 2 + 1 + indices = ivy.arange(0, pos_max, dtype=dtype) + return indices * val
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/fft.py b/ivy/functional/frontends/paddle/fft.py\n--- a/ivy/functional/frontends/paddle/fft.py\n+++ b/ivy/functional/frontends/paddle/fft.py\n@@ -122,3 +122,12 @@\n if ivy.isreal(x):\n time_domain = ivy.real(time_domain)\n return time_domain\n+\n+\n+@to_ivy_arrays_and_back\n+def rfftfreq(n, d=1.0, dtype=None, name=None):\n+ dtype = ivy.default_dtype()\n+ val = 1.0 / (n * d)\n+ pos_max = n // 2 + 1\n+ indices = ivy.arange(0, pos_max, dtype=dtype)\n+ return indices * val\n", "issue": "rfftfreq\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.fft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = ivy.concat([shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef hfft(x, n=None, axis=-1, norm=\"backward\", name=None):\n \"\"\"Compute the FFT of a signal that has Hermitian symmetry, resulting in a real\n spectrum.\"\"\"\n # Determine the input shape and axis length\n input_shape = x.shape\n input_len = input_shape[axis]\n\n # Calculate n if not provided\n if n is None:\n n = 2 * (input_len - 1)\n\n # Perform the FFT along the specified axis\n result = ivy.fft(x, axis, n=n, norm=norm)\n\n return ivy.real(result)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.ifft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [-(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = -(shape[axes] // 2)\n else:\n shifts = ivy.concat([-shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n if n is None:\n n = 2 * (x.shape[axis] - 1)\n\n pos_freq_terms = ivy.take_along_axis(x, range(n // 2 + 1), axis)\n neg_freq_terms = ivy.conj(pos_freq_terms[1:-1][::-1])\n combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=axis)\n time_domain = ivy.ifft(combined_freq_terms, axis, norm=norm, n=n)\n if ivy.isreal(x):\n time_domain = ivy.real(time_domain)\n return 
time_domain\n", "path": "ivy/functional/frontends/paddle/fft.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_supported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.fft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef fftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = shape[axes] // 2\n else:\n shifts = ivy.concat([shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef hfft(x, n=None, axis=-1, norm=\"backward\", name=None):\n \"\"\"Compute the FFT of a signal that has Hermitian symmetry, resulting in a real\n spectrum.\"\"\"\n # Determine the input shape and axis length\n input_shape = x.shape\n input_len = input_shape[axis]\n\n # Calculate n if not provided\n if n is None:\n n = 2 * (input_len - 1)\n\n # Perform the FFT along the specified axis\n result = ivy.fft(x, axis, n=n, norm=norm)\n\n return ivy.real(result)\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n ret = ivy.ifft(ivy.astype(x, \"complex128\"), axis, norm=norm, n=n)\n return ivy.astype(ret, x.dtype)\n\n\n@with_supported_dtypes(\n {\n \"2.5.1 and below\": (\n \"int32\",\n \"int64\",\n \"float32\",\n \"float64\",\n )\n },\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef ifftshift(x, axes=None, name=None):\n shape = x.shape\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shifts = [-(dim // 2) for dim in shape]\n elif isinstance(axes, int):\n shifts = -(shape[axes] // 2)\n else:\n shifts = ivy.concat([-shape[ax] // 2 for ax in axes])\n\n roll = ivy.roll(x, shifts, axis=axes)\n\n return roll\n\n\n@with_supported_dtypes(\n {\"2.5.1 and below\": (\"complex64\", \"complex128\")},\n \"paddle\",\n)\n@to_ivy_arrays_and_back\ndef irfft(x, n=None, axis=-1.0, norm=\"backward\", name=None):\n if n is None:\n n = 2 * (x.shape[axis] - 1)\n\n pos_freq_terms = ivy.take_along_axis(x, range(n // 2 + 1), axis)\n neg_freq_terms = ivy.conj(pos_freq_terms[1:-1][::-1])\n combined_freq_terms = ivy.concat((pos_freq_terms, neg_freq_terms), axis=axis)\n time_domain = ivy.ifft(combined_freq_terms, axis, norm=norm, n=n)\n if ivy.isreal(x):\n time_domain = ivy.real(time_domain)\n return time_domain\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0, dtype=None, name=None):\n dtype = ivy.default_dtype()\n val = 1.0 / (n * d)\n pos_max = n // 2 + 1\n indices = ivy.arange(0, pos_max, dtype=dtype)\n return indices * val\n", "path": "ivy/functional/frontends/paddle/fft.py"}]}
1501
185
gh_patches_debug_22143
rasdani/github-patches
git_diff
nautobot__nautobot-261
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cable path tracing does not work with data imported by nautobot-netbox-importer (KeyError) <!-- NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED. This form is only for reporting reproducible bugs. If you need assistance with Nautobot installation, or if you have a general question, please start a discussion instead: https://github.com/nautobot/nautobot/discussions Please describe the environment in which you are running Nautobot. Be sure that you are running an unmodified instance of the latest stable release before submitting a bug report, and that any plugins have been disabled. --> ### Environment * Python version: any * Nautobot version: 1.0.0b3 <!-- Describe in detail the exact steps that someone else can take to reproduce this bug using the current stable release of Nautobot. Begin with the creation of any necessary database objects and call out every operation being performed explicitly. If reporting a bug in the REST API, be sure to reconstruct the raw HTTP request(s) being made: Don't rely on a client library such as pynautobot. --> ### Steps to Reproduce 1. Install Nautobot 1.0.0b3 2. Install the `nautobot-netbox-importer` plugin 3. Import data from NetBox using the importer 4. In the Nautobot web UI, navigate to the Device detail view for a Device that has connected interfaces, and attempt to access the `Interfaces` tab. <!-- What did you expect to happen? --> ### Expected Behavior The page should load successfully, including cable traces for any connected interfaces. <!-- What happened instead? --> ### Observed Behavior Nautobot throws a KeyError: ``` File "/usr/local/lib/python3.7/site-packages/nautobot/dcim/models/cables.py", line 465, in get_path path.append(prefetched[ct_id][object_id]) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nautobot/dcim/utils.py` Content: ``` 1 import uuid 2 3 from django.contrib.contenttypes.models import ContentType 4 5 from nautobot.utilities.utils import hex_to_rgb, lighten_color, rgb_to_hex 6 7 8 def compile_path_node(ct_id, object_id): 9 return f"{ct_id}:{object_id}" 10 11 12 def decompile_path_node(repr): 13 ct_id, object_id = repr.split(":") 14 # The value is stored as a string, but the lookup later uses UUID objects as keys so we convert it now. 15 # Note that the content type ID is still an integer because we have no control over that model. 16 return int(ct_id), uuid.UUID(object_id, version=4) 17 18 19 def object_to_path_node(obj): 20 """ 21 Return a representation of an object suitable for inclusion in a CablePath path. Node representation is in the 22 form <ContentType ID>:<Object ID>. 23 """ 24 ct = ContentType.objects.get_for_model(obj) 25 return compile_path_node(ct.pk, obj.pk) 26 27 28 def path_node_to_object(repr): 29 """ 30 Given the string representation of a path node, return the corresponding instance. 31 """ 32 ct_id, object_id = decompile_path_node(repr) 33 ct = ContentType.objects.get_for_id(ct_id) 34 return ct.model_class().objects.get(pk=object_id) 35 36 37 def cable_status_color_css(record): 38 """ 39 Given a record such as an Interface, return the CSS needed to apply appropriate coloring to it. 40 """ 41 if not record.cable: 42 return "" 43 # The status colors are for use with labels and such, and tend to be quite bright. 
44 # For this function we want a much milder, mellower color suitable as a row background. 45 base_color = record.cable.get_status_color().strip("#") 46 lighter_color = rgb_to_hex(*lighten_color(*hex_to_rgb(base_color), 0.75)) 47 return f"background-color: #{lighter_color}" 48 ``` Path: `nautobot/core/api/serializers.py` Content: ``` 1 import uuid 2 3 from django.core.exceptions import ( 4 FieldError, 5 MultipleObjectsReturned, 6 ObjectDoesNotExist, 7 ) 8 from django.db.models import AutoField, ManyToManyField 9 from rest_framework import serializers 10 from rest_framework.exceptions import ValidationError 11 12 from nautobot.utilities.utils import dict_to_filter_params 13 14 15 class ValidatedModelSerializer(serializers.ModelSerializer): 16 """ 17 Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during 18 validation. (DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144) 19 """ 20 21 def validate(self, data): 22 23 # Remove custom fields data and tags (if any) prior to model validation 24 attrs = data.copy() 25 attrs.pop("custom_fields", None) 26 attrs.pop("tags", None) 27 28 # Skip ManyToManyFields 29 for field in self.Meta.model._meta.get_fields(): 30 if isinstance(field, ManyToManyField): 31 attrs.pop(field.name, None) 32 33 # Run clean() on an instance of the model 34 if self.instance is None: 35 instance = self.Meta.model(**attrs) 36 else: 37 instance = self.instance 38 for k, v in attrs.items(): 39 setattr(instance, k, v) 40 instance.full_clean() 41 42 return data 43 44 45 class WritableNestedSerializer(serializers.ModelSerializer): 46 """ 47 Returns a nested representation of an object on read, but accepts only a primary key on write. 48 """ 49 50 def to_internal_value(self, data): 51 52 if data is None: 53 return None 54 55 # Dictionary of related object attributes 56 if isinstance(data, dict): 57 params = dict_to_filter_params(data) 58 queryset = self.Meta.model.objects 59 try: 60 return queryset.get(**params) 61 except ObjectDoesNotExist: 62 raise ValidationError("Related object not found using the provided attributes: {}".format(params)) 63 except MultipleObjectsReturned: 64 raise ValidationError("Multiple objects match the provided attributes: {}".format(params)) 65 except FieldError as e: 66 raise ValidationError(e) 67 68 queryset = self.Meta.model.objects 69 pk = None 70 71 if isinstance(self.Meta.model._meta.pk, AutoField): 72 # PK is an int for this model. This is usually the User model 73 try: 74 pk = int(data) 75 except (TypeError, ValueError): 76 raise ValidationError( 77 "Related objects must be referenced by ID or by dictionary of attributes. Received an " 78 "unrecognized value: {}".format(data) 79 ) 80 81 else: 82 # We assume a type of UUIDField for all other models 83 84 # PK of related object 85 try: 86 # Ensure the pk is a valid UUID 87 pk = uuid.UUID(str(data), version=4) 88 except (TypeError, ValueError): 89 raise ValidationError( 90 "Related objects must be referenced by ID or by dictionary of attributes. 
Received an " 91 "unrecognized value: {}".format(data) 92 ) 93 94 try: 95 return queryset.get(pk=pk) 96 except ObjectDoesNotExist: 97 raise ValidationError("Related object not found using the provided ID: {}".format(pk)) 98 99 100 class BulkOperationSerializer(serializers.Serializer): 101 id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model 102 103 104 # 105 # GraphQL, used by the openapi doc, not by the view 106 # 107 108 109 class GraphQLAPISerializer(serializers.Serializer): 110 query = serializers.CharField(required=True, help_text="GraphQL query") 111 variables = serializers.JSONField(required=False, help_text="Variables in JSON Format") 112 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py --- a/nautobot/core/api/serializers.py +++ b/nautobot/core/api/serializers.py @@ -84,7 +84,7 @@ # PK of related object try: # Ensure the pk is a valid UUID - pk = uuid.UUID(str(data), version=4) + pk = uuid.UUID(str(data)) except (TypeError, ValueError): raise ValidationError( "Related objects must be referenced by ID or by dictionary of attributes. Received an " diff --git a/nautobot/dcim/utils.py b/nautobot/dcim/utils.py --- a/nautobot/dcim/utils.py +++ b/nautobot/dcim/utils.py @@ -13,7 +13,7 @@ ct_id, object_id = repr.split(":") # The value is stored as a string, but the lookup later uses UUID objects as keys so we convert it now. # Note that the content type ID is still an integer because we have no control over that model. - return int(ct_id), uuid.UUID(object_id, version=4) + return int(ct_id), uuid.UUID(object_id) def object_to_path_node(obj):
{"golden_diff": "diff --git a/nautobot/core/api/serializers.py b/nautobot/core/api/serializers.py\n--- a/nautobot/core/api/serializers.py\n+++ b/nautobot/core/api/serializers.py\n@@ -84,7 +84,7 @@\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n- pk = uuid.UUID(str(data), version=4)\n+ pk = uuid.UUID(str(data))\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\ndiff --git a/nautobot/dcim/utils.py b/nautobot/dcim/utils.py\n--- a/nautobot/dcim/utils.py\n+++ b/nautobot/dcim/utils.py\n@@ -13,7 +13,7 @@\n ct_id, object_id = repr.split(\":\")\n # The value is stored as a string, but the lookup later uses UUID objects as keys so we convert it now.\n # Note that the content type ID is still an integer because we have no control over that model.\n- return int(ct_id), uuid.UUID(object_id, version=4)\n+ return int(ct_id), uuid.UUID(object_id)\n \n \n def object_to_path_node(obj):\n", "issue": "Cable path tracing does not work with data imported by nautobot-netbox-importer (KeyError)\n<!--\r\n NOTE: IF YOUR ISSUE DOES NOT FOLLOW THIS TEMPLATE, IT WILL BE CLOSED.\r\n\r\n This form is only for reporting reproducible bugs. If you need assistance\r\n with Nautobot installation, or if you have a general question, please start a\r\n discussion instead: https://github.com/nautobot/nautobot/discussions\r\n\r\n Please describe the environment in which you are running Nautobot. Be sure\r\n that you are running an unmodified instance of the latest stable release\r\n before submitting a bug report, and that any plugins have been disabled.\r\n-->\r\n### Environment\r\n* Python version: any\r\n* Nautobot version: 1.0.0b3\r\n\r\n<!--\r\n Describe in detail the exact steps that someone else can take to reproduce\r\n this bug using the current stable release of Nautobot. Begin with the\r\n creation of any necessary database objects and call out every operation\r\n being performed explicitly. If reporting a bug in the REST API, be sure to\r\n reconstruct the raw HTTP request(s) being made: Don't rely on a client\r\n library such as pynautobot.\r\n-->\r\n### Steps to Reproduce\r\n1. Install Nautobot 1.0.0b3\r\n2. Install the `nautobot-netbox-importer` plugin\r\n3. Import data from NetBox using the importer\r\n4. In the Nautobot web UI, navigate to the Device detail view for a Device that has connected interfaces, and attempt to access the `Interfaces` tab.\r\n\r\n<!-- What did you expect to happen? -->\r\n### Expected Behavior\r\n\r\nThe page should load successfully, including cable traces for any connected interfaces.\r\n\r\n<!-- What happened instead? 
-->\r\n### Observed Behavior\r\n\r\nNautobot throws a KeyError:\r\n\r\n```\r\n File \"/usr/local/lib/python3.7/site-packages/nautobot/dcim/models/cables.py\", line 465, in get_path\r\n path.append(prefetched[ct_id][object_id])\r\n```\r\n\n", "before_files": [{"content": "import uuid\n\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom nautobot.utilities.utils import hex_to_rgb, lighten_color, rgb_to_hex\n\n\ndef compile_path_node(ct_id, object_id):\n return f\"{ct_id}:{object_id}\"\n\n\ndef decompile_path_node(repr):\n ct_id, object_id = repr.split(\":\")\n # The value is stored as a string, but the lookup later uses UUID objects as keys so we convert it now.\n # Note that the content type ID is still an integer because we have no control over that model.\n return int(ct_id), uuid.UUID(object_id, version=4)\n\n\ndef object_to_path_node(obj):\n \"\"\"\n Return a representation of an object suitable for inclusion in a CablePath path. Node representation is in the\n form <ContentType ID>:<Object ID>.\n \"\"\"\n ct = ContentType.objects.get_for_model(obj)\n return compile_path_node(ct.pk, obj.pk)\n\n\ndef path_node_to_object(repr):\n \"\"\"\n Given the string representation of a path node, return the corresponding instance.\n \"\"\"\n ct_id, object_id = decompile_path_node(repr)\n ct = ContentType.objects.get_for_id(ct_id)\n return ct.model_class().objects.get(pk=object_id)\n\n\ndef cable_status_color_css(record):\n \"\"\"\n Given a record such as an Interface, return the CSS needed to apply appropriate coloring to it.\n \"\"\"\n if not record.cable:\n return \"\"\n # The status colors are for use with labels and such, and tend to be quite bright.\n # For this function we want a much milder, mellower color suitable as a row background.\n base_color = record.cable.get_status_color().strip(\"#\")\n lighter_color = rgb_to_hex(*lighten_color(*hex_to_rgb(base_color), 0.75))\n return f\"background-color: #{lighter_color}\"\n", "path": "nautobot/dcim/utils.py"}, {"content": "import uuid\n\nfrom django.core.exceptions import (\n FieldError,\n MultipleObjectsReturned,\n ObjectDoesNotExist,\n)\nfrom django.db.models import AutoField, ManyToManyField\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom nautobot.utilities.utils import dict_to_filter_params\n\n\nclass ValidatedModelSerializer(serializers.ModelSerializer):\n \"\"\"\n Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during\n validation. 
(DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)\n \"\"\"\n\n def validate(self, data):\n\n # Remove custom fields data and tags (if any) prior to model validation\n attrs = data.copy()\n attrs.pop(\"custom_fields\", None)\n attrs.pop(\"tags\", None)\n\n # Skip ManyToManyFields\n for field in self.Meta.model._meta.get_fields():\n if isinstance(field, ManyToManyField):\n attrs.pop(field.name, None)\n\n # Run clean() on an instance of the model\n if self.instance is None:\n instance = self.Meta.model(**attrs)\n else:\n instance = self.instance\n for k, v in attrs.items():\n setattr(instance, k, v)\n instance.full_clean()\n\n return data\n\n\nclass WritableNestedSerializer(serializers.ModelSerializer):\n \"\"\"\n Returns a nested representation of an object on read, but accepts only a primary key on write.\n \"\"\"\n\n def to_internal_value(self, data):\n\n if data is None:\n return None\n\n # Dictionary of related object attributes\n if isinstance(data, dict):\n params = dict_to_filter_params(data)\n queryset = self.Meta.model.objects\n try:\n return queryset.get(**params)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided attributes: {}\".format(params))\n except MultipleObjectsReturned:\n raise ValidationError(\"Multiple objects match the provided attributes: {}\".format(params))\n except FieldError as e:\n raise ValidationError(e)\n\n queryset = self.Meta.model.objects\n pk = None\n\n if isinstance(self.Meta.model._meta.pk, AutoField):\n # PK is an int for this model. This is usually the User model\n try:\n pk = int(data)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n else:\n # We assume a type of UUIDField for all other models\n\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n pk = uuid.UUID(str(data), version=4)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. 
Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n try:\n return queryset.get(pk=pk)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided ID: {}\".format(pk))\n\n\nclass BulkOperationSerializer(serializers.Serializer):\n id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model\n\n\n#\n# GraphQL, used by the openapi doc, not by the view\n#\n\n\nclass GraphQLAPISerializer(serializers.Serializer):\n query = serializers.CharField(required=True, help_text=\"GraphQL query\")\n variables = serializers.JSONField(required=False, help_text=\"Variables in JSON Format\")\n", "path": "nautobot/core/api/serializers.py"}], "after_files": [{"content": "import uuid\n\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom nautobot.utilities.utils import hex_to_rgb, lighten_color, rgb_to_hex\n\n\ndef compile_path_node(ct_id, object_id):\n return f\"{ct_id}:{object_id}\"\n\n\ndef decompile_path_node(repr):\n ct_id, object_id = repr.split(\":\")\n # The value is stored as a string, but the lookup later uses UUID objects as keys so we convert it now.\n # Note that the content type ID is still an integer because we have no control over that model.\n return int(ct_id), uuid.UUID(object_id)\n\n\ndef object_to_path_node(obj):\n \"\"\"\n Return a representation of an object suitable for inclusion in a CablePath path. Node representation is in the\n form <ContentType ID>:<Object ID>.\n \"\"\"\n ct = ContentType.objects.get_for_model(obj)\n return compile_path_node(ct.pk, obj.pk)\n\n\ndef path_node_to_object(repr):\n \"\"\"\n Given the string representation of a path node, return the corresponding instance.\n \"\"\"\n ct_id, object_id = decompile_path_node(repr)\n ct = ContentType.objects.get_for_id(ct_id)\n return ct.model_class().objects.get(pk=object_id)\n\n\ndef cable_status_color_css(record):\n \"\"\"\n Given a record such as an Interface, return the CSS needed to apply appropriate coloring to it.\n \"\"\"\n if not record.cable:\n return \"\"\n # The status colors are for use with labels and such, and tend to be quite bright.\n # For this function we want a much milder, mellower color suitable as a row background.\n base_color = record.cable.get_status_color().strip(\"#\")\n lighter_color = rgb_to_hex(*lighten_color(*hex_to_rgb(base_color), 0.75))\n return f\"background-color: #{lighter_color}\"\n", "path": "nautobot/dcim/utils.py"}, {"content": "import uuid\n\nfrom django.core.exceptions import (\n FieldError,\n MultipleObjectsReturned,\n ObjectDoesNotExist,\n)\nfrom django.db.models import AutoField, ManyToManyField\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom nautobot.utilities.utils import dict_to_filter_params\n\n\nclass ValidatedModelSerializer(serializers.ModelSerializer):\n \"\"\"\n Extends the built-in ModelSerializer to enforce calling full_clean() on a copy of the associated instance during\n validation. 
(DRF does not do this by default; see https://github.com/encode/django-rest-framework/issues/3144)\n \"\"\"\n\n def validate(self, data):\n\n # Remove custom fields data and tags (if any) prior to model validation\n attrs = data.copy()\n attrs.pop(\"custom_fields\", None)\n attrs.pop(\"tags\", None)\n\n # Skip ManyToManyFields\n for field in self.Meta.model._meta.get_fields():\n if isinstance(field, ManyToManyField):\n attrs.pop(field.name, None)\n\n # Run clean() on an instance of the model\n if self.instance is None:\n instance = self.Meta.model(**attrs)\n else:\n instance = self.instance\n for k, v in attrs.items():\n setattr(instance, k, v)\n instance.full_clean()\n\n return data\n\n\nclass WritableNestedSerializer(serializers.ModelSerializer):\n \"\"\"\n Returns a nested representation of an object on read, but accepts only a primary key on write.\n \"\"\"\n\n def to_internal_value(self, data):\n\n if data is None:\n return None\n\n # Dictionary of related object attributes\n if isinstance(data, dict):\n params = dict_to_filter_params(data)\n queryset = self.Meta.model.objects\n try:\n return queryset.get(**params)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided attributes: {}\".format(params))\n except MultipleObjectsReturned:\n raise ValidationError(\"Multiple objects match the provided attributes: {}\".format(params))\n except FieldError as e:\n raise ValidationError(e)\n\n queryset = self.Meta.model.objects\n pk = None\n\n if isinstance(self.Meta.model._meta.pk, AutoField):\n # PK is an int for this model. This is usually the User model\n try:\n pk = int(data)\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n else:\n # We assume a type of UUIDField for all other models\n\n # PK of related object\n try:\n # Ensure the pk is a valid UUID\n pk = uuid.UUID(str(data))\n except (TypeError, ValueError):\n raise ValidationError(\n \"Related objects must be referenced by ID or by dictionary of attributes. Received an \"\n \"unrecognized value: {}\".format(data)\n )\n\n try:\n return queryset.get(pk=pk)\n except ObjectDoesNotExist:\n raise ValidationError(\"Related object not found using the provided ID: {}\".format(pk))\n\n\nclass BulkOperationSerializer(serializers.Serializer):\n id = serializers.CharField() # This supports both UUIDs and numeric ID for the User model\n\n\n#\n# GraphQL, used by the openapi doc, not by the view\n#\n\n\nclass GraphQLAPISerializer(serializers.Serializer):\n query = serializers.CharField(required=True, help_text=\"GraphQL query\")\n variables = serializers.JSONField(required=False, help_text=\"Variables in JSON Format\")\n", "path": "nautobot/core/api/serializers.py"}]}
2179
276
gh_patches_debug_12679
rasdani/github-patches
git_diff
saulpw__visidata-1824
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- YAML fails reading files with tuples Example file: ``` foo: !!python/tuple - 1 - 2 - 3 ``` Error: ``` ConstructorError: could not determine a constructor for the tag 'tag:yaml.org,2002:python/tuple'· ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `visidata/loaders/yaml.py` Content: ``` 1 from itertools import chain 2 3 from visidata import VisiData, Progress, JsonSheet, vd 4 5 6 @VisiData.api 7 def open_yml(vd, p): 8 return YamlSheet(p.name, source=p) 9 10 VisiData.open_yaml = VisiData.open_yml 11 12 class YamlSheet(JsonSheet): 13 def iterload(self): 14 yaml = vd.importExternal('yaml', 'PyYAML') 15 with self.source.open_text() as fp: 16 documents = yaml.safe_load_all(fp) 17 18 self.columns = [] 19 self._knownKeys.clear() 20 21 # Peek at the document stream to determine how to best DWIM. 22 # 23 # This code is a bit verbose because it avoids slurping the generator 24 # all at once into memory. 25 try: 26 first = next(documents) 27 except StopIteration: 28 # Empty file‽ 29 yield None 30 return 31 32 try: 33 second = next(documents) 34 except StopIteration: 35 if isinstance(first, list): 36 # A file with a single YAML list: yield one row per list item. 37 yield from Progress(first) 38 else: 39 # A file with a single YAML non-list value, e.g a dict. 40 yield first 41 else: 42 # A file containing multiple YAML documents: yield one row per document. 43 yield from Progress(chain([first, second], documents), total=0) 44 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/visidata/loaders/yaml.py b/visidata/loaders/yaml.py --- a/visidata/loaders/yaml.py +++ b/visidata/loaders/yaml.py @@ -12,8 +12,18 @@ class YamlSheet(JsonSheet): def iterload(self): yaml = vd.importExternal('yaml', 'PyYAML') + + class PrettySafeLoader(yaml.SafeLoader): + def construct_python_tuple(self, node): + return tuple(self.construct_sequence(node)) + + PrettySafeLoader.add_constructor( + u'tag:yaml.org,2002:python/tuple', + PrettySafeLoader.construct_python_tuple + ) + with self.source.open_text() as fp: - documents = yaml.safe_load_all(fp) + documents = yaml.load_all(fp, PrettySafeLoader) self.columns = [] self._knownKeys.clear()
{"golden_diff": "diff --git a/visidata/loaders/yaml.py b/visidata/loaders/yaml.py\n--- a/visidata/loaders/yaml.py\n+++ b/visidata/loaders/yaml.py\n@@ -12,8 +12,18 @@\n class YamlSheet(JsonSheet):\n def iterload(self):\n yaml = vd.importExternal('yaml', 'PyYAML')\n+\n+ class PrettySafeLoader(yaml.SafeLoader):\n+ def construct_python_tuple(self, node):\n+ return tuple(self.construct_sequence(node))\n+\n+ PrettySafeLoader.add_constructor(\n+ u'tag:yaml.org,2002:python/tuple',\n+ PrettySafeLoader.construct_python_tuple\n+ )\n+\n with self.source.open_text() as fp:\n- documents = yaml.safe_load_all(fp)\n+ documents = yaml.load_all(fp, PrettySafeLoader)\n \n self.columns = []\n self._knownKeys.clear()\n", "issue": "YAML fails reading files with tuples\nExample file:\r\n\r\n```\r\nfoo: !!python/tuple\r\n- 1\r\n- 2\r\n- 3\r\n```\r\n\r\nError:\r\n\r\n```\r\nConstructorError: could not determine a constructor for the tag 'tag:yaml.org,2002:python/tuple'\u00b7\r\n```\n", "before_files": [{"content": "from itertools import chain\n\nfrom visidata import VisiData, Progress, JsonSheet, vd\n\n\[email protected]\ndef open_yml(vd, p):\n return YamlSheet(p.name, source=p)\n\nVisiData.open_yaml = VisiData.open_yml\n\nclass YamlSheet(JsonSheet):\n def iterload(self):\n yaml = vd.importExternal('yaml', 'PyYAML')\n with self.source.open_text() as fp:\n documents = yaml.safe_load_all(fp)\n\n self.columns = []\n self._knownKeys.clear()\n\n # Peek at the document stream to determine how to best DWIM.\n #\n # This code is a bit verbose because it avoids slurping the generator\n # all at once into memory.\n try:\n first = next(documents)\n except StopIteration:\n # Empty file\u203d\n yield None\n return\n\n try:\n second = next(documents)\n except StopIteration:\n if isinstance(first, list):\n # A file with a single YAML list: yield one row per list item.\n yield from Progress(first)\n else:\n # A file with a single YAML non-list value, e.g a dict.\n yield first\n else:\n # A file containing multiple YAML documents: yield one row per document.\n yield from Progress(chain([first, second], documents), total=0)\n", "path": "visidata/loaders/yaml.py"}], "after_files": [{"content": "from itertools import chain\n\nfrom visidata import VisiData, Progress, JsonSheet, vd\n\n\[email protected]\ndef open_yml(vd, p):\n return YamlSheet(p.name, source=p)\n\nVisiData.open_yaml = VisiData.open_yml\n\nclass YamlSheet(JsonSheet):\n def iterload(self):\n yaml = vd.importExternal('yaml', 'PyYAML')\n\n class PrettySafeLoader(yaml.SafeLoader):\n def construct_python_tuple(self, node):\n return tuple(self.construct_sequence(node))\n\n PrettySafeLoader.add_constructor(\n u'tag:yaml.org,2002:python/tuple',\n PrettySafeLoader.construct_python_tuple\n )\n\n with self.source.open_text() as fp:\n documents = yaml.load_all(fp, PrettySafeLoader)\n\n self.columns = []\n self._knownKeys.clear()\n\n # Peek at the document stream to determine how to best DWIM.\n #\n # This code is a bit verbose because it avoids slurping the generator\n # all at once into memory.\n try:\n first = next(documents)\n except StopIteration:\n # Empty file\u203d\n yield None\n return\n\n try:\n second = next(documents)\n except StopIteration:\n if isinstance(first, list):\n # A file with a single YAML list: yield one row per list item.\n yield from Progress(first)\n else:\n # A file with a single YAML non-list value, e.g a dict.\n yield first\n else:\n # A file containing multiple YAML documents: yield one row per document.\n yield from Progress(chain([first, second], 
documents), total=0)\n", "path": "visidata/loaders/yaml.py"}]}
705
201
gh_patches_debug_19433
rasdani/github-patches
git_diff
TheAlgorithms__Python-5833
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Merge insertion sort doesn't work ``` >>> merge_insertion_sort([0, 1, 2, 3, 4]) [0, 2, 1, 3, 4] ``` Cc @ulwlu #2211 [`sorts/merge_insertion_sort.py`](../blob/master/sorts/merge_insertion_sort.py) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sorts/merge_insertion_sort.py` Content: ``` 1 """ 2 This is a pure Python implementation of the merge-insertion sort algorithm 3 Source: https://en.wikipedia.org/wiki/Merge-insertion_sort 4 5 For doctests run following command: 6 python3 -m doctest -v merge_insertion_sort.py 7 or 8 python -m doctest -v merge_insertion_sort.py 9 10 For manual testing run: 11 python3 merge_insertion_sort.py 12 """ 13 14 from __future__ import annotations 15 16 17 def merge_insertion_sort(collection: list[int]) -> list[int]: 18 """Pure implementation of merge-insertion sort algorithm in Python 19 20 :param collection: some mutable ordered collection with heterogeneous 21 comparable items inside 22 :return: the same collection ordered by ascending 23 24 Examples: 25 >>> merge_insertion_sort([0, 5, 3, 2, 2]) 26 [0, 2, 2, 3, 5] 27 28 >>> merge_insertion_sort([99]) 29 [99] 30 31 >>> merge_insertion_sort([-2, -5, -45]) 32 [-45, -5, -2] 33 """ 34 35 def binary_search_insertion(sorted_list, item): 36 left = 0 37 right = len(sorted_list) - 1 38 while left <= right: 39 middle = (left + right) // 2 40 if left == right: 41 if sorted_list[middle] < item: 42 left = middle + 1 43 break 44 elif sorted_list[middle] < item: 45 left = middle + 1 46 else: 47 right = middle - 1 48 sorted_list.insert(left, item) 49 return sorted_list 50 51 def sortlist_2d(list_2d): 52 def merge(left, right): 53 result = [] 54 while left and right: 55 if left[0][0] < right[0][0]: 56 result.append(left.pop(0)) 57 else: 58 result.append(right.pop(0)) 59 return result + left + right 60 61 length = len(list_2d) 62 if length <= 1: 63 return list_2d 64 middle = length // 2 65 return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:])) 66 67 if len(collection) <= 1: 68 return collection 69 70 """ 71 Group the items into two pairs, and leave one element if there is a last odd item. 72 73 Example: [999, 100, 75, 40, 10000] 74 -> [999, 100], [75, 40]. Leave 10000. 75 """ 76 two_paired_list = [] 77 has_last_odd_item = False 78 for i in range(0, len(collection), 2): 79 if i == len(collection) - 1: 80 has_last_odd_item = True 81 else: 82 """ 83 Sort two-pairs in each groups. 84 85 Example: [999, 100], [75, 40] 86 -> [100, 999], [40, 75] 87 """ 88 if collection[i] < collection[i + 1]: 89 two_paired_list.append([collection[i], collection[i + 1]]) 90 else: 91 two_paired_list.append([collection[i + 1], collection[i]]) 92 93 """ 94 Sort two_paired_list. 95 96 Example: [100, 999], [40, 75] 97 -> [40, 75], [100, 999] 98 """ 99 sorted_list_2d = sortlist_2d(two_paired_list) 100 101 """ 102 40 < 100 is sure because it has already been sorted. 103 Generate the sorted_list of them so that you can avoid unnecessary comparison. 104 105 Example: 106 group0 group1 107 40 100 108 75 999 109 -> 110 group0 group1 111 [40, 100] 112 75 999 113 """ 114 result = [i[0] for i in sorted_list_2d] 115 116 """ 117 100 < 999 is sure because it has already been sorted. 118 Put 999 in last of the sorted_list so that you can avoid unnecessary comparison. 
119 120 Example: 121 group0 group1 122 [40, 100] 123 75 999 124 -> 125 group0 group1 126 [40, 100, 999] 127 75 128 """ 129 result.append(sorted_list_2d[-1][1]) 130 131 """ 132 Insert the last odd item left if there is. 133 134 Example: 135 group0 group1 136 [40, 100, 999] 137 75 138 -> 139 group0 group1 140 [40, 100, 999, 10000] 141 75 142 """ 143 if has_last_odd_item: 144 pivot = collection[-1] 145 result = binary_search_insertion(result, pivot) 146 147 """ 148 Insert the remaining items. 149 In this case, 40 < 75 is sure because it has already been sorted. 150 Therefore, you only need to insert 75 into [100, 999, 10000], 151 so that you can avoid unnecessary comparison. 152 153 Example: 154 group0 group1 155 [40, 100, 999, 10000] 156 ^ You don't need to compare with this as 40 < 75 is already sure. 157 75 158 -> 159 [40, 75, 100, 999, 10000] 160 """ 161 is_last_odd_item_inserted_before_this_index = False 162 for i in range(len(sorted_list_2d) - 1): 163 if result[i] == collection[-i]: 164 is_last_odd_item_inserted_before_this_index = True 165 pivot = sorted_list_2d[i][1] 166 # If last_odd_item is inserted before the item's index, 167 # you should forward index one more. 168 if is_last_odd_item_inserted_before_this_index: 169 result = result[: i + 2] + binary_search_insertion(result[i + 2 :], pivot) 170 else: 171 result = result[: i + 1] + binary_search_insertion(result[i + 1 :], pivot) 172 173 return result 174 175 176 if __name__ == "__main__": 177 user_input = input("Enter numbers separated by a comma:\n").strip() 178 unsorted = [int(item) for item in user_input.split(",")] 179 print(merge_insertion_sort(unsorted)) 180 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sorts/merge_insertion_sort.py b/sorts/merge_insertion_sort.py --- a/sorts/merge_insertion_sort.py +++ b/sorts/merge_insertion_sort.py @@ -30,6 +30,12 @@ >>> merge_insertion_sort([-2, -5, -45]) [-45, -5, -2] + + Testing with all permutations on range(0,5): + >>> import itertools + >>> permutations = list(itertools.permutations([0, 1, 2, 3, 4])) + >>> all(merge_insertion_sort(p) == [0, 1, 2, 3, 4] for p in permutations) + True """ def binary_search_insertion(sorted_list, item): @@ -160,7 +166,7 @@ """ is_last_odd_item_inserted_before_this_index = False for i in range(len(sorted_list_2d) - 1): - if result[i] == collection[-i]: + if result[i] == collection[-1] and has_last_odd_item: is_last_odd_item_inserted_before_this_index = True pivot = sorted_list_2d[i][1] # If last_odd_item is inserted before the item's index,
{"golden_diff": "diff --git a/sorts/merge_insertion_sort.py b/sorts/merge_insertion_sort.py\n--- a/sorts/merge_insertion_sort.py\n+++ b/sorts/merge_insertion_sort.py\n@@ -30,6 +30,12 @@\n \n >>> merge_insertion_sort([-2, -5, -45])\n [-45, -5, -2]\n+\n+ Testing with all permutations on range(0,5):\n+ >>> import itertools\n+ >>> permutations = list(itertools.permutations([0, 1, 2, 3, 4]))\n+ >>> all(merge_insertion_sort(p) == [0, 1, 2, 3, 4] for p in permutations)\n+ True\n \"\"\"\n \n def binary_search_insertion(sorted_list, item):\n@@ -160,7 +166,7 @@\n \"\"\"\n is_last_odd_item_inserted_before_this_index = False\n for i in range(len(sorted_list_2d) - 1):\n- if result[i] == collection[-i]:\n+ if result[i] == collection[-1] and has_last_odd_item:\n is_last_odd_item_inserted_before_this_index = True\n pivot = sorted_list_2d[i][1]\n # If last_odd_item is inserted before the item's index,\n", "issue": "Merge insertion sort doesn't work\n```\r\n>>> merge_insertion_sort([0, 1, 2, 3, 4])\r\n[0, 2, 1, 3, 4]\r\n```\r\n\r\nCc @ulwlu #2211 [`sorts/merge_insertion_sort.py`](../blob/master/sorts/merge_insertion_sort.py)\n", "before_files": [{"content": "\"\"\"\nThis is a pure Python implementation of the merge-insertion sort algorithm\nSource: https://en.wikipedia.org/wiki/Merge-insertion_sort\n\nFor doctests run following command:\npython3 -m doctest -v merge_insertion_sort.py\nor\npython -m doctest -v merge_insertion_sort.py\n\nFor manual testing run:\npython3 merge_insertion_sort.py\n\"\"\"\n\nfrom __future__ import annotations\n\n\ndef merge_insertion_sort(collection: list[int]) -> list[int]:\n \"\"\"Pure implementation of merge-insertion sort algorithm in Python\n\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n Examples:\n >>> merge_insertion_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n\n >>> merge_insertion_sort([99])\n [99]\n\n >>> merge_insertion_sort([-2, -5, -45])\n [-45, -5, -2]\n \"\"\"\n\n def binary_search_insertion(sorted_list, item):\n left = 0\n right = len(sorted_list) - 1\n while left <= right:\n middle = (left + right) // 2\n if left == right:\n if sorted_list[middle] < item:\n left = middle + 1\n break\n elif sorted_list[middle] < item:\n left = middle + 1\n else:\n right = middle - 1\n sorted_list.insert(left, item)\n return sorted_list\n\n def sortlist_2d(list_2d):\n def merge(left, right):\n result = []\n while left and right:\n if left[0][0] < right[0][0]:\n result.append(left.pop(0))\n else:\n result.append(right.pop(0))\n return result + left + right\n\n length = len(list_2d)\n if length <= 1:\n return list_2d\n middle = length // 2\n return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:]))\n\n if len(collection) <= 1:\n return collection\n\n \"\"\"\n Group the items into two pairs, and leave one element if there is a last odd item.\n\n Example: [999, 100, 75, 40, 10000]\n -> [999, 100], [75, 40]. 
Leave 10000.\n \"\"\"\n two_paired_list = []\n has_last_odd_item = False\n for i in range(0, len(collection), 2):\n if i == len(collection) - 1:\n has_last_odd_item = True\n else:\n \"\"\"\n Sort two-pairs in each groups.\n\n Example: [999, 100], [75, 40]\n -> [100, 999], [40, 75]\n \"\"\"\n if collection[i] < collection[i + 1]:\n two_paired_list.append([collection[i], collection[i + 1]])\n else:\n two_paired_list.append([collection[i + 1], collection[i]])\n\n \"\"\"\n Sort two_paired_list.\n\n Example: [100, 999], [40, 75]\n -> [40, 75], [100, 999]\n \"\"\"\n sorted_list_2d = sortlist_2d(two_paired_list)\n\n \"\"\"\n 40 < 100 is sure because it has already been sorted.\n Generate the sorted_list of them so that you can avoid unnecessary comparison.\n\n Example:\n group0 group1\n 40 100\n 75 999\n ->\n group0 group1\n [40, 100]\n 75 999\n \"\"\"\n result = [i[0] for i in sorted_list_2d]\n\n \"\"\"\n 100 < 999 is sure because it has already been sorted.\n Put 999 in last of the sorted_list so that you can avoid unnecessary comparison.\n\n Example:\n group0 group1\n [40, 100]\n 75 999\n ->\n group0 group1\n [40, 100, 999]\n 75\n \"\"\"\n result.append(sorted_list_2d[-1][1])\n\n \"\"\"\n Insert the last odd item left if there is.\n\n Example:\n group0 group1\n [40, 100, 999]\n 75\n ->\n group0 group1\n [40, 100, 999, 10000]\n 75\n \"\"\"\n if has_last_odd_item:\n pivot = collection[-1]\n result = binary_search_insertion(result, pivot)\n\n \"\"\"\n Insert the remaining items.\n In this case, 40 < 75 is sure because it has already been sorted.\n Therefore, you only need to insert 75 into [100, 999, 10000],\n so that you can avoid unnecessary comparison.\n\n Example:\n group0 group1\n [40, 100, 999, 10000]\n ^ You don't need to compare with this as 40 < 75 is already sure.\n 75\n ->\n [40, 75, 100, 999, 10000]\n \"\"\"\n is_last_odd_item_inserted_before_this_index = False\n for i in range(len(sorted_list_2d) - 1):\n if result[i] == collection[-i]:\n is_last_odd_item_inserted_before_this_index = True\n pivot = sorted_list_2d[i][1]\n # If last_odd_item is inserted before the item's index,\n # you should forward index one more.\n if is_last_odd_item_inserted_before_this_index:\n result = result[: i + 2] + binary_search_insertion(result[i + 2 :], pivot)\n else:\n result = result[: i + 1] + binary_search_insertion(result[i + 1 :], pivot)\n\n return result\n\n\nif __name__ == \"__main__\":\n user_input = input(\"Enter numbers separated by a comma:\\n\").strip()\n unsorted = [int(item) for item in user_input.split(\",\")]\n print(merge_insertion_sort(unsorted))\n", "path": "sorts/merge_insertion_sort.py"}], "after_files": [{"content": "\"\"\"\nThis is a pure Python implementation of the merge-insertion sort algorithm\nSource: https://en.wikipedia.org/wiki/Merge-insertion_sort\n\nFor doctests run following command:\npython3 -m doctest -v merge_insertion_sort.py\nor\npython -m doctest -v merge_insertion_sort.py\n\nFor manual testing run:\npython3 merge_insertion_sort.py\n\"\"\"\n\nfrom __future__ import annotations\n\n\ndef merge_insertion_sort(collection: list[int]) -> list[int]:\n \"\"\"Pure implementation of merge-insertion sort algorithm in Python\n\n :param collection: some mutable ordered collection with heterogeneous\n comparable items inside\n :return: the same collection ordered by ascending\n\n Examples:\n >>> merge_insertion_sort([0, 5, 3, 2, 2])\n [0, 2, 2, 3, 5]\n\n >>> merge_insertion_sort([99])\n [99]\n\n >>> merge_insertion_sort([-2, -5, -45])\n [-45, -5, -2]\n\n Testing with all 
permutations on range(0,5):\n >>> import itertools\n >>> permutations = list(itertools.permutations([0, 1, 2, 3, 4]))\n >>> all(merge_insertion_sort(p) == [0, 1, 2, 3, 4] for p in permutations)\n True\n \"\"\"\n\n def binary_search_insertion(sorted_list, item):\n left = 0\n right = len(sorted_list) - 1\n while left <= right:\n middle = (left + right) // 2\n if left == right:\n if sorted_list[middle] < item:\n left = middle + 1\n break\n elif sorted_list[middle] < item:\n left = middle + 1\n else:\n right = middle - 1\n sorted_list.insert(left, item)\n return sorted_list\n\n def sortlist_2d(list_2d):\n def merge(left, right):\n result = []\n while left and right:\n if left[0][0] < right[0][0]:\n result.append(left.pop(0))\n else:\n result.append(right.pop(0))\n return result + left + right\n\n length = len(list_2d)\n if length <= 1:\n return list_2d\n middle = length // 2\n return merge(sortlist_2d(list_2d[:middle]), sortlist_2d(list_2d[middle:]))\n\n if len(collection) <= 1:\n return collection\n\n \"\"\"\n Group the items into two pairs, and leave one element if there is a last odd item.\n\n Example: [999, 100, 75, 40, 10000]\n -> [999, 100], [75, 40]. Leave 10000.\n \"\"\"\n two_paired_list = []\n has_last_odd_item = False\n for i in range(0, len(collection), 2):\n if i == len(collection) - 1:\n has_last_odd_item = True\n else:\n \"\"\"\n Sort two-pairs in each groups.\n\n Example: [999, 100], [75, 40]\n -> [100, 999], [40, 75]\n \"\"\"\n if collection[i] < collection[i + 1]:\n two_paired_list.append([collection[i], collection[i + 1]])\n else:\n two_paired_list.append([collection[i + 1], collection[i]])\n\n \"\"\"\n Sort two_paired_list.\n\n Example: [100, 999], [40, 75]\n -> [40, 75], [100, 999]\n \"\"\"\n sorted_list_2d = sortlist_2d(two_paired_list)\n\n \"\"\"\n 40 < 100 is sure because it has already been sorted.\n Generate the sorted_list of them so that you can avoid unnecessary comparison.\n\n Example:\n group0 group1\n 40 100\n 75 999\n ->\n group0 group1\n [40, 100]\n 75 999\n \"\"\"\n result = [i[0] for i in sorted_list_2d]\n\n \"\"\"\n 100 < 999 is sure because it has already been sorted.\n Put 999 in last of the sorted_list so that you can avoid unnecessary comparison.\n\n Example:\n group0 group1\n [40, 100]\n 75 999\n ->\n group0 group1\n [40, 100, 999]\n 75\n \"\"\"\n result.append(sorted_list_2d[-1][1])\n\n \"\"\"\n Insert the last odd item left if there is.\n\n Example:\n group0 group1\n [40, 100, 999]\n 75\n ->\n group0 group1\n [40, 100, 999, 10000]\n 75\n \"\"\"\n if has_last_odd_item:\n pivot = collection[-1]\n result = binary_search_insertion(result, pivot)\n\n \"\"\"\n Insert the remaining items.\n In this case, 40 < 75 is sure because it has already been sorted.\n Therefore, you only need to insert 75 into [100, 999, 10000],\n so that you can avoid unnecessary comparison.\n\n Example:\n group0 group1\n [40, 100, 999, 10000]\n ^ You don't need to compare with this as 40 < 75 is already sure.\n 75\n ->\n [40, 75, 100, 999, 10000]\n \"\"\"\n is_last_odd_item_inserted_before_this_index = False\n for i in range(len(sorted_list_2d) - 1):\n if result[i] == collection[-1] and has_last_odd_item:\n is_last_odd_item_inserted_before_this_index = True\n pivot = sorted_list_2d[i][1]\n # If last_odd_item is inserted before the item's index,\n # you should forward index one more.\n if is_last_odd_item_inserted_before_this_index:\n result = result[: i + 2] + binary_search_insertion(result[i + 2 :], pivot)\n else:\n result = result[: i + 1] + binary_search_insertion(result[i + 1 
:], pivot)\n\n return result\n\n\nif __name__ == \"__main__\":\n user_input = input(\"Enter numbers separated by a comma:\\n\").strip()\n unsorted = [int(item) for item in user_input.split(\",\")]\n print(merge_insertion_sort(unsorted))\n", "path": "sorts/merge_insertion_sort.py"}]}
num_tokens: 2,289
num_tokens_diff: 292
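The one-character root cause of the sorting bug in the record above is Python's negative-zero indexing: `collection[-0]` is simply `collection[0]`, so the original guard `result[i] == collection[-i]` can match the smallest element instead of the unpaired last item. A minimal sketch reproducing the spurious match on the failing input from the issue (the `result` value is hand-traced from the algorithm at that point, not captured from a run):

```python
collection = [0, 1, 2, 3, 4]      # the failing input reported in the issue
result = [0, 2, 3, 4]             # partial result just before the insertion loop (hand-traced)
has_last_odd_item = True          # 4 was the unpaired last item
i = 0

# -0 == 0, so collection[-i] is the *first* element when i == 0
assert collection[-0] == collection[0]

buggy_guard = result[i] == collection[-i]                        # True: 0 == 0, spurious
fixed_guard = result[i] == collection[-1] and has_last_odd_item  # False: 0 != 4
print(buggy_guard, fixed_guard)   # True False
```

The spurious `True` shifts every later binary insertion by one slot, which is exactly why `merge_insertion_sort([0, 1, 2, 3, 4])` came back as `[0, 2, 1, 3, 4]`.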
problem_id: gh_patches_debug_18373
source: rasdani/github-patches
task_type: git_diff
in_source_id: PlasmaPy__PlasmaPy-971
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Notebook for ExB drift A Jupyter notebook needs to be created in our documentation that not only shows how to use `plamsapy.formulary.drifts.ExB_drift()` but that also describes the physics behind the drift. The notebook has to be placed in `/docs/notebooks/` or one of it's sub-directories. Then the notebook needs to be properly reference in the documentation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 #!/usr/bin/env python3.6 2 # -*- coding: utf-8 -*- 3 # 4 # PlasmaPy documentation build configuration file, created by 5 # sphinx-quickstart on Wed May 31 18:16:46 2017. 6 # 7 # This file is execfile()d with the current directory set to its 8 # containing dir. 9 # 10 # Note that not all possible configuration values are present in this 11 # autogenerated file. 12 # 13 # All configuration values have a default; values that are commented out 14 # serve to show the default. 15 16 # If extensions (or modules to document with autodoc) are in another directory, 17 # add these directories to sys.path here. If the directory is relative to the 18 # documentation root, use os.path.abspath to make it absolute, like shown here. 19 # 20 21 import os 22 import sys 23 24 from datetime import datetime 25 from pkg_resources import parse_version 26 from sphinx.application import Sphinx 27 28 sys.path.insert(0, os.path.abspath("..")) 29 30 from plasmapy import __version__ as release 31 32 # -- General configuration ------------------------------------------------ 33 34 # If your documentation needs a minimal Sphinx version, state it here. 35 # 36 # needs_sphinx = '1.0' 37 38 # Add any Sphinx extension module names here, as strings. They can be 39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 40 # ones. 41 extensions = [ 42 "sphinx.ext.autodoc", 43 "sphinx.ext.intersphinx", 44 "sphinx.ext.graphviz", 45 "sphinx.ext.mathjax", 46 "sphinx.ext.napoleon", 47 "sphinx_automodapi.automodapi", 48 "sphinx_automodapi.smart_resolver", 49 "nbsphinx", 50 "sphinx_copybutton", 51 "sphinx_gallery.load_style", 52 ] 53 54 intersphinx_mapping = { 55 "python": ("https://docs.python.org/3", None), 56 "numpy": ("https://numpy.org/doc/stable/", None), 57 "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None), 58 "pandas": ("http://pandas.pydata.org/pandas-docs/stable/", None), 59 "astropy": ("http://docs.astropy.org/en/stable/", None), 60 } 61 62 autoclass_content = "both" 63 64 # Add any paths that contain templates here, relative to this directory. 65 templates_path = ["_templates"] 66 67 # The suffix(es) of source filenames. 68 # You can specify multiple suffix as a list of string: 69 # 70 # source_suffix = ['.rst', '.md'] 71 source_suffix = ".rst" 72 73 # The master toctree document. 74 master_doc = "index" 75 76 # General information about the project. 77 project = "PlasmaPy" 78 author = "PlasmaPy Community" 79 copyright = f"2015-{datetime.utcnow().year}, {author}" 80 81 82 # The version info for the project you're documenting, acts as replacement for 83 # |version| and |release|, also used in various other places throughout the 84 # built documents. 85 # 86 # The full version, including alpha/beta/rc tags. 87 # Note: If plasmapy.__version__ can not be defined then it is set to 'unknown'. 
88 # However, release needs to be a semantic style version number, so set 89 # the 'unknown' case to ''. 90 release = "" if release == "unknown" else release 91 if release == "unknown": 92 release = version = revision = "" 93 else: 94 pv = parse_version(release) 95 release = pv.public 96 version = ".".join(release.split(".")[:2]) # short X.Y version 97 if pv.local is not None: 98 revision = pv.local[1:] # revision number w/o the leading g 99 else: 100 revision = "" 101 102 103 # The language for content autogenerated by Sphinx. Refer to documentation 104 # for a list of supported languages. 105 # 106 # This is also used if you do content translation via gettext catalogs. 107 # Usually you set "language" from the command line for these cases. 108 language = None 109 110 # List of patterns, relative to source directory, that match files and 111 # directories to ignore when looking for source files. 112 # This patterns also effect to html_static_path and html_extra_path 113 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 114 115 # The name of the Pygments (syntax highlighting) style to use. 116 pygments_style = "sphinx" 117 118 # If true, `todo` and `todoList` produce output, else they produce nothing. 119 todo_include_todos = False 120 121 default_role = "obj" 122 123 # -- Options for HTML output ---------------------------------------------- 124 125 # The theme to use for HTML and HTML Help pages. See the documentation for 126 # a list of builtin themes. 127 # 128 # html_theme = 'alabaster' 129 # html_theme = 'traditional' 130 # html_theme = 'agogo' 131 html_theme = "sphinx_rtd_theme" 132 133 # Theme options are theme-specific and customize the look and feel of a theme 134 # further. For a list of options available for each theme, see the 135 # documentation. 136 # 137 html_logo = "./_static/with-text-light-190px.png" 138 html_theme_options = { 139 "logo_only": True, 140 # 141 # TOC options 142 # https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html#theme-options 143 "includehidden": False, 144 } 145 146 # Add any paths that contain custom static files (such as style sheets) here, 147 # relative to this directory. They are copied after the builtin static files, 148 # so a file named "default.css" will overwrite the builtin "default.css". 149 html_static_path = ["_static"] 150 151 # A list of prefixes that are ignored for sorting the Python module 152 # index (e.g., if this is set to ['foo.'], then foo.bar is shown under 153 # B, not F). 154 modindex_common_prefix = ["plasmapy."] 155 156 # -- Options for HTMLHelp output ------------------------------------------ 157 158 # Output file base name for HTML help builder. 159 htmlhelp_basename = "PlasmaPydoc" 160 161 162 # -- Options for LaTeX output --------------------------------------------- 163 164 latex_elements = { 165 # The paper size ('letterpaper' or 'a4paper'). 166 # 'papersize': 'letterpaper', 167 # 168 # The font size ('10pt', '11pt' or '12pt'). 169 # 'pointsize': '10pt', 170 # 171 # Additional stuff for the LaTeX preamble. 172 # 'preamble': '', 173 # 174 # Latex figure (float) alignment 175 # 'figure_align': 'htbp', 176 } 177 178 # Grouping the document tree into LaTeX files. List of tuples 179 # (source start file, target name, title, 180 # author, documentclass [howto, manual, or own class]). 
181 latex_documents = [ 182 ( 183 master_doc, 184 "PlasmaPy.tex", 185 "PlasmaPy Documentation", 186 "PlasmaPy Community", 187 "manual", 188 ) 189 ] 190 191 192 # -- Options for manual page output --------------------------------------- 193 194 # One entry per manual page. List of tuples 195 # (source start file, name, description, authors, manual section). 196 man_pages = [(master_doc, "plasmapy", "PlasmaPy Documentation", [author], 1)] 197 198 199 # -- Options for Texinfo output ------------------------------------------- 200 201 # Grouping the document tree into Texinfo files. List of tuples 202 # (source start file, target name, title, author, 203 # dir menu entry, description, category) 204 texinfo_documents = [ 205 ( 206 master_doc, 207 "PlasmaPy", 208 "PlasmaPy Documentation", 209 author, 210 "PlasmaPy", 211 "Python package for plasma physics", 212 "Miscellaneous", 213 ) 214 ] 215 216 html_favicon = "./_static/icon.ico" 217 218 219 # -- NBSphinx options 220 221 nbsphinx_thumbnails = {"notebooks/*": "_images/graphic-circular.png"} 222 223 # adapted from https://github.com/spatialaudio/nbsphinx/blob/58b8034dd9d7349c1b4ac3e7a7d6baa87ab2a6a9/doc/conf.py 224 225 # This is processed by Jinja2 and inserted before each notebook 226 nbsphinx_prolog = r""" 227 {% set docname = 'docs/' + env.doc2path(env.docname, base=None) %} 228 {% set nb_base = 'tree' if env.config.revision else 'blob' %} 229 {% set nb_where = env.config.revision if env.config.revision else 'master' %} 230 231 .. raw:: html 232 233 <div class="admonition note"> 234 <p style="margin-bottom:0px"> 235 This page was generated by 236 <a href="https://nbsphinx.readthedocs.io/">nbsphinx</a> from 237 <a class="reference external" href="https://github.com/PlasmaPy/PlasmaPy/{{ nb_base|e }}/{{ nb_where|e }}/{{ docname|e }}">{{ docname|e }}</a>. 238 <br> 239 Interactive online version: 240 <a href="https://mybinder.org/v2/gh/PlasmaPy/PlasmaPy/{{ nb_where|e }}/?filepath={{ docname|e }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>. 241 </p> 242 </div> 243 244 .. raw:: latex 245 246 \nbsphinxstartnotebook{\scriptsize\noindent\strut 247 \textcolor{gray}{The following section was generated from 248 \sphinxcode{\sphinxupquote{\strut {{ docname | escape_latex }}}} \dotfill}} 249 """ 250 251 252 def setup(app: Sphinx) -> None: 253 app.add_config_value("revision", "", True) 254 app.add_stylesheet("rtd_theme_overrides.css") 255 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -49,6 +49,7 @@ "nbsphinx", "sphinx_copybutton", "sphinx_gallery.load_style", + "IPython.sphinxext.ipython_console_highlighting", ] intersphinx_mapping = { @@ -110,10 +111,8 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store", "**.ipynb_checkpoints"] -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -49,6 +49,7 @@\n \"nbsphinx\",\n \"sphinx_copybutton\",\n \"sphinx_gallery.load_style\",\n+ \"IPython.sphinxext.ipython_console_highlighting\",\n ]\n \n intersphinx_mapping = {\n@@ -110,10 +111,8 @@\n # List of patterns, relative to source directory, that match files and\n # directories to ignore when looking for source files.\n # This patterns also effect to html_static_path and html_extra_path\n-exclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n+exclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n \n-# The name of the Pygments (syntax highlighting) style to use.\n-pygments_style = \"sphinx\"\n \n # If true, `todo` and `todoList` produce output, else they produce nothing.\n todo_include_todos = False\n", "issue": "Notebook for ExB drift\nA Jupyter notebook needs to be created in our documentation that not only shows how to use `plamsapy.formulary.drifts.ExB_drift()` but that also describes the physics behind the drift. The notebook has to be placed in `/docs/notebooks/` or one of it's sub-directories. Then the notebook needs to be properly reference in the documentation.\n", "before_files": [{"content": "#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n#\n# PlasmaPy documentation build configuration file, created by\n# sphinx-quickstart on Wed May 31 18:16:46 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport sys\n\nfrom datetime import datetime\nfrom pkg_resources import parse_version\nfrom sphinx.application import Sphinx\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nfrom plasmapy import __version__ as release\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.graphviz\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx_automodapi.automodapi\",\n \"sphinx_automodapi.smart_resolver\",\n \"nbsphinx\",\n \"sphinx_copybutton\",\n \"sphinx_gallery.load_style\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n \"astropy\": (\"http://docs.astropy.org/en/stable/\", None),\n}\n\nautoclass_content = \"both\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"PlasmaPy\"\nauthor = \"PlasmaPy Community\"\ncopyright = f\"2015-{datetime.utcnow().year}, {author}\"\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The full version, including alpha/beta/rc tags.\n# Note: If plasmapy.__version__ can not be defined then it is set to 'unknown'.\n# However, release needs to be a semantic style version number, so set\n# the 'unknown' case to ''.\nrelease = \"\" if release == \"unknown\" else release\nif release == \"unknown\":\n release = version = revision = \"\"\nelse:\n pv = parse_version(release)\n release = pv.public\n version = \".\".join(release.split(\".\")[:2]) # short X.Y version\n if pv.local is not None:\n revision = pv.local[1:] # revision number w/o the leading g\n else:\n revision = \"\"\n\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\ndefault_role = \"obj\"\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n# html_theme = 'traditional'\n# html_theme = 'agogo'\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\nhtml_logo = \"./_static/with-text-light-190px.png\"\nhtml_theme_options = {\n \"logo_only\": True,\n #\n # TOC options\n # https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html#theme-options\n \"includehidden\": False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# A list of prefixes that are ignored for sorting the Python module\n# index (e.g., if this is set to ['foo.'], then foo.bar is shown under\n# B, not F).\nmodindex_common_prefix = [\"plasmapy.\"]\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"PlasmaPydoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n #\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n #\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n #\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"PlasmaPy.tex\",\n \"PlasmaPy Documentation\",\n \"PlasmaPy Community\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"plasmapy\", \"PlasmaPy Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"PlasmaPy\",\n \"PlasmaPy Documentation\",\n author,\n \"PlasmaPy\",\n \"Python package for plasma physics\",\n \"Miscellaneous\",\n )\n]\n\nhtml_favicon = \"./_static/icon.ico\"\n\n\n# -- NBSphinx options\n\nnbsphinx_thumbnails = {\"notebooks/*\": \"_images/graphic-circular.png\"}\n\n# adapted from https://github.com/spatialaudio/nbsphinx/blob/58b8034dd9d7349c1b4ac3e7a7d6baa87ab2a6a9/doc/conf.py\n\n# This is processed by Jinja2 and inserted before each notebook\nnbsphinx_prolog = r\"\"\"\n{% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}\n{% set nb_base = 'tree' if env.config.revision else 'blob' %}\n{% set nb_where = env.config.revision if env.config.revision else 'master' %}\n\n.. raw:: html\n\n <div class=\"admonition note\">\n <p style=\"margin-bottom:0px\">\n This page was generated by\n <a href=\"https://nbsphinx.readthedocs.io/\">nbsphinx</a> from\n <a class=\"reference external\" href=\"https://github.com/PlasmaPy/PlasmaPy/{{ nb_base|e }}/{{ nb_where|e }}/{{ docname|e }}\">{{ docname|e }}</a>.\n <br>\n Interactive online version:\n <a href=\"https://mybinder.org/v2/gh/PlasmaPy/PlasmaPy/{{ nb_where|e }}/?filepath={{ docname|e }}\"><img alt=\"Binder badge\" src=\"https://mybinder.org/badge_logo.svg\" style=\"vertical-align:text-bottom\"></a>.\n </p>\n </div>\n\n.. 
raw:: latex\n\n \\nbsphinxstartnotebook{\\scriptsize\\noindent\\strut\n \\textcolor{gray}{The following section was generated from\n \\sphinxcode{\\sphinxupquote{\\strut {{ docname | escape_latex }}}} \\dotfill}}\n\"\"\"\n\n\ndef setup(app: Sphinx) -> None:\n app.add_config_value(\"revision\", \"\", True)\n app.add_stylesheet(\"rtd_theme_overrides.css\")\n", "path": "docs/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3.6\n# -*- coding: utf-8 -*-\n#\n# PlasmaPy documentation build configuration file, created by\n# sphinx-quickstart on Wed May 31 18:16:46 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n\nimport os\nimport sys\n\nfrom datetime import datetime\nfrom pkg_resources import parse_version\nfrom sphinx.application import Sphinx\n\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nfrom plasmapy import __version__ as release\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.graphviz\",\n \"sphinx.ext.mathjax\",\n \"sphinx.ext.napoleon\",\n \"sphinx_automodapi.automodapi\",\n \"sphinx_automodapi.smart_resolver\",\n \"nbsphinx\",\n \"sphinx_copybutton\",\n \"sphinx_gallery.load_style\",\n \"IPython.sphinxext.ipython_console_highlighting\",\n]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"pandas\": (\"http://pandas.pydata.org/pandas-docs/stable/\", None),\n \"astropy\": (\"http://docs.astropy.org/en/stable/\", None),\n}\n\nautoclass_content = \"both\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = \"PlasmaPy\"\nauthor = \"PlasmaPy Community\"\ncopyright = f\"2015-{datetime.utcnow().year}, {author}\"\n\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The full version, including alpha/beta/rc tags.\n# Note: If plasmapy.__version__ can not be defined then it is set to 'unknown'.\n# However, release needs to be a semantic style version number, so set\n# the 'unknown' case to ''.\nrelease = \"\" if release == \"unknown\" else release\nif release == \"unknown\":\n release = version = revision = \"\"\nelse:\n pv = parse_version(release)\n release = pv.public\n version = \".\".join(release.split(\".\")[:2]) 
# short X.Y version\n if pv.local is not None:\n revision = pv.local[1:] # revision number w/o the leading g\n else:\n revision = \"\"\n\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\", \"**.ipynb_checkpoints\"]\n\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\ndefault_role = \"obj\"\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\n# html_theme = 'alabaster'\n# html_theme = 'traditional'\n# html_theme = 'agogo'\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\nhtml_logo = \"./_static/with-text-light-190px.png\"\nhtml_theme_options = {\n \"logo_only\": True,\n #\n # TOC options\n # https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html#theme-options\n \"includehidden\": False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\n\n# A list of prefixes that are ignored for sorting the Python module\n# index (e.g., if this is set to ['foo.'], then foo.bar is shown under\n# B, not F).\nmodindex_common_prefix = [\"plasmapy.\"]\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"PlasmaPydoc\"\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n # 'papersize': 'letterpaper',\n #\n # The font size ('10pt', '11pt' or '12pt').\n # 'pointsize': '10pt',\n #\n # Additional stuff for the LaTeX preamble.\n # 'preamble': '',\n #\n # Latex figure (float) alignment\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n \"PlasmaPy.tex\",\n \"PlasmaPy Documentation\",\n \"PlasmaPy Community\",\n \"manual\",\n )\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"plasmapy\", \"PlasmaPy Documentation\", [author], 1)]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"PlasmaPy\",\n \"PlasmaPy Documentation\",\n author,\n \"PlasmaPy\",\n \"Python package for plasma physics\",\n \"Miscellaneous\",\n )\n]\n\nhtml_favicon = \"./_static/icon.ico\"\n\n\n# -- NBSphinx options\n\nnbsphinx_thumbnails = {\"notebooks/*\": \"_images/graphic-circular.png\"}\n\n# adapted from https://github.com/spatialaudio/nbsphinx/blob/58b8034dd9d7349c1b4ac3e7a7d6baa87ab2a6a9/doc/conf.py\n\n# This is processed by Jinja2 and inserted before each notebook\nnbsphinx_prolog = r\"\"\"\n{% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}\n{% set nb_base = 'tree' if env.config.revision else 'blob' %}\n{% set nb_where = env.config.revision if env.config.revision else 'master' %}\n\n.. raw:: html\n\n <div class=\"admonition note\">\n <p style=\"margin-bottom:0px\">\n This page was generated by\n <a href=\"https://nbsphinx.readthedocs.io/\">nbsphinx</a> from\n <a class=\"reference external\" href=\"https://github.com/PlasmaPy/PlasmaPy/{{ nb_base|e }}/{{ nb_where|e }}/{{ docname|e }}\">{{ docname|e }}</a>.\n <br>\n Interactive online version:\n <a href=\"https://mybinder.org/v2/gh/PlasmaPy/PlasmaPy/{{ nb_where|e }}/?filepath={{ docname|e }}\"><img alt=\"Binder badge\" src=\"https://mybinder.org/badge_logo.svg\" style=\"vertical-align:text-bottom\"></a>.\n </p>\n </div>\n\n.. raw:: latex\n\n \\nbsphinxstartnotebook{\\scriptsize\\noindent\\strut\n \\textcolor{gray}{The following section was generated from\n \\sphinxcode{\\sphinxupquote{\\strut {{ docname | escape_latex }}}} \\dotfill}}\n\"\"\"\n\n\ndef setup(app: Sphinx) -> None:\n app.add_config_value(\"revision\", \"\", True)\n app.add_stylesheet(\"rtd_theme_overrides.css\")\n", "path": "docs/conf.py"}]}
num_tokens: 3,118
num_tokens_diff: 229
problem_id: gh_patches_debug_3864
source: rasdani/github-patches
task_type: git_diff
in_source_id: elastic__apm-agent-python-1203
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- APM Agent Serialization Error **Describe the bug**: APM Agent crashes when using Python AWS SDK's builder for DynamoDB Key condition expressions. **To Reproduce** 1. Import `from boto3.dynamodb.conditions import Key` and make a DynamoDB query that includes `KeyConditionExpression` option with a value generated by `Key` class. Example Broken Code: ``` SESSION = boto3.Session(region_name="us-west-2",) DYNAMODB_TABLE = SESSION.resource("dynamodb").Table("TableName") options = { "KeyConditionExpression": Key("primary_key").eq(1) } result = DYNAMODB_TABLE.query(**options) ``` 2. If APM Agent is running it will crash with the following exception when above code executes: ``` Exception in thread eapm event processor thread: Traceback (most recent call last): File "/var/lang/lib/python3.8/threading.py", line 932, in _bootstrap_inner self.run() File "/var/lang/lib/python3.8/threading.py", line 870, in run self._target(*self._args, **self._kwargs) File "/var/task/elasticapm/transport/base.py", line 145, in _process_queue buffer.write((self._json_serializer({event_type: data}) + "\n").encode("utf-8")) File "/var/task/elasticapm/utils/json_encoder.py", line 63, in dumps return json.dumps(value, cls=BetterJSONEncoder, **kwargs) File "/var/lang/lib/python3.8/json/__init__.py", line 234, in dumps return cls( File "/var/lang/lib/python3.8/json/encoder.py", line 199, in encode chunks = self.iterencode(o, _one_shot=True) File "/var/lang/lib/python3.8/json/encoder.py", line 257, in iterencode return _iterencode(o, 0) File "/var/task/elasticapm/utils/json_encoder.py", line 55, in default return super(BetterJSONEncoder, self).default(obj) File "/var/lang/lib/python3.8/json/encoder.py", line 179, in default raise TypeError(f'Object of type {o.__class__.__name__} ' TypeError: Object of type Equals is not JSON serializable ``` 3. Excluding the usage of `Key` class for building Condition Expressions avoids the bug. Example Working Code: ``` SESSION = boto3.Session(region_name="us-west-2",) DYNAMODB_TABLE = SESSION.resource("dynamodb").Table("TableName") options = { "KeyConditionExpression": "#PrimaryKey = :value", "ExpressionAttributeNames": {"#PrimaryKey": "primary_key"}, "ExpressionAttributeValues": {":value": 1} } result = DYNAMODB_TABLE.query(**options) ``` **Environment (please complete the following information)** - OS: Linux - Python version: 3.8 - Framework and version [e.g. Django 2.1]: Not Applicable - APM Server version: 7.13.4 - Agent version: 6.3.3 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/utils/json_encoder.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2012, the Sentry Team, see AUTHORS for more details 4 # Copyright (c) 2019, Elasticsearch BV 5 # All rights reserved. 6 # 7 # Redistribution and use in source and binary forms, with or without 8 # modification, are permitted provided that the following conditions are met: 9 # 10 # * Redistributions of source code must retain the above copyright notice, this 11 # list of conditions and the following disclaimer. 12 # 13 # * Redistributions in binary form must reproduce the above copyright notice, 14 # this list of conditions and the following disclaimer in the documentation 15 # and/or other materials provided with the distribution. 
16 # 17 # * Neither the name of the copyright holder nor the names of its 18 # contributors may be used to endorse or promote products derived from 19 # this software without specific prior written permission. 20 # 21 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 22 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 24 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 25 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 27 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 28 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 29 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 31 32 import datetime 33 import decimal 34 import uuid 35 36 try: 37 import json 38 except ImportError: 39 import simplejson as json 40 41 42 class BetterJSONEncoder(json.JSONEncoder): 43 ENCODERS = { 44 set: list, 45 frozenset: list, 46 datetime.datetime: lambda obj: obj.strftime("%Y-%m-%dT%H:%M:%S.%fZ"), 47 uuid.UUID: lambda obj: obj.hex, 48 bytes: lambda obj: obj.decode("utf-8", errors="replace"), 49 decimal.Decimal: lambda obj: float(obj), 50 } 51 52 def default(self, obj): 53 if type(obj) in self.ENCODERS: 54 return self.ENCODERS[type(obj)](obj) 55 return super(BetterJSONEncoder, self).default(obj) 56 57 58 def better_decoder(data): 59 return data 60 61 62 def dumps(value, **kwargs): 63 return json.dumps(value, cls=BetterJSONEncoder, **kwargs) 64 65 66 def loads(value, **kwargs): 67 return json.loads(value, object_hook=better_decoder) 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/utils/json_encoder.py b/elasticapm/utils/json_encoder.py --- a/elasticapm/utils/json_encoder.py +++ b/elasticapm/utils/json_encoder.py @@ -52,7 +52,10 @@ def default(self, obj): if type(obj) in self.ENCODERS: return self.ENCODERS[type(obj)](obj) - return super(BetterJSONEncoder, self).default(obj) + try: + return super(BetterJSONEncoder, self).default(obj) + except TypeError: + return str(obj) def better_decoder(data):
{"golden_diff": "diff --git a/elasticapm/utils/json_encoder.py b/elasticapm/utils/json_encoder.py\n--- a/elasticapm/utils/json_encoder.py\n+++ b/elasticapm/utils/json_encoder.py\n@@ -52,7 +52,10 @@\n def default(self, obj):\n if type(obj) in self.ENCODERS:\n return self.ENCODERS[type(obj)](obj)\n- return super(BetterJSONEncoder, self).default(obj)\n+ try:\n+ return super(BetterJSONEncoder, self).default(obj)\n+ except TypeError:\n+ return str(obj)\n \n \n def better_decoder(data):\n", "issue": "APM Agent Serialization Error\n**Describe the bug**: APM Agent crashes when using Python AWS SDK's builder for DynamoDB Key condition expressions.\r\n\r\n**To Reproduce**\r\n\r\n1. Import `from boto3.dynamodb.conditions import Key` and make a DynamoDB query that includes `KeyConditionExpression` option with a value generated by `Key` class. Example Broken Code:\r\n\r\n```\r\nSESSION = boto3.Session(region_name=\"us-west-2\",)\r\nDYNAMODB_TABLE = SESSION.resource(\"dynamodb\").Table(\"TableName\")\r\n\r\noptions = {\r\n \"KeyConditionExpression\": Key(\"primary_key\").eq(1)\r\n}\r\n\r\nresult = DYNAMODB_TABLE.query(**options)\r\n```\r\n\r\n2. If APM Agent is running it will crash with the following exception when above code executes:\r\n```\r\nException in thread eapm event processor thread:\r\nTraceback (most recent call last):\r\n File \"/var/lang/lib/python3.8/threading.py\", line 932, in _bootstrap_inner\r\n self.run()\r\n File \"/var/lang/lib/python3.8/threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/var/task/elasticapm/transport/base.py\", line 145, in _process_queue\r\n buffer.write((self._json_serializer({event_type: data}) + \"\\n\").encode(\"utf-8\"))\r\n File \"/var/task/elasticapm/utils/json_encoder.py\", line 63, in dumps\r\n return json.dumps(value, cls=BetterJSONEncoder, **kwargs)\r\n File \"/var/lang/lib/python3.8/json/__init__.py\", line 234, in dumps\r\n return cls(\r\n File \"/var/lang/lib/python3.8/json/encoder.py\", line 199, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \"/var/lang/lib/python3.8/json/encoder.py\", line 257, in iterencode\r\n return _iterencode(o, 0)\r\n File \"/var/task/elasticapm/utils/json_encoder.py\", line 55, in default\r\n return super(BetterJSONEncoder, self).default(obj)\r\n File \"/var/lang/lib/python3.8/json/encoder.py\", line 179, in default\r\n raise TypeError(f'Object of type {o.__class__.__name__} '\r\nTypeError: Object of type Equals is not JSON serializable\r\n```\r\n\r\n3. Excluding the usage of `Key` class for building Condition Expressions avoids the bug. Example Working Code:\r\n```\r\nSESSION = boto3.Session(region_name=\"us-west-2\",)\r\nDYNAMODB_TABLE = SESSION.resource(\"dynamodb\").Table(\"TableName\")\r\n\r\noptions = {\r\n \"KeyConditionExpression\": \"#PrimaryKey = :value\",\r\n \"ExpressionAttributeNames\": {\"#PrimaryKey\": \"primary_key\"},\r\n \"ExpressionAttributeValues\": {\":value\": 1}\r\n}\r\n\r\nresult = DYNAMODB_TABLE.query(**options)\r\n```\r\n\r\n**Environment (please complete the following information)**\r\n- OS: Linux\r\n- Python version: 3.8\r\n- Framework and version [e.g. 
Django 2.1]: Not Applicable\r\n- APM Server version: 7.13.4\r\n- Agent version: 6.3.3\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport datetime\nimport decimal\nimport uuid\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n\nclass BetterJSONEncoder(json.JSONEncoder):\n ENCODERS = {\n set: list,\n frozenset: list,\n datetime.datetime: lambda obj: obj.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n uuid.UUID: lambda obj: obj.hex,\n bytes: lambda obj: obj.decode(\"utf-8\", errors=\"replace\"),\n decimal.Decimal: lambda obj: float(obj),\n }\n\n def default(self, obj):\n if type(obj) in self.ENCODERS:\n return self.ENCODERS[type(obj)](obj)\n return super(BetterJSONEncoder, self).default(obj)\n\n\ndef better_decoder(data):\n return data\n\n\ndef dumps(value, **kwargs):\n return json.dumps(value, cls=BetterJSONEncoder, **kwargs)\n\n\ndef loads(value, **kwargs):\n return json.loads(value, object_hook=better_decoder)\n", "path": "elasticapm/utils/json_encoder.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2012, the Sentry Team, see AUTHORS for more details\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND 
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n\n\nimport datetime\nimport decimal\nimport uuid\n\ntry:\n import json\nexcept ImportError:\n import simplejson as json\n\n\nclass BetterJSONEncoder(json.JSONEncoder):\n ENCODERS = {\n set: list,\n frozenset: list,\n datetime.datetime: lambda obj: obj.strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\"),\n uuid.UUID: lambda obj: obj.hex,\n bytes: lambda obj: obj.decode(\"utf-8\", errors=\"replace\"),\n decimal.Decimal: lambda obj: float(obj),\n }\n\n def default(self, obj):\n if type(obj) in self.ENCODERS:\n return self.ENCODERS[type(obj)](obj)\n try:\n return super(BetterJSONEncoder, self).default(obj)\n except TypeError:\n return str(obj)\n\n\ndef better_decoder(data):\n return data\n\n\ndef dumps(value, **kwargs):\n return json.dumps(value, cls=BetterJSONEncoder, **kwargs)\n\n\ndef loads(value, **kwargs):\n return json.loads(value, object_hook=better_decoder)\n", "path": "elasticapm/utils/json_encoder.py"}]}
num_tokens: 1,651
num_tokens_diff: 138
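The fix in the record above is a generic "fall back to `str()`" pattern for JSON encoders. A self-contained sketch of that pattern — the `Equals` class below is only a stand-in that mimics boto3's condition object, not the real class:

```python
import json


class LenientJSONEncoder(json.JSONEncoder):
    def default(self, obj):
        try:
            return super().default(obj)   # normal path for types json already supports
        except TypeError:
            return str(obj)               # last resort: readable output instead of a crash


class Equals:
    """Stand-in for boto3.dynamodb.conditions' Equals; only __str__ matters here."""

    def __str__(self):
        return "Key('primary_key').eq(1)"


payload = {"KeyConditionExpression": Equals()}
print(json.dumps(payload, cls=LenientJSONEncoder))
# {"KeyConditionExpression": "Key('primary_key').eq(1)"}
```

The trade-off of the pattern is that unknown objects are serialized lossily (whatever `__str__` yields), but the event-processor thread can no longer be killed by a single unserializable span attribute.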
problem_id: gh_patches_debug_24746
source: rasdani/github-patches
task_type: git_diff
in_source_id: elastic__apm-agent-python-1115
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python agent should show database as a dependency, shows up in span **Is your feature request related to a problem? Please describe.** I can see Postgres under **Time spent by span type** graph but it is not shown as a dependency. Rails agent is able to show this as Postgresql <img width="397" alt="Screenshot 2021-04-24 at 12 03 10 PM" src="https://user-images.githubusercontent.com/36618391/115949791-14edbe80-a4f5-11eb-999a-9b2880460abc.png"> I am using asyncpg==0.21.0 **Describe the solution you'd like** The database(Postgres) should show up as a dependency just like it shows up for ruby agent. Screenshot attached below. <img width="979" alt="Screenshot 2021-04-24 at 11 40 09 AM" src="https://user-images.githubusercontent.com/36618391/115949754-d526d700-a4f4-11eb-9d43-bad114aad4eb.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `elasticapm/instrumentation/packages/asyncio/asyncpg.py` Content: ``` 1 # BSD 3-Clause License 2 # 3 # Copyright (c) 2019, Elasticsearch BV 4 # All rights reserved. 5 # 6 # Redistribution and use in source and binary forms, with or without 7 # modification, are permitted provided that the following conditions are met: 8 # 9 # * Redistributions of source code must retain the above copyright notice, this 10 # list of conditions and the following disclaimer. 11 # 12 # * Redistributions in binary form must reproduce the above copyright notice, 13 # this list of conditions and the following disclaimer in the documentation 14 # and/or other materials provided with the distribution. 15 # 16 # * Neither the name of the copyright holder nor the names of its 17 # contributors may be used to endorse or promote products derived from 18 # this software without specific prior written permission. 19 # 20 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 31 from elasticapm.contrib.asyncio.traces import async_capture_span 32 from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule 33 from elasticapm.instrumentation.packages.dbapi2 import extract_signature 34 35 36 class AsyncPGInstrumentation(AsyncAbstractInstrumentedModule): 37 """ 38 Implement asyncpg instrumentation with two methods Connection.execute 39 and Connection.executemany since Connection._do_execute is not called 40 given a prepared query is passed to a connection. 
As in: 41 https://github.com/MagicStack/asyncpg/blob/master/asyncpg/connection.py#L294-L297 42 """ 43 44 name = "asyncpg" 45 46 instrument_list = [ 47 ("asyncpg.connection", "Connection.execute"), 48 ("asyncpg.connection", "Connection.executemany"), 49 ("asyncpg.connection", "Connection.fetch"), 50 ("asyncpg.connection", "Connection.fetchval"), 51 ("asyncpg.connection", "Connection.fetchrow"), 52 ] 53 54 async def call(self, module, method, wrapped, instance, args, kwargs): 55 query = args[0] if len(args) else kwargs["query"] 56 name = extract_signature(query) 57 context = {"db": {"type": "sql", "statement": query}} 58 action = "query" 59 async with async_capture_span( 60 name, leaf=True, span_type="db", span_subtype="postgres", span_action=action, extra=context 61 ): 62 return await wrapped(*args, **kwargs) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/elasticapm/instrumentation/packages/asyncio/asyncpg.py b/elasticapm/instrumentation/packages/asyncio/asyncpg.py --- a/elasticapm/instrumentation/packages/asyncio/asyncpg.py +++ b/elasticapm/instrumentation/packages/asyncio/asyncpg.py @@ -31,6 +31,7 @@ from elasticapm.contrib.asyncio.traces import async_capture_span from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule from elasticapm.instrumentation.packages.dbapi2 import extract_signature +from elasticapm.utils import default_ports class AsyncPGInstrumentation(AsyncAbstractInstrumentedModule): @@ -56,6 +57,12 @@ name = extract_signature(query) context = {"db": {"type": "sql", "statement": query}} action = "query" + destination_info = { + "address": kwargs.get("host", "localhost"), + "port": int(kwargs.get("port", default_ports.get("postgresql"))), + "service": {"name": "postgres", "resource": "postgres", "type": "db"}, + } + context["destination"] = destination_info async with async_capture_span( name, leaf=True, span_type="db", span_subtype="postgres", span_action=action, extra=context ):
{"golden_diff": "diff --git a/elasticapm/instrumentation/packages/asyncio/asyncpg.py b/elasticapm/instrumentation/packages/asyncio/asyncpg.py\n--- a/elasticapm/instrumentation/packages/asyncio/asyncpg.py\n+++ b/elasticapm/instrumentation/packages/asyncio/asyncpg.py\n@@ -31,6 +31,7 @@\n from elasticapm.contrib.asyncio.traces import async_capture_span\n from elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\n from elasticapm.instrumentation.packages.dbapi2 import extract_signature\n+from elasticapm.utils import default_ports\n \n \n class AsyncPGInstrumentation(AsyncAbstractInstrumentedModule):\n@@ -56,6 +57,12 @@\n name = extract_signature(query)\n context = {\"db\": {\"type\": \"sql\", \"statement\": query}}\n action = \"query\"\n+ destination_info = {\n+ \"address\": kwargs.get(\"host\", \"localhost\"),\n+ \"port\": int(kwargs.get(\"port\", default_ports.get(\"postgresql\"))),\n+ \"service\": {\"name\": \"postgres\", \"resource\": \"postgres\", \"type\": \"db\"},\n+ }\n+ context[\"destination\"] = destination_info\n async with async_capture_span(\n name, leaf=True, span_type=\"db\", span_subtype=\"postgres\", span_action=action, extra=context\n ):\n", "issue": "Python agent should show database as a dependency, shows up in span\n**Is your feature request related to a problem? Please describe.**\r\nI can see Postgres under **Time spent by span type** graph but it is not shown as a dependency. Rails agent is able to show this as Postgresql\r\n\r\n<img width=\"397\" alt=\"Screenshot 2021-04-24 at 12 03 10 PM\" src=\"https://user-images.githubusercontent.com/36618391/115949791-14edbe80-a4f5-11eb-999a-9b2880460abc.png\">\r\n\r\nI am using asyncpg==0.21.0\r\n\r\n**Describe the solution you'd like**\r\nThe database(Postgres) should show up as a dependency just like it shows up for ruby agent. Screenshot attached below.\r\n\r\n<img width=\"979\" alt=\"Screenshot 2021-04-24 at 11 40 09 AM\" src=\"https://user-images.githubusercontent.com/36618391/115949754-d526d700-a4f4-11eb-9d43-bad114aad4eb.png\">\r\n\n", "before_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.dbapi2 import extract_signature\n\n\nclass AsyncPGInstrumentation(AsyncAbstractInstrumentedModule):\n \"\"\"\n Implement asyncpg instrumentation with two methods Connection.execute\n and Connection.executemany since Connection._do_execute is not called\n given a prepared query is passed to a connection. As in:\n https://github.com/MagicStack/asyncpg/blob/master/asyncpg/connection.py#L294-L297\n \"\"\"\n\n name = \"asyncpg\"\n\n instrument_list = [\n (\"asyncpg.connection\", \"Connection.execute\"),\n (\"asyncpg.connection\", \"Connection.executemany\"),\n (\"asyncpg.connection\", \"Connection.fetch\"),\n (\"asyncpg.connection\", \"Connection.fetchval\"),\n (\"asyncpg.connection\", \"Connection.fetchrow\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n query = args[0] if len(args) else kwargs[\"query\"]\n name = extract_signature(query)\n context = {\"db\": {\"type\": \"sql\", \"statement\": query}}\n action = \"query\"\n async with async_capture_span(\n name, leaf=True, span_type=\"db\", span_subtype=\"postgres\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/asyncio/asyncpg.py"}], "after_files": [{"content": "# BSD 3-Clause License\n#\n# Copyright (c) 2019, Elasticsearch BV\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom elasticapm.contrib.asyncio.traces import async_capture_span\nfrom elasticapm.instrumentation.packages.asyncio.base import AsyncAbstractInstrumentedModule\nfrom elasticapm.instrumentation.packages.dbapi2 import extract_signature\nfrom elasticapm.utils import default_ports\n\n\nclass AsyncPGInstrumentation(AsyncAbstractInstrumentedModule):\n \"\"\"\n Implement asyncpg instrumentation with two methods Connection.execute\n and Connection.executemany since Connection._do_execute is not called\n given a prepared query is passed to a connection. As in:\n https://github.com/MagicStack/asyncpg/blob/master/asyncpg/connection.py#L294-L297\n \"\"\"\n\n name = \"asyncpg\"\n\n instrument_list = [\n (\"asyncpg.connection\", \"Connection.execute\"),\n (\"asyncpg.connection\", \"Connection.executemany\"),\n (\"asyncpg.connection\", \"Connection.fetch\"),\n (\"asyncpg.connection\", \"Connection.fetchval\"),\n (\"asyncpg.connection\", \"Connection.fetchrow\"),\n ]\n\n async def call(self, module, method, wrapped, instance, args, kwargs):\n query = args[0] if len(args) else kwargs[\"query\"]\n name = extract_signature(query)\n context = {\"db\": {\"type\": \"sql\", \"statement\": query}}\n action = \"query\"\n destination_info = {\n \"address\": kwargs.get(\"host\", \"localhost\"),\n \"port\": int(kwargs.get(\"port\", default_ports.get(\"postgresql\"))),\n \"service\": {\"name\": \"postgres\", \"resource\": \"postgres\", \"type\": \"db\"},\n }\n context[\"destination\"] = destination_info\n async with async_capture_span(\n name, leaf=True, span_type=\"db\", span_subtype=\"postgres\", span_action=action, extra=context\n ):\n return await wrapped(*args, **kwargs)\n", "path": "elasticapm/instrumentation/packages/asyncio/asyncpg.py"}]}
1,331
303
gh_patches_debug_23294
rasdani/github-patches
git_diff
bridgecrewio__checkov-5275
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate CKV_GCP_67 **Describe the issue** CKV_GCP_67: https://docs.bridgecrew.io/docs/ensure-legacy-compute-engine-instance-metadata-apis-are-disabled https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster#protect_node_metadata As per this article: `The v0.1 and v1beta1 Compute Engine metadata server endpoints were deprecated and shutdown on September 30, 2020.` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py` Content: ``` 1 from checkov.common.models.enums import CheckResult, CheckCategories 2 from checkov.common.util.type_forcers import force_float 3 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck 4 5 6 class GKELegacyInstanceMetadataDisabled(BaseResourceValueCheck): 7 8 def __init__(self): 9 name = "Ensure legacy Compute Engine instance metadata APIs are Disabled" 10 id = "CKV_GCP_67" 11 supported_resources = ['google_container_cluster'] 12 categories = [CheckCategories.KUBERNETES] 13 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 14 15 def scan_resource_conf(self, conf): 16 """ 17 looks for min_master_version =1.12 which ensures that legacy metadata endpoints are disabled 18 https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html 19 :param conf: google_container_cluster configuration 20 :return: <CheckResult> 21 """ 22 if 'min_master_version' in conf: 23 min_master_version = force_float(conf.get('min_master_version')[0]) 24 if min_master_version and min_master_version >= 1.12: 25 return CheckResult.PASSED 26 27 return CheckResult.FAILED 28 29 def get_inspected_key(self): 30 return 'min_master_version' 31 32 def get_expected_value(self): 33 return "1.12" 34 35 36 check = GKELegacyInstanceMetadataDisabled() 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py b/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py deleted file mode 100644 --- a/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py +++ /dev/null @@ -1,36 +0,0 @@ -from checkov.common.models.enums import CheckResult, CheckCategories -from checkov.common.util.type_forcers import force_float -from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck - - -class GKELegacyInstanceMetadataDisabled(BaseResourceValueCheck): - - def __init__(self): - name = "Ensure legacy Compute Engine instance metadata APIs are Disabled" - id = "CKV_GCP_67" - supported_resources = ['google_container_cluster'] - categories = [CheckCategories.KUBERNETES] - super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) - - def scan_resource_conf(self, conf): - """ - looks for min_master_version =1.12 which ensures that legacy metadata endpoints are disabled - https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html - :param conf: google_container_cluster configuration - :return: <CheckResult> - """ - if 'min_master_version' in conf: - min_master_version = force_float(conf.get('min_master_version')[0]) - if min_master_version and min_master_version >= 1.12: - return CheckResult.PASSED - - return CheckResult.FAILED - - def get_inspected_key(self): - return 'min_master_version' - - def get_expected_value(self): - return "1.12" - - -check = GKELegacyInstanceMetadataDisabled()
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py b/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py\ndeleted file mode 100644\n--- a/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py\n+++ /dev/null\n@@ -1,36 +0,0 @@\n-from checkov.common.models.enums import CheckResult, CheckCategories\n-from checkov.common.util.type_forcers import force_float\n-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n-\n-\n-class GKELegacyInstanceMetadataDisabled(BaseResourceValueCheck):\n-\n- def __init__(self):\n- name = \"Ensure legacy Compute Engine instance metadata APIs are Disabled\"\n- id = \"CKV_GCP_67\"\n- supported_resources = ['google_container_cluster']\n- categories = [CheckCategories.KUBERNETES]\n- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n-\n- def scan_resource_conf(self, conf):\n- \"\"\"\n- looks for min_master_version =1.12 which ensures that legacy metadata endpoints are disabled\n- https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html\n- :param conf: google_container_cluster configuration\n- :return: <CheckResult>\n- \"\"\"\n- if 'min_master_version' in conf:\n- min_master_version = force_float(conf.get('min_master_version')[0])\n- if min_master_version and min_master_version >= 1.12:\n- return CheckResult.PASSED\n-\n- return CheckResult.FAILED\n-\n- def get_inspected_key(self):\n- return 'min_master_version'\n-\n- def get_expected_value(self):\n- return \"1.12\"\n-\n-\n-check = GKELegacyInstanceMetadataDisabled()\n", "issue": "Deprecate CKV_GCP_67\n**Describe the issue**\r\n\r\nCKV_GCP_67: https://docs.bridgecrew.io/docs/ensure-legacy-compute-engine-instance-metadata-apis-are-disabled\r\n\r\nhttps://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster#protect_node_metadata\r\n\r\nAs per this article: `The v0.1 and v1beta1 Compute Engine metadata server endpoints were deprecated and shutdown on September 30, 2020.`\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.common.util.type_forcers import force_float\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass GKELegacyInstanceMetadataDisabled(BaseResourceValueCheck):\n\n def __init__(self):\n name = \"Ensure legacy Compute Engine instance metadata APIs are Disabled\"\n id = \"CKV_GCP_67\"\n supported_resources = ['google_container_cluster']\n categories = [CheckCategories.KUBERNETES]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n looks for min_master_version =1.12 which ensures that legacy metadata endpoints are disabled\n https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html\n :param conf: google_container_cluster configuration\n :return: <CheckResult>\n \"\"\"\n if 'min_master_version' in conf:\n min_master_version = force_float(conf.get('min_master_version')[0])\n if min_master_version and min_master_version >= 1.12:\n return CheckResult.PASSED\n\n return CheckResult.FAILED\n\n def get_inspected_key(self):\n return 'min_master_version'\n\n def get_expected_value(self):\n return \"1.12\"\n\n\ncheck = GKELegacyInstanceMetadataDisabled()\n", "path": "checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py"}], "after_files": [{"content": 
null, "path": "checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py"}]}
749
418
gh_patches_debug_13425
rasdani/github-patches
git_diff
python-poetry__poetry-7140
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make config file relocation instructions more explicit - [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate. ## Issue After upgrading from `1.1` to `1.2` I received the following message: ``` Configuration file exists at /Users/xxx/Library/Application Support/pypoetry, reusing this directory. Consider moving configuration to /Users/xxx/Library/Preferences/pypoetry, as support for the legacy directory will be removed in an upcoming release. ``` Similar to #6854 I (naively) assumed (based on above message) that the entire directory was configuration related and therefore moved it from `~/Library/Application Support/` to `~/Library/Preferences`. Of course this lead to poetry no longer functioning. If an automatic move of the config file is not in the cards, at least the warning message needs to be more explicit what file(s) actually need to be moved. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/poetry/locations.py` Content: ``` 1 from __future__ import annotations 2 3 import logging 4 import os 5 import sys 6 7 from pathlib import Path 8 9 from platformdirs import user_cache_path 10 from platformdirs import user_config_path 11 from platformdirs import user_data_path 12 13 14 logger = logging.getLogger(__name__) 15 16 _APP_NAME = "pypoetry" 17 18 DEFAULT_CACHE_DIR = user_cache_path(_APP_NAME, appauthor=False) 19 CONFIG_DIR = Path( 20 os.getenv("POETRY_CONFIG_DIR") 21 or user_config_path(_APP_NAME, appauthor=False, roaming=True) 22 ) 23 24 # platformdirs 2.0.0 corrected the OSX/macOS config directory from 25 # /Users/<user>/Library/Application Support/<appname> to 26 # /Users/<user>/Library/Preferences/<appname>. 27 # 28 # For now we only deprecate use of the old directory. 29 if sys.platform == "darwin": 30 _LEGACY_CONFIG_DIR = CONFIG_DIR.parent.parent / "Application Support" / _APP_NAME 31 config_toml = _LEGACY_CONFIG_DIR / "config.toml" 32 auth_toml = _LEGACY_CONFIG_DIR / "auth.toml" 33 34 if any(file.exists() for file in (auth_toml, config_toml)): 35 logger.warning( 36 ( 37 "Configuration file exists at %s, reusing this directory.\n\nConsider" 38 " moving configuration to %s, as support for the legacy directory will" 39 " be removed in an upcoming release." 40 ), 41 _LEGACY_CONFIG_DIR, 42 CONFIG_DIR, 43 ) 44 CONFIG_DIR = _LEGACY_CONFIG_DIR 45 46 47 def data_dir() -> Path: 48 poetry_home = os.getenv("POETRY_HOME") 49 if poetry_home: 50 return Path(poetry_home).expanduser() 51 52 return user_data_path(_APP_NAME, appauthor=False, roaming=True) 53 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/poetry/locations.py b/src/poetry/locations.py --- a/src/poetry/locations.py +++ b/src/poetry/locations.py @@ -34,9 +34,12 @@ if any(file.exists() for file in (auth_toml, config_toml)): logger.warning( ( - "Configuration file exists at %s, reusing this directory.\n\nConsider" - " moving configuration to %s, as support for the legacy directory will" - " be removed in an upcoming release." + ( + "Configuration file exists at %s, reusing this" + " directory.\n\nConsider moving TOML configuration files to %s, as" + " support for the legacy directory will be removed in an upcoming" + " release." + ), ), _LEGACY_CONFIG_DIR, CONFIG_DIR,
{"golden_diff": "diff --git a/src/poetry/locations.py b/src/poetry/locations.py\n--- a/src/poetry/locations.py\n+++ b/src/poetry/locations.py\n@@ -34,9 +34,12 @@\n if any(file.exists() for file in (auth_toml, config_toml)):\n logger.warning(\n (\n- \"Configuration file exists at %s, reusing this directory.\\n\\nConsider\"\n- \" moving configuration to %s, as support for the legacy directory will\"\n- \" be removed in an upcoming release.\"\n+ (\n+ \"Configuration file exists at %s, reusing this\"\n+ \" directory.\\n\\nConsider moving TOML configuration files to %s, as\"\n+ \" support for the legacy directory will be removed in an upcoming\"\n+ \" release.\"\n+ ),\n ),\n _LEGACY_CONFIG_DIR,\n CONFIG_DIR,\n", "issue": "Make config file relocation instructions more explicit\n- [x] I have searched the [issues](https://github.com/python-poetry/poetry/issues) of this repo and believe that this is not a duplicate.\r\n\r\n## Issue\r\n\r\nAfter upgrading from `1.1` to `1.2` I received the following message:\r\n```\r\nConfiguration file exists at /Users/xxx/Library/Application Support/pypoetry, reusing this directory.\r\n\r\nConsider moving configuration to /Users/xxx/Library/Preferences/pypoetry, as support for the legacy directory will be removed in an upcoming release.\r\n```\r\n\r\nSimilar to #6854 I (naively) assumed (based on above message) that the entire directory was configuration related and therefore moved it from `~/Library/Application Support/` to `~/Library/Preferences`.\r\n\r\nOf course this lead to poetry no longer functioning.\r\n\r\nIf an automatic move of the config file is not in the cards, at least the warning message needs to be more explicit what file(s) actually need to be moved.\n", "before_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport sys\n\nfrom pathlib import Path\n\nfrom platformdirs import user_cache_path\nfrom platformdirs import user_config_path\nfrom platformdirs import user_data_path\n\n\nlogger = logging.getLogger(__name__)\n\n_APP_NAME = \"pypoetry\"\n\nDEFAULT_CACHE_DIR = user_cache_path(_APP_NAME, appauthor=False)\nCONFIG_DIR = Path(\n os.getenv(\"POETRY_CONFIG_DIR\")\n or user_config_path(_APP_NAME, appauthor=False, roaming=True)\n)\n\n# platformdirs 2.0.0 corrected the OSX/macOS config directory from\n# /Users/<user>/Library/Application Support/<appname> to\n# /Users/<user>/Library/Preferences/<appname>.\n#\n# For now we only deprecate use of the old directory.\nif sys.platform == \"darwin\":\n _LEGACY_CONFIG_DIR = CONFIG_DIR.parent.parent / \"Application Support\" / _APP_NAME\n config_toml = _LEGACY_CONFIG_DIR / \"config.toml\"\n auth_toml = _LEGACY_CONFIG_DIR / \"auth.toml\"\n\n if any(file.exists() for file in (auth_toml, config_toml)):\n logger.warning(\n (\n \"Configuration file exists at %s, reusing this directory.\\n\\nConsider\"\n \" moving configuration to %s, as support for the legacy directory will\"\n \" be removed in an upcoming release.\"\n ),\n _LEGACY_CONFIG_DIR,\n CONFIG_DIR,\n )\n CONFIG_DIR = _LEGACY_CONFIG_DIR\n\n\ndef data_dir() -> Path:\n poetry_home = os.getenv(\"POETRY_HOME\")\n if poetry_home:\n return Path(poetry_home).expanduser()\n\n return user_data_path(_APP_NAME, appauthor=False, roaming=True)\n", "path": "src/poetry/locations.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport sys\n\nfrom pathlib import Path\n\nfrom platformdirs import user_cache_path\nfrom platformdirs import user_config_path\nfrom platformdirs import 
user_data_path\n\n\nlogger = logging.getLogger(__name__)\n\n_APP_NAME = \"pypoetry\"\n\nDEFAULT_CACHE_DIR = user_cache_path(_APP_NAME, appauthor=False)\nCONFIG_DIR = Path(\n os.getenv(\"POETRY_CONFIG_DIR\")\n or user_config_path(_APP_NAME, appauthor=False, roaming=True)\n)\n\n# platformdirs 2.0.0 corrected the OSX/macOS config directory from\n# /Users/<user>/Library/Application Support/<appname> to\n# /Users/<user>/Library/Preferences/<appname>.\n#\n# For now we only deprecate use of the old directory.\nif sys.platform == \"darwin\":\n _LEGACY_CONFIG_DIR = CONFIG_DIR.parent.parent / \"Application Support\" / _APP_NAME\n config_toml = _LEGACY_CONFIG_DIR / \"config.toml\"\n auth_toml = _LEGACY_CONFIG_DIR / \"auth.toml\"\n\n if any(file.exists() for file in (auth_toml, config_toml)):\n logger.warning(\n (\n (\n \"Configuration file exists at %s, reusing this\"\n \" directory.\\n\\nConsider moving TOML configuration files to %s, as\"\n \" support for the legacy directory will be removed in an upcoming\"\n \" release.\"\n ),\n ),\n _LEGACY_CONFIG_DIR,\n CONFIG_DIR,\n )\n CONFIG_DIR = _LEGACY_CONFIG_DIR\n\n\ndef data_dir() -> Path:\n poetry_home = os.getenv(\"POETRY_HOME\")\n if poetry_home:\n return Path(poetry_home).expanduser()\n\n return user_data_path(_APP_NAME, appauthor=False, roaming=True)\n", "path": "src/poetry/locations.py"}]}
954
197
gh_patches_debug_17079
rasdani/github-patches
git_diff
qutebrowser__qutebrowser-2953
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use CommandParser for configmodel.bind In the `new-config` branch, there's a `CommandParser` in `commands/runners.py` which got split off from `CommandRunner` and can be used standalone. Various places doing their own command parsing got updated accordingly, but `configmodel.bind` is still doing its own parsing. An example how it's used, from `:bind`: https://github.com/qutebrowser/qutebrowser/blob/2117824cf9fdc47ea6fd9457c12cecbac117202e/qutebrowser/config/config.py#L179-L189 Split off from #2779, cc @rcorre - if you want to take a look at this, feel free to do a PR against `new-config`, or wait until that's merged and then do one against `master`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `qutebrowser/completion/models/configmodel.py` Content: ``` 1 # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et: 2 3 # Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]> 4 # 5 # This file is part of qutebrowser. 6 # 7 # qutebrowser is free software: you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation, either version 3 of the License, or 10 # (at your option) any later version. 11 # 12 # qutebrowser is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with qutebrowser. If not, see <http://www.gnu.org/licenses/>. 19 20 """Functions that return config-related completion models.""" 21 22 from qutebrowser.config import configdata, configexc 23 from qutebrowser.completion.models import completionmodel, listcategory, util 24 from qutebrowser.commands import cmdutils 25 26 27 def option(*, info): 28 """A CompletionModel filled with settings and their descriptions.""" 29 model = completionmodel.CompletionModel(column_widths=(20, 70, 10)) 30 options = ((opt.name, opt.description, info.config.get_str(opt.name)) 31 for opt in configdata.DATA.values()) 32 model.add_category(listcategory.ListCategory("Options", sorted(options))) 33 return model 34 35 36 def value(optname, *_values, info): 37 """A CompletionModel filled with setting values. 38 39 Args: 40 optname: The name of the config option this model shows. 41 _values: The values already provided on the command line. 42 info: A CompletionInfo instance. 43 """ 44 model = completionmodel.CompletionModel(column_widths=(30, 70, 0)) 45 46 try: 47 current = info.config.get_str(optname) or '""' 48 except configexc.NoOptionError: 49 return None 50 51 opt = info.config.get_opt(optname) 52 default = opt.typ.to_str(opt.default) 53 cur_cat = listcategory.ListCategory("Current/Default", 54 [(current, "Current value"), (default, "Default value")]) 55 model.add_category(cur_cat) 56 57 vals = opt.typ.complete() 58 if vals is not None: 59 model.add_category(listcategory.ListCategory("Completions", 60 sorted(vals))) 61 return model 62 63 64 def bind(key, *, info): 65 """A CompletionModel filled with all bindable commands and descriptions. 66 67 Args: 68 key: the key being bound. 
69 """ 70 model = completionmodel.CompletionModel(column_widths=(20, 60, 20)) 71 cmd_text = info.keyconf.get_command(key, 'normal') 72 73 if cmd_text: 74 cmd_name = cmd_text.split(' ')[0] 75 cmd = cmdutils.cmd_dict.get(cmd_name) 76 data = [(cmd_text, cmd.desc, key)] 77 model.add_category(listcategory.ListCategory("Current", data)) 78 79 cmdlist = util.get_cmd_completions(info, include_hidden=True, 80 include_aliases=True) 81 model.add_category(listcategory.ListCategory("Commands", cmdlist)) 82 return model 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/qutebrowser/completion/models/configmodel.py b/qutebrowser/completion/models/configmodel.py --- a/qutebrowser/completion/models/configmodel.py +++ b/qutebrowser/completion/models/configmodel.py @@ -21,7 +21,7 @@ from qutebrowser.config import configdata, configexc from qutebrowser.completion.models import completionmodel, listcategory, util -from qutebrowser.commands import cmdutils +from qutebrowser.commands import runners def option(*, info): @@ -71,8 +71,8 @@ cmd_text = info.keyconf.get_command(key, 'normal') if cmd_text: - cmd_name = cmd_text.split(' ')[0] - cmd = cmdutils.cmd_dict.get(cmd_name) + parser = runners.CommandParser() + cmd = parser.parse(cmd_text).cmd data = [(cmd_text, cmd.desc, key)] model.add_category(listcategory.ListCategory("Current", data))
{"golden_diff": "diff --git a/qutebrowser/completion/models/configmodel.py b/qutebrowser/completion/models/configmodel.py\n--- a/qutebrowser/completion/models/configmodel.py\n+++ b/qutebrowser/completion/models/configmodel.py\n@@ -21,7 +21,7 @@\n \n from qutebrowser.config import configdata, configexc\n from qutebrowser.completion.models import completionmodel, listcategory, util\n-from qutebrowser.commands import cmdutils\n+from qutebrowser.commands import runners\n \n \n def option(*, info):\n@@ -71,8 +71,8 @@\n cmd_text = info.keyconf.get_command(key, 'normal')\n \n if cmd_text:\n- cmd_name = cmd_text.split(' ')[0]\n- cmd = cmdutils.cmd_dict.get(cmd_name)\n+ parser = runners.CommandParser()\n+ cmd = parser.parse(cmd_text).cmd\n data = [(cmd_text, cmd.desc, key)]\n model.add_category(listcategory.ListCategory(\"Current\", data))\n", "issue": "Use CommandParser for configmodel.bind\nIn the `new-config` branch, there's a `CommandParser` in `commands/runners.py` which got split off from `CommandRunner` and can be used standalone. Various places doing their own command parsing got updated accordingly, but `configmodel.bind` is still doing its own parsing. An example how it's used, from `:bind`:\r\n\r\nhttps://github.com/qutebrowser/qutebrowser/blob/2117824cf9fdc47ea6fd9457c12cecbac117202e/qutebrowser/config/config.py#L179-L189\r\n\r\nSplit off from #2779, cc @rcorre - if you want to take a look at this, feel free to do a PR against `new-config`, or wait until that's merged and then do one against `master`.\n", "before_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Functions that return config-related completion models.\"\"\"\n\nfrom qutebrowser.config import configdata, configexc\nfrom qutebrowser.completion.models import completionmodel, listcategory, util\nfrom qutebrowser.commands import cmdutils\n\n\ndef option(*, info):\n \"\"\"A CompletionModel filled with settings and their descriptions.\"\"\"\n model = completionmodel.CompletionModel(column_widths=(20, 70, 10))\n options = ((opt.name, opt.description, info.config.get_str(opt.name))\n for opt in configdata.DATA.values())\n model.add_category(listcategory.ListCategory(\"Options\", sorted(options)))\n return model\n\n\ndef value(optname, *_values, info):\n \"\"\"A CompletionModel filled with setting values.\n\n Args:\n optname: The name of the config option this model shows.\n _values: The values already provided on the command line.\n info: A CompletionInfo instance.\n \"\"\"\n model = completionmodel.CompletionModel(column_widths=(30, 70, 0))\n\n try:\n current = info.config.get_str(optname) or '\"\"'\n except configexc.NoOptionError:\n return None\n\n opt = info.config.get_opt(optname)\n default = opt.typ.to_str(opt.default)\n cur_cat = listcategory.ListCategory(\"Current/Default\",\n [(current, \"Current value\"), (default, \"Default value\")])\n model.add_category(cur_cat)\n\n vals = opt.typ.complete()\n if vals is not None:\n model.add_category(listcategory.ListCategory(\"Completions\",\n sorted(vals)))\n return model\n\n\ndef bind(key, *, info):\n \"\"\"A CompletionModel filled with all bindable commands and descriptions.\n\n Args:\n key: the key being bound.\n \"\"\"\n model = completionmodel.CompletionModel(column_widths=(20, 60, 20))\n cmd_text = info.keyconf.get_command(key, 'normal')\n\n if cmd_text:\n cmd_name = cmd_text.split(' ')[0]\n cmd = cmdutils.cmd_dict.get(cmd_name)\n data = [(cmd_text, cmd.desc, key)]\n model.add_category(listcategory.ListCategory(\"Current\", data))\n\n cmdlist = util.get_cmd_completions(info, include_hidden=True,\n include_aliases=True)\n model.add_category(listcategory.ListCategory(\"Commands\", cmdlist))\n return model\n", "path": "qutebrowser/completion/models/configmodel.py"}], "after_files": [{"content": "# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:\n\n# Copyright 2014-2017 Florian Bruhin (The Compiler) <[email protected]>\n#\n# This file is part of qutebrowser.\n#\n# qutebrowser is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# qutebrowser is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with qutebrowser. 
If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Functions that return config-related completion models.\"\"\"\n\nfrom qutebrowser.config import configdata, configexc\nfrom qutebrowser.completion.models import completionmodel, listcategory, util\nfrom qutebrowser.commands import runners\n\n\ndef option(*, info):\n \"\"\"A CompletionModel filled with settings and their descriptions.\"\"\"\n model = completionmodel.CompletionModel(column_widths=(20, 70, 10))\n options = ((opt.name, opt.description, info.config.get_str(opt.name))\n for opt in configdata.DATA.values())\n model.add_category(listcategory.ListCategory(\"Options\", sorted(options)))\n return model\n\n\ndef value(optname, *_values, info):\n \"\"\"A CompletionModel filled with setting values.\n\n Args:\n optname: The name of the config option this model shows.\n _values: The values already provided on the command line.\n info: A CompletionInfo instance.\n \"\"\"\n model = completionmodel.CompletionModel(column_widths=(30, 70, 0))\n\n try:\n current = info.config.get_str(optname) or '\"\"'\n except configexc.NoOptionError:\n return None\n\n opt = info.config.get_opt(optname)\n default = opt.typ.to_str(opt.default)\n cur_cat = listcategory.ListCategory(\"Current/Default\",\n [(current, \"Current value\"), (default, \"Default value\")])\n model.add_category(cur_cat)\n\n vals = opt.typ.complete()\n if vals is not None:\n model.add_category(listcategory.ListCategory(\"Completions\",\n sorted(vals)))\n return model\n\n\ndef bind(key, *, info):\n \"\"\"A CompletionModel filled with all bindable commands and descriptions.\n\n Args:\n key: the key being bound.\n \"\"\"\n model = completionmodel.CompletionModel(column_widths=(20, 60, 20))\n cmd_text = info.keyconf.get_command(key, 'normal')\n\n if cmd_text:\n parser = runners.CommandParser()\n cmd = parser.parse(cmd_text).cmd\n data = [(cmd_text, cmd.desc, key)]\n model.add_category(listcategory.ListCategory(\"Current\", data))\n\n cmdlist = util.get_cmd_completions(info, include_hidden=True,\n include_aliases=True)\n model.add_category(listcategory.ListCategory(\"Commands\", cmdlist))\n return model\n", "path": "qutebrowser/completion/models/configmodel.py"}]}
1,323
214
gh_patches_debug_198
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-2900
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- E1017 Select does not find already supported function when using complex list with nested Selects ### CloudFormation Lint Version cfn-lint 0.80.4 ### What operating system are you using? Mac ### Describe the bug When launching a template with complex nested Selects and list to extract value from, it seems to be reporting E1017 while it should not. Templates are correctly deployed and work fine on my side. Output from command ``` E1017 Select should use a supported function of Fn::FindInMap, Fn::GetAtt, Fn::GetAZs, Fn::If, Fn::Split, Fn::Cidr, Ref /file1.yml:3189:11 ``` ### Expected behavior No E1017 reported by cfn-lint. Template is working fine in Cloudformation, E1017 should not be reported. ### Reproduction template AWSTemplateFormatVersion: '2010-09-09' Description: 'Build EC2 instance' Resources: MountTarget1: Type: AWS::EFS::MountTarget Properties: FileSystemId: fs-1234567svsdabsf76s # E1017 STARTS HERE SubnetId: !Select - 0 - !Select - 0 - [ [ "subnet-0987sknlnsdoi9j76", "subnet-875jgyjlpzj75j8k0", "subnet-5447hnd6hI8js45js" ], [ "subnet-0987sknlnsdoi9j76", "subnet-875jgyjlpzj75j8k0", "subnet-5447hnd6hI8js45js" ], [ "subnet-0987sknlnsdoi9j76", "subnet-875jgyjlpzj75j8k0", "subnet-5447hnd6hI8js45js" ] ] SecurityGroups: [sg-00qdqeef0a5c345gf] --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/functions/Select.py` Content: ``` 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 from cfnlint.rules import CloudFormationLintRule, RuleMatch 6 7 8 class Select(CloudFormationLintRule): 9 """Check if Select values are correct""" 10 11 id = "E1017" 12 shortdesc = "Select validation of parameters" 13 description = "Making sure the Select function is properly configured" 14 source_url = "https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html" 15 tags = ["functions", "select"] 16 supported_functions = [ 17 "Fn::FindInMap", 18 "Fn::GetAtt", 19 "Fn::GetAZs", 20 "Fn::If", 21 "Fn::Split", 22 "Fn::Cidr", 23 "Ref", 24 ] 25 26 def _test_index_obj(self, index_obj, path): 27 matches = [] 28 if isinstance(index_obj, dict): 29 if len(index_obj) == 1: 30 for index_key, _ in index_obj.items(): 31 if index_key not in [ 32 "Ref", 33 "Fn::FindInMap", 34 "Fn::Select", 35 ]: 36 message = "Select index should be an Integer or a function Ref, Fn::FindInMap, or Fn::Select for {0}" 37 matches.append( 38 RuleMatch( 39 path, 40 message.format("/".join(map(str, path))), 41 ) 42 ) 43 else: 44 message = "Select index should be an Integer or a function Ref, Fn::FindInMap, or Fn::Select for {0}" 45 matches.append( 46 RuleMatch( 47 path, 48 message.format("/".join(map(str, path))), 49 ) 50 ) 51 elif not isinstance(index_obj, int): 52 try: 53 int(index_obj) 54 except (ValueError, TypeError): 55 message = "Select index should be an Integer or a function of Ref, Fn::FindInMap, or Fn::Select for {0}" 56 matches.append( 57 RuleMatch(path, message.format("/".join(map(str, path)))) 58 ) 59 60 return matches 61 62 def _test_list_obj(self, list_obj, path): 63 matches = [] 64 if isinstance(list_obj, dict): 65 if len(list_obj) == 1: 66 for key, _ in list_obj.items(): 67 if key not in self.supported_functions: 68 message = "Select should use a supported function of {0}" 69 matches.append( 70 RuleMatch( 71 
path, 72 message.format( 73 ", ".join(map(str, self.supported_functions)) 74 ), 75 ) 76 ) 77 else: 78 message = "Select should use a supported function of {0}" 79 matches.append( 80 RuleMatch( 81 path, 82 message.format(", ".join(map(str, self.supported_functions))), 83 ) 84 ) 85 elif not isinstance(list_obj, list): 86 message = "Select should be an array of values for {0}" 87 matches.append(RuleMatch(path, message.format("/".join(map(str, path))))) 88 89 return matches 90 91 def _test_select_obj(self, select_obj, path): 92 matches = [] 93 if not isinstance(select_obj, list): 94 message = "Select should be a list of 2 elements for {0}" 95 matches.append(RuleMatch(path, message.format("/".join(map(str, path))))) 96 return matches 97 if len(select_obj) != 2: 98 message = "Select should be a list of 2 elements for {0}" 99 matches.append(RuleMatch(path, message.format("/".join(map(str, path))))) 100 return matches 101 102 index_obj = select_obj[0] 103 list_of_objs = select_obj[1] 104 matches.extend(self._test_index_obj(index_obj, path[:] + [0])) 105 matches.extend(self._test_list_obj(list_of_objs, path[:] + [1])) 106 107 return matches 108 109 def match(self, cfn): 110 matches = [] 111 112 select_objs = cfn.search_deep_keys("Fn::Select") 113 114 for select_obj in select_objs: 115 select_value_obj = select_obj[-1] 116 tree = select_obj[:-1] 117 matches.extend(self._test_select_obj(select_value_obj, tree[:])) 118 119 return matches 120 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py --- a/src/cfnlint/rules/functions/Select.py +++ b/src/cfnlint/rules/functions/Select.py @@ -20,6 +20,7 @@ "Fn::If", "Fn::Split", "Fn::Cidr", + "Fn::Select", # issue: 2895 "Ref", ]
{"golden_diff": "diff --git a/src/cfnlint/rules/functions/Select.py b/src/cfnlint/rules/functions/Select.py\n--- a/src/cfnlint/rules/functions/Select.py\n+++ b/src/cfnlint/rules/functions/Select.py\n@@ -20,6 +20,7 @@\n \"Fn::If\",\n \"Fn::Split\",\n \"Fn::Cidr\",\n+ \"Fn::Select\", # issue: 2895\n \"Ref\",\n ]\n", "issue": "E1017 Select does not find already supported function when using complex list with nested Selects\n### CloudFormation Lint Version\n\ncfn-lint 0.80.4\n\n### What operating system are you using?\n\nMac\n\n### Describe the bug\n\nWhen launching a template with complex nested Selects and list to extract value from, it seems to be reporting E1017 while it should not. Templates are correctly deployed and work fine on my side.\r\n\r\nOutput from command\r\n```\r\nE1017 Select should use a supported function of Fn::FindInMap, Fn::GetAtt, Fn::GetAZs, Fn::If, Fn::Split, Fn::Cidr, Ref\r\n/file1.yml:3189:11\r\n```\n\n### Expected behavior\n\nNo E1017 reported by cfn-lint.\r\nTemplate is working fine in Cloudformation, E1017 should not be reported.\n\n### Reproduction template\n\nAWSTemplateFormatVersion: '2010-09-09'\r\nDescription: 'Build EC2 instance'\r\n\r\nResources:\r\n MountTarget1:\r\n Type: AWS::EFS::MountTarget\r\n Properties:\r\n FileSystemId: fs-1234567svsdabsf76s\r\n# E1017 STARTS HERE\r\n SubnetId: !Select\r\n - 0\r\n - !Select\r\n - 0\r\n - [\r\n [\r\n \"subnet-0987sknlnsdoi9j76\",\r\n \"subnet-875jgyjlpzj75j8k0\",\r\n \"subnet-5447hnd6hI8js45js\"\r\n ],\r\n [\r\n \"subnet-0987sknlnsdoi9j76\",\r\n \"subnet-875jgyjlpzj75j8k0\",\r\n \"subnet-5447hnd6hI8js45js\"\r\n ],\r\n [\r\n \"subnet-0987sknlnsdoi9j76\",\r\n \"subnet-875jgyjlpzj75j8k0\",\r\n \"subnet-5447hnd6hI8js45js\"\r\n ] \r\n ]\r\n SecurityGroups: [sg-00qdqeef0a5c345gf]\r\n\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Select(CloudFormationLintRule):\n \"\"\"Check if Select values are correct\"\"\"\n\n id = \"E1017\"\n shortdesc = \"Select validation of parameters\"\n description = \"Making sure the Select function is properly configured\"\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html\"\n tags = [\"functions\", \"select\"]\n supported_functions = [\n \"Fn::FindInMap\",\n \"Fn::GetAtt\",\n \"Fn::GetAZs\",\n \"Fn::If\",\n \"Fn::Split\",\n \"Fn::Cidr\",\n \"Ref\",\n ]\n\n def _test_index_obj(self, index_obj, path):\n matches = []\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n if index_key not in [\n \"Ref\",\n \"Fn::FindInMap\",\n \"Fn::Select\",\n ]:\n message = \"Select index should be an Integer or a function Ref, Fn::FindInMap, or Fn::Select for {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\"/\".join(map(str, path))),\n )\n )\n else:\n message = \"Select index should be an Integer or a function Ref, Fn::FindInMap, or Fn::Select for {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\"/\".join(map(str, path))),\n )\n )\n elif not isinstance(index_obj, int):\n try:\n int(index_obj)\n except (ValueError, TypeError):\n message = \"Select index should be an Integer or a function of Ref, Fn::FindInMap, or Fn::Select for {0}\"\n matches.append(\n RuleMatch(path, message.format(\"/\".join(map(str, path))))\n )\n\n return matches\n\n def _test_list_obj(self, list_obj, path):\n matches = []\n if isinstance(list_obj, dict):\n if len(list_obj) == 1:\n for key, _ in list_obj.items():\n if key not in self.supported_functions:\n message = \"Select should use a supported function of {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\n \", \".join(map(str, self.supported_functions))\n ),\n )\n )\n else:\n message = \"Select should use a supported function of {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\", \".join(map(str, self.supported_functions))),\n )\n )\n elif not isinstance(list_obj, list):\n message = \"Select should be an array of values for {0}\"\n matches.append(RuleMatch(path, message.format(\"/\".join(map(str, path)))))\n\n return matches\n\n def _test_select_obj(self, select_obj, path):\n matches = []\n if not isinstance(select_obj, list):\n message = \"Select should be a list of 2 elements for {0}\"\n matches.append(RuleMatch(path, message.format(\"/\".join(map(str, path)))))\n return matches\n if len(select_obj) != 2:\n message = \"Select should be a list of 2 elements for {0}\"\n matches.append(RuleMatch(path, message.format(\"/\".join(map(str, path)))))\n return matches\n\n index_obj = select_obj[0]\n list_of_objs = select_obj[1]\n matches.extend(self._test_index_obj(index_obj, path[:] + [0]))\n matches.extend(self._test_list_obj(list_of_objs, path[:] + [1]))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n select_objs = cfn.search_deep_keys(\"Fn::Select\")\n\n for select_obj in select_objs:\n select_value_obj = select_obj[-1]\n tree = select_obj[:-1]\n matches.extend(self._test_select_obj(select_value_obj, tree[:]))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Select.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nfrom cfnlint.rules import CloudFormationLintRule, RuleMatch\n\n\nclass Select(CloudFormationLintRule):\n \"\"\"Check if Select values are correct\"\"\"\n\n id = \"E1017\"\n shortdesc = \"Select validation of parameters\"\n description = \"Making sure the Select function is properly configured\"\n source_url = \"https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-select.html\"\n tags = [\"functions\", \"select\"]\n supported_functions = [\n \"Fn::FindInMap\",\n \"Fn::GetAtt\",\n \"Fn::GetAZs\",\n \"Fn::If\",\n \"Fn::Split\",\n \"Fn::Cidr\",\n \"Fn::Select\", # issue: 2895\n \"Ref\",\n ]\n\n def _test_index_obj(self, index_obj, path):\n matches = []\n if isinstance(index_obj, dict):\n if len(index_obj) == 1:\n for index_key, _ in index_obj.items():\n if index_key not in [\n \"Ref\",\n \"Fn::FindInMap\",\n \"Fn::Select\",\n ]:\n message = \"Select index should be an Integer or a function Ref, Fn::FindInMap, or Fn::Select for {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\"/\".join(map(str, path))),\n )\n )\n else:\n message = \"Select index should be an Integer or a function Ref, Fn::FindInMap, or Fn::Select for {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\"/\".join(map(str, path))),\n )\n )\n elif not isinstance(index_obj, int):\n try:\n int(index_obj)\n except (ValueError, TypeError):\n message = \"Select index should be an Integer or a function of Ref, Fn::FindInMap, or Fn::Select for {0}\"\n matches.append(\n RuleMatch(path, message.format(\"/\".join(map(str, path))))\n )\n\n return matches\n\n def _test_list_obj(self, list_obj, path):\n matches = []\n if isinstance(list_obj, dict):\n if len(list_obj) == 1:\n for key, _ in list_obj.items():\n if key not in self.supported_functions:\n message = \"Select should use a supported function of {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\n \", \".join(map(str, self.supported_functions))\n ),\n )\n )\n else:\n message = \"Select should use a supported function of {0}\"\n matches.append(\n RuleMatch(\n path,\n message.format(\", \".join(map(str, self.supported_functions))),\n )\n )\n elif not isinstance(list_obj, list):\n message = \"Select should be an array of values for {0}\"\n matches.append(RuleMatch(path, message.format(\"/\".join(map(str, path)))))\n\n return matches\n\n def _test_select_obj(self, select_obj, path):\n matches = []\n if not isinstance(select_obj, list):\n message = \"Select should be a list of 2 elements for {0}\"\n matches.append(RuleMatch(path, message.format(\"/\".join(map(str, path)))))\n return matches\n if len(select_obj) != 2:\n message = \"Select should be a list of 2 elements for {0}\"\n matches.append(RuleMatch(path, message.format(\"/\".join(map(str, path)))))\n return matches\n\n index_obj = select_obj[0]\n list_of_objs = select_obj[1]\n matches.extend(self._test_index_obj(index_obj, path[:] + [0]))\n matches.extend(self._test_list_obj(list_of_objs, path[:] + [1]))\n\n return matches\n\n def match(self, cfn):\n matches = []\n\n select_objs = cfn.search_deep_keys(\"Fn::Select\")\n\n for select_obj in select_objs:\n select_value_obj = select_obj[-1]\n tree = select_obj[:-1]\n matches.extend(self._test_select_obj(select_value_obj, tree[:]))\n\n return matches\n", "path": "src/cfnlint/rules/functions/Select.py"}]}
1,924
103
gh_patches_debug_37807
rasdani/github-patches
git_diff
getsentry__sentry-55943
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add subscription for team in `models/groupsubscription` Two updates we need to make here: 1) Update `subscribe_actor` (https://github.com/getsentry/sentry/blob/master/src/sentry/models/groupsubscription.py#L67) to no longer read all the team's users and just set subscribe the Team 2) Update `bulk_subscribe` to allow for bulk subscribing teams, instead of just users --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/models/groupsubscription.py` Content: ``` 1 from __future__ import annotations 2 3 from typing import TYPE_CHECKING, Iterable, Optional, Sequence, Union 4 5 from django.conf import settings 6 from django.db import IntegrityError, models, router, transaction 7 from django.utils import timezone 8 9 from sentry.backup.scopes import RelocationScope 10 from sentry.db.models import ( 11 BaseManager, 12 BoundedPositiveIntegerField, 13 FlexibleForeignKey, 14 Model, 15 region_silo_only_model, 16 sane_repr, 17 ) 18 from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey 19 from sentry.notifications.helpers import ( 20 transform_to_notification_settings_by_recipient, 21 where_should_be_participating, 22 ) 23 from sentry.notifications.types import GroupSubscriptionReason, NotificationSettingTypes 24 from sentry.services.hybrid_cloud.actor import RpcActor 25 from sentry.services.hybrid_cloud.notifications import notifications_service 26 from sentry.services.hybrid_cloud.user import RpcUser 27 28 if TYPE_CHECKING: 29 from sentry.models import Group, Team, User 30 from sentry.notifications.utils.participants import ParticipantMap 31 32 33 class GroupSubscriptionManager(BaseManager): 34 def subscribe( 35 self, 36 group: Group, 37 subscriber: User | RpcUser | Team, 38 reason: int = GroupSubscriptionReason.unknown, 39 ) -> bool: 40 """ 41 Subscribe a user or team to an issue, but only if that user or team has not explicitly 42 unsubscribed. 
43 """ 44 from sentry.models import Team, User 45 46 try: 47 with transaction.atomic(router.db_for_write(GroupSubscription)): 48 if isinstance(subscriber, (User, RpcUser)): 49 self.create( 50 user_id=subscriber.id, 51 group=group, 52 project=group.project, 53 is_active=True, 54 reason=reason, 55 ) 56 elif isinstance(subscriber, Team): 57 self.create( 58 team=subscriber, 59 group=group, 60 project=group.project, 61 is_active=True, 62 reason=reason, 63 ) 64 except IntegrityError: 65 pass 66 return True 67 68 def subscribe_actor( 69 self, 70 group: Group, 71 actor: Union[Team, User, RpcUser], 72 reason: int = GroupSubscriptionReason.unknown, 73 ) -> Optional[bool]: 74 from sentry import features 75 from sentry.models import Team, User 76 77 if isinstance(actor, (RpcUser, User)): 78 return self.subscribe(group, actor, reason) 79 if isinstance(actor, Team): 80 if features.has("organizations:team-workflow-notifications", group.organization): 81 return self.subscribe(group, actor, reason) 82 else: 83 # subscribe the members of the team 84 team_users_ids = list(actor.member_set.values_list("user_id", flat=True)) 85 return self.bulk_subscribe(group, team_users_ids, reason) 86 87 raise NotImplementedError("Unknown actor type: %r" % type(actor)) 88 89 def bulk_subscribe( 90 self, 91 group: Group, 92 user_ids: Iterable[int], 93 reason: int = GroupSubscriptionReason.unknown, 94 ) -> bool: 95 """ 96 Subscribe a list of user ids to an issue, but only if the users are not explicitly 97 unsubscribed. 98 """ 99 # Unique the IDs. 100 user_ids = set(user_ids) 101 102 # 5 retries for race conditions where 103 # concurrent subscription attempts cause integrity errors 104 for i in range(4, -1, -1): # 4 3 2 1 0 105 106 existing_subscriptions = set( 107 GroupSubscription.objects.filter( 108 user_id__in=user_ids, group=group, project=group.project 109 ).values_list("user_id", flat=True) 110 ) 111 112 subscriptions = [ 113 GroupSubscription( 114 user_id=user_id, 115 group=group, 116 project=group.project, 117 is_active=True, 118 reason=reason, 119 ) 120 for user_id in user_ids 121 if user_id not in existing_subscriptions 122 ] 123 124 try: 125 with transaction.atomic(router.db_for_write(GroupSubscription)): 126 self.bulk_create(subscriptions) 127 return True 128 except IntegrityError as e: 129 if i == 0: 130 raise e 131 return False 132 133 def get_participants(self, group: Group) -> ParticipantMap: 134 """ 135 Identify all users who are participating with a given issue. 
136 :param group: Group object 137 """ 138 from sentry.notifications.utils.participants import ParticipantMap 139 140 all_possible_users = RpcActor.many_from_object(group.project.get_members_as_rpc_users()) 141 active_and_disabled_subscriptions = self.filter( 142 group=group, user_id__in=[u.id for u in all_possible_users] 143 ) 144 145 notification_settings = notifications_service.get_settings_for_recipient_by_parent( 146 type=NotificationSettingTypes.WORKFLOW, 147 recipients=all_possible_users, 148 parent_id=group.project_id, 149 ) 150 subscriptions_by_user_id = { 151 subscription.user_id: subscription for subscription in active_and_disabled_subscriptions 152 } 153 notification_settings_by_recipient = transform_to_notification_settings_by_recipient( 154 notification_settings, all_possible_users 155 ) 156 157 result = ParticipantMap() 158 for user in all_possible_users: 159 subscription_option = subscriptions_by_user_id.get(user.id) 160 providers = where_should_be_participating( 161 user, 162 subscription_option, 163 notification_settings_by_recipient, 164 ) 165 for provider in providers: 166 reason = ( 167 subscription_option 168 and subscription_option.reason 169 or GroupSubscriptionReason.implicit 170 ) 171 result.add(provider, user, reason) 172 173 return result 174 175 @staticmethod 176 def get_participating_user_ids(group: Group) -> Sequence[int]: 177 """Return the list of user ids participating in this issue.""" 178 179 return list( 180 GroupSubscription.objects.filter(group=group, is_active=True).values_list( 181 "user_id", flat=True 182 ) 183 ) 184 185 186 @region_silo_only_model 187 class GroupSubscription(Model): 188 """ 189 Identifies a subscription relationship between a user and an issue. 190 """ 191 192 __relocation_scope__ = RelocationScope.Excluded 193 194 project = FlexibleForeignKey("sentry.Project", related_name="subscription_set") 195 group = FlexibleForeignKey("sentry.Group", related_name="subscription_set") 196 user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="CASCADE") 197 team = FlexibleForeignKey("sentry.Team", null=True, db_index=True, on_delete=models.CASCADE) 198 is_active = models.BooleanField(default=True) 199 reason = BoundedPositiveIntegerField(default=GroupSubscriptionReason.unknown) 200 date_added = models.DateTimeField(default=timezone.now, null=True) 201 202 objects = GroupSubscriptionManager() 203 204 class Meta: 205 app_label = "sentry" 206 db_table = "sentry_groupsubscription" 207 unique_together = (("group", "user_id"), ("group", "team")) 208 constraints = [ 209 models.CheckConstraint( 210 check=models.Q(team_id__isnull=False, user_id__isnull=True) 211 | models.Q(team_id__isnull=True, user_id__isnull=False), 212 name="subscription_team_or_user_check", 213 ) 214 ] 215 216 __repr__ = sane_repr("project_id", "group_id", "user_id") 217 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/models/groupsubscription.py b/src/sentry/models/groupsubscription.py --- a/src/sentry/models/groupsubscription.py +++ b/src/sentry/models/groupsubscription.py @@ -82,22 +82,28 @@ else: # subscribe the members of the team team_users_ids = list(actor.member_set.values_list("user_id", flat=True)) - return self.bulk_subscribe(group, team_users_ids, reason) + return self.bulk_subscribe(group=group, user_ids=team_users_ids, reason=reason) raise NotImplementedError("Unknown actor type: %r" % type(actor)) def bulk_subscribe( self, group: Group, - user_ids: Iterable[int], + user_ids: Iterable[int] | None = None, + team_ids: Iterable[int] | None = None, reason: int = GroupSubscriptionReason.unknown, ) -> bool: """ - Subscribe a list of user ids to an issue, but only if the users are not explicitly + Subscribe a list of user ids and/or teams to an issue, but only if the users/teams are not explicitly unsubscribed. """ + from sentry import features + # Unique the IDs. - user_ids = set(user_ids) + user_ids = set(user_ids) if user_ids else set() + + # Unique the teams. + team_ids = set(team_ids) if team_ids else set() # 5 retries for race conditions where # concurrent subscription attempts cause integrity errors @@ -117,10 +123,29 @@ is_active=True, reason=reason, ) - for user_id in user_ids - if user_id not in existing_subscriptions + for user_id in user_ids.difference(existing_subscriptions) ] + if features.has("organizations:team-workflow-notifications", group.organization): + existing_team_subscriptions = set( + GroupSubscription.objects.filter( + team_id__in=team_ids, group=group, project=group.project + ).values_list("team_id", flat=True) + ) + + subscriptions.extend( + [ + GroupSubscription( + team_id=team_id, + group=group, + project=group.project, + is_active=True, + reason=reason, + ) + for team_id in team_ids.difference(existing_team_subscriptions) + ] + ) + try: with transaction.atomic(router.db_for_write(GroupSubscription)): self.bulk_create(subscriptions)
{"golden_diff": "diff --git a/src/sentry/models/groupsubscription.py b/src/sentry/models/groupsubscription.py\n--- a/src/sentry/models/groupsubscription.py\n+++ b/src/sentry/models/groupsubscription.py\n@@ -82,22 +82,28 @@\n else:\n # subscribe the members of the team\n team_users_ids = list(actor.member_set.values_list(\"user_id\", flat=True))\n- return self.bulk_subscribe(group, team_users_ids, reason)\n+ return self.bulk_subscribe(group=group, user_ids=team_users_ids, reason=reason)\n \n raise NotImplementedError(\"Unknown actor type: %r\" % type(actor))\n \n def bulk_subscribe(\n self,\n group: Group,\n- user_ids: Iterable[int],\n+ user_ids: Iterable[int] | None = None,\n+ team_ids: Iterable[int] | None = None,\n reason: int = GroupSubscriptionReason.unknown,\n ) -> bool:\n \"\"\"\n- Subscribe a list of user ids to an issue, but only if the users are not explicitly\n+ Subscribe a list of user ids and/or teams to an issue, but only if the users/teams are not explicitly\n unsubscribed.\n \"\"\"\n+ from sentry import features\n+\n # Unique the IDs.\n- user_ids = set(user_ids)\n+ user_ids = set(user_ids) if user_ids else set()\n+\n+ # Unique the teams.\n+ team_ids = set(team_ids) if team_ids else set()\n \n # 5 retries for race conditions where\n # concurrent subscription attempts cause integrity errors\n@@ -117,10 +123,29 @@\n is_active=True,\n reason=reason,\n )\n- for user_id in user_ids\n- if user_id not in existing_subscriptions\n+ for user_id in user_ids.difference(existing_subscriptions)\n ]\n \n+ if features.has(\"organizations:team-workflow-notifications\", group.organization):\n+ existing_team_subscriptions = set(\n+ GroupSubscription.objects.filter(\n+ team_id__in=team_ids, group=group, project=group.project\n+ ).values_list(\"team_id\", flat=True)\n+ )\n+\n+ subscriptions.extend(\n+ [\n+ GroupSubscription(\n+ team_id=team_id,\n+ group=group,\n+ project=group.project,\n+ is_active=True,\n+ reason=reason,\n+ )\n+ for team_id in team_ids.difference(existing_team_subscriptions)\n+ ]\n+ )\n+\n try:\n with transaction.atomic(router.db_for_write(GroupSubscription)):\n self.bulk_create(subscriptions)\n", "issue": "Add subscription for team in `models/groupsubscription`\nTwo updates we need to make here:\r\n1) Update `subscribe_actor` (https://github.com/getsentry/sentry/blob/master/src/sentry/models/groupsubscription.py#L67) to no longer read all the team's users and just set subscribe the Team\r\n2) Update `bulk_subscribe` to allow for bulk subscribing teams, instead of just users\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Iterable, Optional, Sequence, Union\n\nfrom django.conf import settings\nfrom django.db import IntegrityError, models, router, transaction\nfrom django.utils import timezone\n\nfrom sentry.backup.scopes import RelocationScope\nfrom sentry.db.models import (\n BaseManager,\n BoundedPositiveIntegerField,\n FlexibleForeignKey,\n Model,\n region_silo_only_model,\n sane_repr,\n)\nfrom sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey\nfrom sentry.notifications.helpers import (\n transform_to_notification_settings_by_recipient,\n where_should_be_participating,\n)\nfrom sentry.notifications.types import GroupSubscriptionReason, NotificationSettingTypes\nfrom sentry.services.hybrid_cloud.actor import RpcActor\nfrom sentry.services.hybrid_cloud.notifications import notifications_service\nfrom sentry.services.hybrid_cloud.user import RpcUser\n\nif TYPE_CHECKING:\n from sentry.models 
import Group, Team, User\n from sentry.notifications.utils.participants import ParticipantMap\n\n\nclass GroupSubscriptionManager(BaseManager):\n def subscribe(\n self,\n group: Group,\n subscriber: User | RpcUser | Team,\n reason: int = GroupSubscriptionReason.unknown,\n ) -> bool:\n \"\"\"\n Subscribe a user or team to an issue, but only if that user or team has not explicitly\n unsubscribed.\n \"\"\"\n from sentry.models import Team, User\n\n try:\n with transaction.atomic(router.db_for_write(GroupSubscription)):\n if isinstance(subscriber, (User, RpcUser)):\n self.create(\n user_id=subscriber.id,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n elif isinstance(subscriber, Team):\n self.create(\n team=subscriber,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n except IntegrityError:\n pass\n return True\n\n def subscribe_actor(\n self,\n group: Group,\n actor: Union[Team, User, RpcUser],\n reason: int = GroupSubscriptionReason.unknown,\n ) -> Optional[bool]:\n from sentry import features\n from sentry.models import Team, User\n\n if isinstance(actor, (RpcUser, User)):\n return self.subscribe(group, actor, reason)\n if isinstance(actor, Team):\n if features.has(\"organizations:team-workflow-notifications\", group.organization):\n return self.subscribe(group, actor, reason)\n else:\n # subscribe the members of the team\n team_users_ids = list(actor.member_set.values_list(\"user_id\", flat=True))\n return self.bulk_subscribe(group, team_users_ids, reason)\n\n raise NotImplementedError(\"Unknown actor type: %r\" % type(actor))\n\n def bulk_subscribe(\n self,\n group: Group,\n user_ids: Iterable[int],\n reason: int = GroupSubscriptionReason.unknown,\n ) -> bool:\n \"\"\"\n Subscribe a list of user ids to an issue, but only if the users are not explicitly\n unsubscribed.\n \"\"\"\n # Unique the IDs.\n user_ids = set(user_ids)\n\n # 5 retries for race conditions where\n # concurrent subscription attempts cause integrity errors\n for i in range(4, -1, -1): # 4 3 2 1 0\n\n existing_subscriptions = set(\n GroupSubscription.objects.filter(\n user_id__in=user_ids, group=group, project=group.project\n ).values_list(\"user_id\", flat=True)\n )\n\n subscriptions = [\n GroupSubscription(\n user_id=user_id,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n for user_id in user_ids\n if user_id not in existing_subscriptions\n ]\n\n try:\n with transaction.atomic(router.db_for_write(GroupSubscription)):\n self.bulk_create(subscriptions)\n return True\n except IntegrityError as e:\n if i == 0:\n raise e\n return False\n\n def get_participants(self, group: Group) -> ParticipantMap:\n \"\"\"\n Identify all users who are participating with a given issue.\n :param group: Group object\n \"\"\"\n from sentry.notifications.utils.participants import ParticipantMap\n\n all_possible_users = RpcActor.many_from_object(group.project.get_members_as_rpc_users())\n active_and_disabled_subscriptions = self.filter(\n group=group, user_id__in=[u.id for u in all_possible_users]\n )\n\n notification_settings = notifications_service.get_settings_for_recipient_by_parent(\n type=NotificationSettingTypes.WORKFLOW,\n recipients=all_possible_users,\n parent_id=group.project_id,\n )\n subscriptions_by_user_id = {\n subscription.user_id: subscription for subscription in active_and_disabled_subscriptions\n }\n notification_settings_by_recipient = transform_to_notification_settings_by_recipient(\n notification_settings, all_possible_users\n 
)\n\n result = ParticipantMap()\n for user in all_possible_users:\n subscription_option = subscriptions_by_user_id.get(user.id)\n providers = where_should_be_participating(\n user,\n subscription_option,\n notification_settings_by_recipient,\n )\n for provider in providers:\n reason = (\n subscription_option\n and subscription_option.reason\n or GroupSubscriptionReason.implicit\n )\n result.add(provider, user, reason)\n\n return result\n\n @staticmethod\n def get_participating_user_ids(group: Group) -> Sequence[int]:\n \"\"\"Return the list of user ids participating in this issue.\"\"\"\n\n return list(\n GroupSubscription.objects.filter(group=group, is_active=True).values_list(\n \"user_id\", flat=True\n )\n )\n\n\n@region_silo_only_model\nclass GroupSubscription(Model):\n \"\"\"\n Identifies a subscription relationship between a user and an issue.\n \"\"\"\n\n __relocation_scope__ = RelocationScope.Excluded\n\n project = FlexibleForeignKey(\"sentry.Project\", related_name=\"subscription_set\")\n group = FlexibleForeignKey(\"sentry.Group\", related_name=\"subscription_set\")\n user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=\"CASCADE\")\n team = FlexibleForeignKey(\"sentry.Team\", null=True, db_index=True, on_delete=models.CASCADE)\n is_active = models.BooleanField(default=True)\n reason = BoundedPositiveIntegerField(default=GroupSubscriptionReason.unknown)\n date_added = models.DateTimeField(default=timezone.now, null=True)\n\n objects = GroupSubscriptionManager()\n\n class Meta:\n app_label = \"sentry\"\n db_table = \"sentry_groupsubscription\"\n unique_together = ((\"group\", \"user_id\"), (\"group\", \"team\"))\n constraints = [\n models.CheckConstraint(\n check=models.Q(team_id__isnull=False, user_id__isnull=True)\n | models.Q(team_id__isnull=True, user_id__isnull=False),\n name=\"subscription_team_or_user_check\",\n )\n ]\n\n __repr__ = sane_repr(\"project_id\", \"group_id\", \"user_id\")\n", "path": "src/sentry/models/groupsubscription.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Iterable, Optional, Sequence, Union\n\nfrom django.conf import settings\nfrom django.db import IntegrityError, models, router, transaction\nfrom django.utils import timezone\n\nfrom sentry.backup.scopes import RelocationScope\nfrom sentry.db.models import (\n BaseManager,\n BoundedPositiveIntegerField,\n FlexibleForeignKey,\n Model,\n region_silo_only_model,\n sane_repr,\n)\nfrom sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey\nfrom sentry.notifications.helpers import (\n transform_to_notification_settings_by_recipient,\n where_should_be_participating,\n)\nfrom sentry.notifications.types import GroupSubscriptionReason, NotificationSettingTypes\nfrom sentry.services.hybrid_cloud.actor import RpcActor\nfrom sentry.services.hybrid_cloud.notifications import notifications_service\nfrom sentry.services.hybrid_cloud.user import RpcUser\n\nif TYPE_CHECKING:\n from sentry.models import Group, Team, User\n from sentry.notifications.utils.participants import ParticipantMap\n\n\nclass GroupSubscriptionManager(BaseManager):\n def subscribe(\n self,\n group: Group,\n subscriber: User | RpcUser | Team,\n reason: int = GroupSubscriptionReason.unknown,\n ) -> bool:\n \"\"\"\n Subscribe a user or team to an issue, but only if that user or team has not explicitly\n unsubscribed.\n \"\"\"\n from sentry.models import Team, User\n\n try:\n with transaction.atomic(router.db_for_write(GroupSubscription)):\n 
if isinstance(subscriber, (User, RpcUser)):\n self.create(\n user_id=subscriber.id,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n elif isinstance(subscriber, Team):\n self.create(\n team=subscriber,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n except IntegrityError:\n pass\n return True\n\n def subscribe_actor(\n self,\n group: Group,\n actor: Union[Team, User, RpcUser],\n reason: int = GroupSubscriptionReason.unknown,\n ) -> Optional[bool]:\n from sentry import features\n from sentry.models import Team, User\n\n if isinstance(actor, (RpcUser, User)):\n return self.subscribe(group, actor, reason)\n if isinstance(actor, Team):\n if features.has(\"organizations:team-workflow-notifications\", group.organization):\n return self.subscribe(group, actor, reason)\n else:\n # subscribe the members of the team\n team_users_ids = list(actor.member_set.values_list(\"user_id\", flat=True))\n return self.bulk_subscribe(group=group, user_ids=team_users_ids, reason=reason)\n\n raise NotImplementedError(\"Unknown actor type: %r\" % type(actor))\n\n def bulk_subscribe(\n self,\n group: Group,\n user_ids: Iterable[int] | None = None,\n team_ids: Iterable[int] | None = None,\n reason: int = GroupSubscriptionReason.unknown,\n ) -> bool:\n \"\"\"\n Subscribe a list of user ids and/or teams to an issue, but only if the users/teams are not explicitly\n unsubscribed.\n \"\"\"\n from sentry import features\n\n # Unique the IDs.\n user_ids = set(user_ids) if user_ids else set()\n\n # Unique the teams.\n team_ids = set(team_ids) if team_ids else set()\n\n # 5 retries for race conditions where\n # concurrent subscription attempts cause integrity errors\n for i in range(4, -1, -1): # 4 3 2 1 0\n\n existing_subscriptions = set(\n GroupSubscription.objects.filter(\n user_id__in=user_ids, group=group, project=group.project\n ).values_list(\"user_id\", flat=True)\n )\n\n subscriptions = [\n GroupSubscription(\n user_id=user_id,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n for user_id in user_ids.difference(existing_subscriptions)\n ]\n\n if features.has(\"organizations:team-workflow-notifications\", group.organization):\n existing_team_subscriptions = set(\n GroupSubscription.objects.filter(\n team_id__in=team_ids, group=group, project=group.project\n ).values_list(\"team_id\", flat=True)\n )\n\n subscriptions.extend(\n [\n GroupSubscription(\n team_id=team_id,\n group=group,\n project=group.project,\n is_active=True,\n reason=reason,\n )\n for team_id in team_ids.difference(existing_team_subscriptions)\n ]\n )\n\n try:\n with transaction.atomic(router.db_for_write(GroupSubscription)):\n self.bulk_create(subscriptions)\n return True\n except IntegrityError as e:\n if i == 0:\n raise e\n return False\n\n def get_participants(self, group: Group) -> ParticipantMap:\n \"\"\"\n Identify all users who are participating with a given issue.\n :param group: Group object\n \"\"\"\n from sentry.notifications.utils.participants import ParticipantMap\n\n all_possible_users = RpcActor.many_from_object(group.project.get_members_as_rpc_users())\n active_and_disabled_subscriptions = self.filter(\n group=group, user_id__in=[u.id for u in all_possible_users]\n )\n\n notification_settings = notifications_service.get_settings_for_recipient_by_parent(\n type=NotificationSettingTypes.WORKFLOW,\n recipients=all_possible_users,\n parent_id=group.project_id,\n )\n subscriptions_by_user_id = {\n subscription.user_id: subscription for 
subscription in active_and_disabled_subscriptions\n }\n notification_settings_by_recipient = transform_to_notification_settings_by_recipient(\n notification_settings, all_possible_users\n )\n\n result = ParticipantMap()\n for user in all_possible_users:\n subscription_option = subscriptions_by_user_id.get(user.id)\n providers = where_should_be_participating(\n user,\n subscription_option,\n notification_settings_by_recipient,\n )\n for provider in providers:\n reason = (\n subscription_option\n and subscription_option.reason\n or GroupSubscriptionReason.implicit\n )\n result.add(provider, user, reason)\n\n return result\n\n @staticmethod\n def get_participating_user_ids(group: Group) -> Sequence[int]:\n \"\"\"Return the list of user ids participating in this issue.\"\"\"\n\n return list(\n GroupSubscription.objects.filter(group=group, is_active=True).values_list(\n \"user_id\", flat=True\n )\n )\n\n\n@region_silo_only_model\nclass GroupSubscription(Model):\n \"\"\"\n Identifies a subscription relationship between a user and an issue.\n \"\"\"\n\n __relocation_scope__ = RelocationScope.Excluded\n\n project = FlexibleForeignKey(\"sentry.Project\", related_name=\"subscription_set\")\n group = FlexibleForeignKey(\"sentry.Group\", related_name=\"subscription_set\")\n user_id = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=\"CASCADE\")\n team = FlexibleForeignKey(\"sentry.Team\", null=True, db_index=True, on_delete=models.CASCADE)\n is_active = models.BooleanField(default=True)\n reason = BoundedPositiveIntegerField(default=GroupSubscriptionReason.unknown)\n date_added = models.DateTimeField(default=timezone.now, null=True)\n\n objects = GroupSubscriptionManager()\n\n class Meta:\n app_label = \"sentry\"\n db_table = \"sentry_groupsubscription\"\n unique_together = ((\"group\", \"user_id\"), (\"group\", \"team\"))\n constraints = [\n models.CheckConstraint(\n check=models.Q(team_id__isnull=False, user_id__isnull=True)\n | models.Q(team_id__isnull=True, user_id__isnull=False),\n name=\"subscription_team_or_user_check\",\n )\n ]\n\n __repr__ = sane_repr(\"project_id\", \"group_id\", \"user_id\")\n", "path": "src/sentry/models/groupsubscription.py"}]}
2,395
560
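The Sentry record above widens `bulk_subscribe` to keyword-only `user_ids`/`team_ids` arguments. As a rough, non-authoritative usage sketch of the calling convention after that golden diff (the `Group`/`Team` objects, the surrounding feature flag, and the import paths are taken from the record itself, not verified against a live Sentry checkout):

```python
# Hedged usage sketch for the bulk_subscribe signature introduced by the golden
# diff above; object setup is assumed, only the call shape is the point here.
from sentry.models.groupsubscription import GroupSubscription
from sentry.notifications.types import GroupSubscriptionReason


def subscribe_team(group, team) -> bool:
    # Members of the team, collected the same way the old subscribe_actor code did.
    user_ids = list(team.member_set.values_list("user_id", flat=True))
    # After the diff, user_ids and team_ids are optional keyword arguments,
    # so a caller can subscribe the team itself alongside (or instead of) its users.
    return GroupSubscription.objects.bulk_subscribe(
        group=group,
        user_ids=user_ids,
        team_ids=[team.id],
        reason=GroupSubscriptionReason.unknown,
    )
```

Per the diff, the team ids are only written as their own `GroupSubscription` rows when the `organizations:team-workflow-notifications` feature flag is enabled for the group's organization.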
gh_patches_debug_21712
rasdani/github-patches
git_diff
koxudaxi__datamodel-code-generator-1767
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- JSON Schema `const` value and type ignored when setting defaults for Pydantic V2 **Describe the bug** Use of `--force-optional` clobbers `--use-one-literal-as-default`. In my opinion `--force-optional` should use defaults where they exist and only fall back to assigning to `None` where they don't exist. ### Input ```json { "$schema": "http://json-schema.org/draft-07/schema#", "type": "object", "title": "Force optional demo", "properties": { "foo": { "const": "foo" } } } ``` ### Used command ``` datamodel-codegen --input force-optional-demo.json --output-model-type pydantic_v2.BaseModel --force-optional --use-one-literal-as-default ``` ### Actual output ```python # generated by datamodel-codegen: # filename: force-optional-demo.json # timestamp: 2023-12-05T15:06:01+00:00 from __future__ import annotations from pydantic import BaseModel from typing_extensions import Literal class ForceOptionalDemo(BaseModel): foo: Literal['foo'] = None ``` ### Expected output ```python # generated by datamodel-codegen: # filename: force-optional-demo.json # timestamp: 2023-12-05T15:06:01+00:00 from __future__ import annotations from pydantic import BaseModel from typing_extensions import Literal class ForceOptionalDemo(BaseModel): foo: Literal['foo'] = 'foo' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `datamodel_code_generator/model/pydantic_v2/base_model.py` Content: ``` 1 from pathlib import Path 2 from typing import ( 3 TYPE_CHECKING, 4 Any, 5 ClassVar, 6 DefaultDict, 7 Dict, 8 List, 9 NamedTuple, 10 Optional, 11 Set, 12 ) 13 14 from pydantic import Field 15 16 from datamodel_code_generator.model.base import UNDEFINED, DataModelFieldBase 17 from datamodel_code_generator.model.pydantic.base_model import ( 18 BaseModelBase, 19 ) 20 from datamodel_code_generator.model.pydantic.base_model import ( 21 Constraints as _Constraints, 22 ) 23 from datamodel_code_generator.model.pydantic.base_model import ( 24 DataModelField as DataModelFieldV1, 25 ) 26 from datamodel_code_generator.model.pydantic_v2.imports import IMPORT_CONFIG_DICT 27 from datamodel_code_generator.reference import Reference 28 from datamodel_code_generator.util import field_validator, model_validator 29 30 if TYPE_CHECKING: 31 from typing_extensions import Literal 32 else: 33 try: 34 from typing import Literal 35 except ImportError: 36 from typing_extensions import Literal 37 38 39 class Constraints(_Constraints): 40 # To override existing pattern alias 41 regex: Optional[str] = Field(None, alias='regex') 42 pattern: Optional[str] = Field(None, alias='pattern') 43 44 @model_validator(mode='before') 45 def validate_min_max_items(cls, values: Any) -> Dict[str, Any]: 46 if not isinstance(values, dict): # pragma: no cover 47 return values 48 min_items = values.pop('minItems', None) 49 if min_items is not None: 50 values['minLength'] = min_items 51 max_items = values.pop('maxItems', None) 52 if max_items is not None: 53 values['maxLength'] = max_items 54 return values 55 56 57 class DataModelField(DataModelFieldV1): 58 _EXCLUDE_FIELD_KEYS: ClassVar[Set[str]] = { 59 'alias', 60 'default', 61 'gt', 62 'ge', 63 'lt', 64 'le', 65 'multiple_of', 66 'min_length', 67 'max_length', 68 'pattern', 69 } 70 constraints: Optional[Constraints] = None 71 _PARSE_METHOD: ClassVar[str] = 'model_validate' 72 73 @field_validator('extras') 74 def 
validate_extras(cls, values: Any) -> Dict[str, Any]: 75 if not isinstance(values, dict): 76 return values 77 if 'examples' in values: 78 return values 79 80 if 'example' in values: 81 values['examples'] = [values.pop('example')] 82 return values 83 84 def process_const(self) -> None: 85 if 'const' not in self.extras: 86 return None 87 self.const = True 88 self.nullable = False 89 const = self.extras['const'] 90 if self.data_type.type == 'str' and isinstance( 91 const, str 92 ): # pragma: no cover # Literal supports only str 93 self.data_type = self.data_type.__class__(literals=[const]) 94 95 def _process_data_in_str(self, data: Dict[str, Any]) -> None: 96 if self.const: 97 # const is removed in pydantic 2.0 98 data.pop('const') 99 100 # unique_items is not supported in pydantic 2.0 101 data.pop('unique_items', None) 102 103 def _process_annotated_field_arguments( 104 self, field_arguments: List[str] 105 ) -> List[str]: 106 if not self.required: 107 if self.use_default_kwarg: 108 return [ 109 f'default={repr(self.default)}', 110 *field_arguments, 111 ] 112 else: 113 # TODO: Allow '=' style default for v1? 114 return [f'{repr(self.default)}', *field_arguments] 115 return field_arguments 116 117 118 class ConfigAttribute(NamedTuple): 119 from_: str 120 to: str 121 invert: bool 122 123 124 class BaseModel(BaseModelBase): 125 TEMPLATE_FILE_PATH: ClassVar[str] = 'pydantic_v2/BaseModel.jinja2' 126 BASE_CLASS: ClassVar[str] = 'pydantic.BaseModel' 127 CONFIG_ATTRIBUTES: ClassVar[List[ConfigAttribute]] = [ 128 ConfigAttribute('allow_population_by_field_name', 'populate_by_name', False), 129 ConfigAttribute('populate_by_name', 'populate_by_name', False), 130 ConfigAttribute('allow_mutation', 'frozen', True), 131 ConfigAttribute('frozen', 'frozen', False), 132 ] 133 134 def __init__( 135 self, 136 *, 137 reference: Reference, 138 fields: List[DataModelFieldBase], 139 decorators: Optional[List[str]] = None, 140 base_classes: Optional[List[Reference]] = None, 141 custom_base_class: Optional[str] = None, 142 custom_template_dir: Optional[Path] = None, 143 extra_template_data: Optional[DefaultDict[str, Any]] = None, 144 path: Optional[Path] = None, 145 description: Optional[str] = None, 146 default: Any = UNDEFINED, 147 nullable: bool = False, 148 ) -> None: 149 super().__init__( 150 reference=reference, 151 fields=fields, 152 decorators=decorators, 153 base_classes=base_classes, 154 custom_base_class=custom_base_class, 155 custom_template_dir=custom_template_dir, 156 extra_template_data=extra_template_data, 157 path=path, 158 description=description, 159 default=default, 160 nullable=nullable, 161 ) 162 config_parameters: Dict[str, Any] = {} 163 164 extra = self._get_config_extra() 165 if extra: 166 config_parameters['extra'] = extra 167 168 for from_, to, invert in self.CONFIG_ATTRIBUTES: 169 if from_ in self.extra_template_data: 170 config_parameters[to] = ( 171 not self.extra_template_data[from_] 172 if invert 173 else self.extra_template_data[from_] 174 ) 175 for data_type in self.all_data_types: 176 if data_type.is_custom_type: 177 config_parameters['arbitrary_types_allowed'] = True 178 break 179 180 if isinstance(self.extra_template_data.get('config'), dict): 181 for key, value in self.extra_template_data['config'].items(): 182 config_parameters[key] = value 183 184 if config_parameters: 185 from datamodel_code_generator.model.pydantic_v2 import ConfigDict 186 187 self.extra_template_data['config'] = ConfigDict.parse_obj(config_parameters) 188 self._additional_imports.append(IMPORT_CONFIG_DICT) 189 
190 def _get_config_extra(self) -> Optional[Literal["'allow'", "'forbid'"]]: 191 additionalProperties = self.extra_template_data.get('additionalProperties') 192 allow_extra_fields = self.extra_template_data.get('allow_extra_fields') 193 if additionalProperties is not None or allow_extra_fields: 194 return ( 195 "'allow'" if additionalProperties or allow_extra_fields else "'forbid'" 196 ) 197 return None 198 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/datamodel_code_generator/model/pydantic_v2/base_model.py b/datamodel_code_generator/model/pydantic_v2/base_model.py --- a/datamodel_code_generator/model/pydantic_v2/base_model.py +++ b/datamodel_code_generator/model/pydantic_v2/base_model.py @@ -87,10 +87,9 @@ self.const = True self.nullable = False const = self.extras['const'] - if self.data_type.type == 'str' and isinstance( - const, str - ): # pragma: no cover # Literal supports only str - self.data_type = self.data_type.__class__(literals=[const]) + self.data_type = self.data_type.__class__(literals=[const]) + if not self.default: + self.default = const def _process_data_in_str(self, data: Dict[str, Any]) -> None: if self.const: @@ -103,7 +102,7 @@ def _process_annotated_field_arguments( self, field_arguments: List[str] ) -> List[str]: - if not self.required: + if not self.required or self.const: if self.use_default_kwarg: return [ f'default={repr(self.default)}',
{"golden_diff": "diff --git a/datamodel_code_generator/model/pydantic_v2/base_model.py b/datamodel_code_generator/model/pydantic_v2/base_model.py\n--- a/datamodel_code_generator/model/pydantic_v2/base_model.py\n+++ b/datamodel_code_generator/model/pydantic_v2/base_model.py\n@@ -87,10 +87,9 @@\n self.const = True\n self.nullable = False\n const = self.extras['const']\n- if self.data_type.type == 'str' and isinstance(\n- const, str\n- ): # pragma: no cover # Literal supports only str\n- self.data_type = self.data_type.__class__(literals=[const])\n+ self.data_type = self.data_type.__class__(literals=[const])\n+ if not self.default:\n+ self.default = const\n \n def _process_data_in_str(self, data: Dict[str, Any]) -> None:\n if self.const:\n@@ -103,7 +102,7 @@\n def _process_annotated_field_arguments(\n self, field_arguments: List[str]\n ) -> List[str]:\n- if not self.required:\n+ if not self.required or self.const:\n if self.use_default_kwarg:\n return [\n f'default={repr(self.default)}',\n", "issue": "JSON Schema `const` value and type ignored when setting defaults for Pydantic V2\n**Describe the bug**\r\nUse of `--force-optional` clobbers `--use-one-literal-as-default`. In my opinion `--force-optional` should use defaults where they exist and only fall back to assigning to `None` where they don't exist.\r\n\r\n### Input\r\n\r\n```json\r\n{\r\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\r\n \"type\": \"object\",\r\n \"title\": \"Force optional demo\",\r\n \"properties\": {\r\n \"foo\": {\r\n \"const\": \"foo\"\r\n }\r\n }\r\n}\r\n```\r\n\r\n### Used command\r\n```\r\ndatamodel-codegen --input force-optional-demo.json --output-model-type pydantic_v2.BaseModel --force-optional --use-one-literal-as-default\r\n```\r\n\r\n### Actual output\r\n\r\n```python\r\n# generated by datamodel-codegen:\r\n# filename: force-optional-demo.json\r\n# timestamp: 2023-12-05T15:06:01+00:00\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom pydantic import BaseModel\r\nfrom typing_extensions import Literal\r\n\r\n\r\nclass ForceOptionalDemo(BaseModel):\r\n foo: Literal['foo'] = None\r\n```\r\n\r\n### Expected output\r\n\r\n```python\r\n# generated by datamodel-codegen:\r\n# filename: force-optional-demo.json\r\n# timestamp: 2023-12-05T15:06:01+00:00\r\n\r\nfrom __future__ import annotations\r\n\r\nfrom pydantic import BaseModel\r\nfrom typing_extensions import Literal\r\n\r\n\r\nclass ForceOptionalDemo(BaseModel):\r\n foo: Literal['foo'] = 'foo'\r\n```\n", "before_files": [{"content": "from pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ClassVar,\n DefaultDict,\n Dict,\n List,\n NamedTuple,\n Optional,\n Set,\n)\n\nfrom pydantic import Field\n\nfrom datamodel_code_generator.model.base import UNDEFINED, DataModelFieldBase\nfrom datamodel_code_generator.model.pydantic.base_model import (\n BaseModelBase,\n)\nfrom datamodel_code_generator.model.pydantic.base_model import (\n Constraints as _Constraints,\n)\nfrom datamodel_code_generator.model.pydantic.base_model import (\n DataModelField as DataModelFieldV1,\n)\nfrom datamodel_code_generator.model.pydantic_v2.imports import IMPORT_CONFIG_DICT\nfrom datamodel_code_generator.reference import Reference\nfrom datamodel_code_generator.util import field_validator, model_validator\n\nif TYPE_CHECKING:\n from typing_extensions import Literal\nelse:\n try:\n from typing import Literal\n except ImportError:\n from typing_extensions import Literal\n\n\nclass Constraints(_Constraints):\n # To override existing pattern alias\n regex: 
Optional[str] = Field(None, alias='regex')\n pattern: Optional[str] = Field(None, alias='pattern')\n\n @model_validator(mode='before')\n def validate_min_max_items(cls, values: Any) -> Dict[str, Any]:\n if not isinstance(values, dict): # pragma: no cover\n return values\n min_items = values.pop('minItems', None)\n if min_items is not None:\n values['minLength'] = min_items\n max_items = values.pop('maxItems', None)\n if max_items is not None:\n values['maxLength'] = max_items\n return values\n\n\nclass DataModelField(DataModelFieldV1):\n _EXCLUDE_FIELD_KEYS: ClassVar[Set[str]] = {\n 'alias',\n 'default',\n 'gt',\n 'ge',\n 'lt',\n 'le',\n 'multiple_of',\n 'min_length',\n 'max_length',\n 'pattern',\n }\n constraints: Optional[Constraints] = None\n _PARSE_METHOD: ClassVar[str] = 'model_validate'\n\n @field_validator('extras')\n def validate_extras(cls, values: Any) -> Dict[str, Any]:\n if not isinstance(values, dict):\n return values\n if 'examples' in values:\n return values\n\n if 'example' in values:\n values['examples'] = [values.pop('example')]\n return values\n\n def process_const(self) -> None:\n if 'const' not in self.extras:\n return None\n self.const = True\n self.nullable = False\n const = self.extras['const']\n if self.data_type.type == 'str' and isinstance(\n const, str\n ): # pragma: no cover # Literal supports only str\n self.data_type = self.data_type.__class__(literals=[const])\n\n def _process_data_in_str(self, data: Dict[str, Any]) -> None:\n if self.const:\n # const is removed in pydantic 2.0\n data.pop('const')\n\n # unique_items is not supported in pydantic 2.0\n data.pop('unique_items', None)\n\n def _process_annotated_field_arguments(\n self, field_arguments: List[str]\n ) -> List[str]:\n if not self.required:\n if self.use_default_kwarg:\n return [\n f'default={repr(self.default)}',\n *field_arguments,\n ]\n else:\n # TODO: Allow '=' style default for v1?\n return [f'{repr(self.default)}', *field_arguments]\n return field_arguments\n\n\nclass ConfigAttribute(NamedTuple):\n from_: str\n to: str\n invert: bool\n\n\nclass BaseModel(BaseModelBase):\n TEMPLATE_FILE_PATH: ClassVar[str] = 'pydantic_v2/BaseModel.jinja2'\n BASE_CLASS: ClassVar[str] = 'pydantic.BaseModel'\n CONFIG_ATTRIBUTES: ClassVar[List[ConfigAttribute]] = [\n ConfigAttribute('allow_population_by_field_name', 'populate_by_name', False),\n ConfigAttribute('populate_by_name', 'populate_by_name', False),\n ConfigAttribute('allow_mutation', 'frozen', True),\n ConfigAttribute('frozen', 'frozen', False),\n ]\n\n def __init__(\n self,\n *,\n reference: Reference,\n fields: List[DataModelFieldBase],\n decorators: Optional[List[str]] = None,\n base_classes: Optional[List[Reference]] = None,\n custom_base_class: Optional[str] = None,\n custom_template_dir: Optional[Path] = None,\n extra_template_data: Optional[DefaultDict[str, Any]] = None,\n path: Optional[Path] = None,\n description: Optional[str] = None,\n default: Any = UNDEFINED,\n nullable: bool = False,\n ) -> None:\n super().__init__(\n reference=reference,\n fields=fields,\n decorators=decorators,\n base_classes=base_classes,\n custom_base_class=custom_base_class,\n custom_template_dir=custom_template_dir,\n extra_template_data=extra_template_data,\n path=path,\n description=description,\n default=default,\n nullable=nullable,\n )\n config_parameters: Dict[str, Any] = {}\n\n extra = self._get_config_extra()\n if extra:\n config_parameters['extra'] = extra\n\n for from_, to, invert in self.CONFIG_ATTRIBUTES:\n if from_ in self.extra_template_data:\n 
config_parameters[to] = (\n not self.extra_template_data[from_]\n if invert\n else self.extra_template_data[from_]\n )\n for data_type in self.all_data_types:\n if data_type.is_custom_type:\n config_parameters['arbitrary_types_allowed'] = True\n break\n\n if isinstance(self.extra_template_data.get('config'), dict):\n for key, value in self.extra_template_data['config'].items():\n config_parameters[key] = value\n\n if config_parameters:\n from datamodel_code_generator.model.pydantic_v2 import ConfigDict\n\n self.extra_template_data['config'] = ConfigDict.parse_obj(config_parameters)\n self._additional_imports.append(IMPORT_CONFIG_DICT)\n\n def _get_config_extra(self) -> Optional[Literal[\"'allow'\", \"'forbid'\"]]:\n additionalProperties = self.extra_template_data.get('additionalProperties')\n allow_extra_fields = self.extra_template_data.get('allow_extra_fields')\n if additionalProperties is not None or allow_extra_fields:\n return (\n \"'allow'\" if additionalProperties or allow_extra_fields else \"'forbid'\"\n )\n return None\n", "path": "datamodel_code_generator/model/pydantic_v2/base_model.py"}], "after_files": [{"content": "from pathlib import Path\nfrom typing import (\n TYPE_CHECKING,\n Any,\n ClassVar,\n DefaultDict,\n Dict,\n List,\n NamedTuple,\n Optional,\n Set,\n)\n\nfrom pydantic import Field\n\nfrom datamodel_code_generator.model.base import UNDEFINED, DataModelFieldBase\nfrom datamodel_code_generator.model.pydantic.base_model import (\n BaseModelBase,\n)\nfrom datamodel_code_generator.model.pydantic.base_model import (\n Constraints as _Constraints,\n)\nfrom datamodel_code_generator.model.pydantic.base_model import (\n DataModelField as DataModelFieldV1,\n)\nfrom datamodel_code_generator.model.pydantic_v2.imports import IMPORT_CONFIG_DICT\nfrom datamodel_code_generator.reference import Reference\nfrom datamodel_code_generator.util import field_validator, model_validator\n\nif TYPE_CHECKING:\n from typing_extensions import Literal\nelse:\n try:\n from typing import Literal\n except ImportError:\n from typing_extensions import Literal\n\n\nclass Constraints(_Constraints):\n # To override existing pattern alias\n regex: Optional[str] = Field(None, alias='regex')\n pattern: Optional[str] = Field(None, alias='pattern')\n\n @model_validator(mode='before')\n def validate_min_max_items(cls, values: Any) -> Dict[str, Any]:\n if not isinstance(values, dict): # pragma: no cover\n return values\n min_items = values.pop('minItems', None)\n if min_items is not None:\n values['minLength'] = min_items\n max_items = values.pop('maxItems', None)\n if max_items is not None:\n values['maxLength'] = max_items\n return values\n\n\nclass DataModelField(DataModelFieldV1):\n _EXCLUDE_FIELD_KEYS: ClassVar[Set[str]] = {\n 'alias',\n 'default',\n 'gt',\n 'ge',\n 'lt',\n 'le',\n 'multiple_of',\n 'min_length',\n 'max_length',\n 'pattern',\n }\n constraints: Optional[Constraints] = None\n _PARSE_METHOD: ClassVar[str] = 'model_validate'\n\n @field_validator('extras')\n def validate_extras(cls, values: Any) -> Dict[str, Any]:\n if not isinstance(values, dict):\n return values\n if 'examples' in values:\n return values\n\n if 'example' in values:\n values['examples'] = [values.pop('example')]\n return values\n\n def process_const(self) -> None:\n if 'const' not in self.extras:\n return None\n self.const = True\n self.nullable = False\n const = self.extras['const']\n self.data_type = self.data_type.__class__(literals=[const])\n if not self.default:\n self.default = const\n\n def _process_data_in_str(self, 
data: Dict[str, Any]) -> None:\n if self.const:\n # const is removed in pydantic 2.0\n data.pop('const')\n\n # unique_items is not supported in pydantic 2.0\n data.pop('unique_items', None)\n\n def _process_annotated_field_arguments(\n self, field_arguments: List[str]\n ) -> List[str]:\n if not self.required or self.const:\n if self.use_default_kwarg:\n return [\n f'default={repr(self.default)}',\n *field_arguments,\n ]\n else:\n # TODO: Allow '=' style default for v1?\n return [f'{repr(self.default)}', *field_arguments]\n return field_arguments\n\n\nclass ConfigAttribute(NamedTuple):\n from_: str\n to: str\n invert: bool\n\n\nclass BaseModel(BaseModelBase):\n TEMPLATE_FILE_PATH: ClassVar[str] = 'pydantic_v2/BaseModel.jinja2'\n BASE_CLASS: ClassVar[str] = 'pydantic.BaseModel'\n CONFIG_ATTRIBUTES: ClassVar[List[ConfigAttribute]] = [\n ConfigAttribute('allow_population_by_field_name', 'populate_by_name', False),\n ConfigAttribute('populate_by_name', 'populate_by_name', False),\n ConfigAttribute('allow_mutation', 'frozen', True),\n ConfigAttribute('frozen', 'frozen', False),\n ]\n\n def __init__(\n self,\n *,\n reference: Reference,\n fields: List[DataModelFieldBase],\n decorators: Optional[List[str]] = None,\n base_classes: Optional[List[Reference]] = None,\n custom_base_class: Optional[str] = None,\n custom_template_dir: Optional[Path] = None,\n extra_template_data: Optional[DefaultDict[str, Any]] = None,\n path: Optional[Path] = None,\n description: Optional[str] = None,\n default: Any = UNDEFINED,\n nullable: bool = False,\n ) -> None:\n super().__init__(\n reference=reference,\n fields=fields,\n decorators=decorators,\n base_classes=base_classes,\n custom_base_class=custom_base_class,\n custom_template_dir=custom_template_dir,\n extra_template_data=extra_template_data,\n path=path,\n description=description,\n default=default,\n nullable=nullable,\n )\n config_parameters: Dict[str, Any] = {}\n\n extra = self._get_config_extra()\n if extra:\n config_parameters['extra'] = extra\n\n for from_, to, invert in self.CONFIG_ATTRIBUTES:\n if from_ in self.extra_template_data:\n config_parameters[to] = (\n not self.extra_template_data[from_]\n if invert\n else self.extra_template_data[from_]\n )\n for data_type in self.all_data_types:\n if data_type.is_custom_type:\n config_parameters['arbitrary_types_allowed'] = True\n break\n\n if isinstance(self.extra_template_data.get('config'), dict):\n for key, value in self.extra_template_data['config'].items():\n config_parameters[key] = value\n\n if config_parameters:\n from datamodel_code_generator.model.pydantic_v2 import ConfigDict\n\n self.extra_template_data['config'] = ConfigDict.parse_obj(config_parameters)\n self._additional_imports.append(IMPORT_CONFIG_DICT)\n\n def _get_config_extra(self) -> Optional[Literal[\"'allow'\", \"'forbid'\"]]:\n additionalProperties = self.extra_template_data.get('additionalProperties')\n allow_extra_fields = self.extra_template_data.get('allow_extra_fields')\n if additionalProperties is not None or allow_extra_fields:\n return (\n \"'allow'\" if additionalProperties or allow_extra_fields else \"'forbid'\"\n )\n return None\n", "path": "datamodel_code_generator/model/pydantic_v2/base_model.py"}]}
2,556
283
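The datamodel-code-generator record above makes a JSON Schema `const` feed both the `Literal` type and the field default. A small pydantic v2 check of the two generated fields from that issue makes the difference concrete; `validate_default=True` is added here only to surface the mismatch and is not part of the generated models:

```python
# Compares the field the issue reports (default clobbered to None) with the
# field the golden diff produces (const kept as default), under pydantic v2.
from typing import Literal

from pydantic import BaseModel, ConfigDict, ValidationError


class Clobbered(BaseModel):
    # Old --force-optional output from the issue: Literal type, None default.
    model_config = ConfigDict(validate_default=True)
    foo: Literal['foo'] = None


class Fixed(BaseModel):
    # Output after the golden diff: the const value survives as the default.
    foo: Literal['foo'] = 'foo'


print(Fixed())  # foo='foo'
try:
    Clobbered()
except ValidationError as exc:
    print(exc)  # e.g. "Input should be 'foo'" (literal_error) for input_value=None
```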
gh_patches_debug_16863
rasdani/github-patches
git_diff
TencentBlueKing__bk-user-1192
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- feat: add stringify_pydantic_error util --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/bk-user/bkuser/common/passwd/__init__.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available. 4 Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. 5 Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at http://opensource.org/licenses/MIT 7 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on 8 an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the 9 specific language governing permissions and limitations under the License. 10 """ 11 from .exceptions import PasswordStrengthError 12 from .generator import PasswordGenerator 13 from .models import PasswordRule, ValidateResult 14 from .validator import PasswordValidator 15 16 __all__ = [ 17 # 密码规则 18 "PasswordRule", 19 # 密码生成器 20 "PasswordGenerator", 21 # 密码强度校验器 22 "PasswordValidator", 23 # 密码校验结果 24 "ValidateResult", 25 # 密码强度过低异常 26 "PasswordStrengthError", 27 ] 28 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/bk-user/bkuser/common/passwd/__init__.py b/src/bk-user/bkuser/common/passwd/__init__.py --- a/src/bk-user/bkuser/common/passwd/__init__.py +++ b/src/bk-user/bkuser/common/passwd/__init__.py @@ -8,7 +8,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -from .exceptions import PasswordStrengthError +from .exceptions import PasswordGenerateError, PasswordStrengthError from .generator import PasswordGenerator from .models import PasswordRule, ValidateResult from .validator import PasswordValidator @@ -24,4 +24,6 @@ "ValidateResult", # 密码强度过低异常 "PasswordStrengthError", + # 不合理的规则导致生成密码失败 + "PasswordGenerateError", ]
{"golden_diff": "diff --git a/src/bk-user/bkuser/common/passwd/__init__.py b/src/bk-user/bkuser/common/passwd/__init__.py\n--- a/src/bk-user/bkuser/common/passwd/__init__.py\n+++ b/src/bk-user/bkuser/common/passwd/__init__.py\n@@ -8,7 +8,7 @@\n an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\n specific language governing permissions and limitations under the License.\n \"\"\"\n-from .exceptions import PasswordStrengthError\n+from .exceptions import PasswordGenerateError, PasswordStrengthError\n from .generator import PasswordGenerator\n from .models import PasswordRule, ValidateResult\n from .validator import PasswordValidator\n@@ -24,4 +24,6 @@\n \"ValidateResult\",\n # \u5bc6\u7801\u5f3a\u5ea6\u8fc7\u4f4e\u5f02\u5e38\n \"PasswordStrengthError\",\n+ # \u4e0d\u5408\u7406\u7684\u89c4\u5219\u5bfc\u81f4\u751f\u6210\u5bc6\u7801\u5931\u8d25\n+ \"PasswordGenerateError\",\n ]\n", "issue": "feat: add stringify_pydantic_error util\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom .exceptions import PasswordStrengthError\nfrom .generator import PasswordGenerator\nfrom .models import PasswordRule, ValidateResult\nfrom .validator import PasswordValidator\n\n__all__ = [\n # \u5bc6\u7801\u89c4\u5219\n \"PasswordRule\",\n # \u5bc6\u7801\u751f\u6210\u5668\n \"PasswordGenerator\",\n # \u5bc6\u7801\u5f3a\u5ea6\u6821\u9a8c\u5668\n \"PasswordValidator\",\n # \u5bc6\u7801\u6821\u9a8c\u7ed3\u679c\n \"ValidateResult\",\n # \u5bc6\u7801\u5f3a\u5ea6\u8fc7\u4f4e\u5f02\u5e38\n \"PasswordStrengthError\",\n]\n", "path": "src/bk-user/bkuser/common/passwd/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nTencentBlueKing is pleased to support the open source community by making \u84dd\u9cb8\u667a\u4e91-\u7528\u6237\u7ba1\u7406(Bk-User) available.\nCopyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.\nLicensed under the MIT License (the \"License\"); you may not use this file except in compliance with the License.\nYou may obtain a copy of the License at http://opensource.org/licenses/MIT\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on\nan \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the\nspecific language governing permissions and limitations under the License.\n\"\"\"\nfrom .exceptions import PasswordGenerateError, PasswordStrengthError\nfrom .generator import PasswordGenerator\nfrom .models import PasswordRule, ValidateResult\nfrom .validator import PasswordValidator\n\n__all__ = [\n # \u5bc6\u7801\u89c4\u5219\n \"PasswordRule\",\n # \u5bc6\u7801\u751f\u6210\u5668\n \"PasswordGenerator\",\n # \u5bc6\u7801\u5f3a\u5ea6\u6821\u9a8c\u5668\n \"PasswordValidator\",\n # \u5bc6\u7801\u6821\u9a8c\u7ed3\u679c\n \"ValidateResult\",\n # \u5bc6\u7801\u5f3a\u5ea6\u8fc7\u4f4e\u5f02\u5e38\n \"PasswordStrengthError\",\n # \u4e0d\u5408\u7406\u7684\u89c4\u5219\u5bfc\u81f4\u751f\u6210\u5bc6\u7801\u5931\u8d25\n \"PasswordGenerateError\",\n]\n", "path": "src/bk-user/bkuser/common/passwd/__init__.py"}]}
597
212
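The bk-user record above only re-exports `PasswordGenerateError`, while its issue title names a `stringify_pydantic_error` util. As a generic sketch of what such a helper usually looks like — this is not the bk-user implementation, which the record does not show, and the name/formatting are illustrative assumptions:

```python
# Generic sketch of a helper that flattens a pydantic ValidationError into one
# readable string; not taken from the bk-user codebase.
from pydantic import BaseModel, ValidationError


def stringify_pydantic_error(exc: ValidationError) -> str:
    parts = []
    for err in exc.errors():
        loc = ".".join(str(piece) for piece in err["loc"]) or "<root>"
        parts.append(f"{loc}: {err['msg']}")
    return "; ".join(parts)


class Demo(BaseModel):
    name: str


try:
    Demo()
except ValidationError as exc:
    print(stringify_pydantic_error(exc))  # name: Field required
```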
gh_patches_debug_17775
rasdani/github-patches
git_diff
ocf__ocfweb-131
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CSRF token PR fails in prod due to referer checks The cookies seem to be working properly, but referer checks are failing? ``` Jan 31 18:46:57 coma gunicorn[23653]: Forbidden (Referer checking failed - https://www.ocf.berkeley.edu/account/register/ does not match any trusted origins.): /account/register/ ``` Explained in the docs: https://docs.djangoproject.com/en/dev/ref/csrf/#how-it-works Maybe the referer header is not what we think it is due to the proxying? I reverted my change in 89a8931ff0fe9e511905780a42be24a63b1d5c9a --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ocfweb/settings.py` Content: ``` 1 import configparser 2 import os 3 from getpass import getuser 4 5 from django.template.base import TemplateSyntaxError 6 7 8 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 9 10 SECRET_KEY = 'not_a_secret' 11 DEBUG = True 12 13 ALLOWED_HOSTS = [ 14 'www.ocf.berkeley.edu', 15 'dev.ocf.berkeley.edu', 16 'dev-www.ocf.berkeley.edu', 17 'ocfweb.ocf.berkeley.edu', 18 ] 19 20 INSTALLED_APPS = ( 21 'bootstrapform', 22 'django.contrib.humanize', 23 'django.contrib.messages', 24 'django.contrib.sessions', 25 'django.contrib.staticfiles', 26 'mathfilters', 27 'ocfweb', 28 'ocfweb.about', 29 'ocfweb.account', 30 'ocfweb.docs', 31 'ocfweb.login', 32 'ocfweb.main', 33 'ocfweb.middleware', 34 'ocfweb.stats', 35 'ocfweb.test', 36 ) 37 38 MIDDLEWARE_CLASSES = ( 39 'django.contrib.sessions.middleware.SessionMiddleware', 40 'django.middleware.common.CommonMiddleware', 41 'django.middleware.csrf.CsrfViewMiddleware', 42 'django.contrib.messages.middleware.MessageMiddleware', 43 'django.middleware.clickjacking.XFrameOptionsMiddleware', 44 'ocfweb.middleware.errors.OcflibErrorMiddleware', 45 ) 46 47 SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') 48 ROOT_URLCONF = 'ocfweb.urls' 49 50 51 class InvalidReferenceInTemplate(str): 52 """Raise exceptions on invalid references in templates. 53 54 By default Django just replaces references to undefined variables with 55 empty strings. This is a horrible idea, so we instead hack it to raise an 56 exception. 57 """ 58 59 def __mod__(self, ref): 60 raise TemplateSyntaxError('Invalid reference in template: {}'.format(ref)) 61 62 63 TEMPLATES = [{ 64 'BACKEND': 'django.template.backends.django.DjangoTemplates', 65 'DIRS': [], 66 'APP_DIRS': True, 67 'OPTIONS': { 68 'context_processors': [ 69 'django.template.context_processors.request', 70 'django.contrib.messages.context_processors.messages', 71 'ocfweb.context_processors.ocf_template_processor', 72 ], 73 'string_if_invalid': InvalidReferenceInTemplate('%s'), 74 }, 75 }] 76 77 WSGI_APPLICATION = 'ocfweb.wsgi.application' 78 79 DATABASES = {} 80 81 # store sessions in the cache 82 SESSION_ENGINE = 'django.contrib.sessions.backends.cache' 83 84 # XXX: DO NOT CHANGE 85 # Ensure cookies can't be read by JavaScript. 86 SESSION_COOKIE_HTTPONLY = True 87 SESSION_COOKIE_SECURE = False 88 SESSION_COOKIE_PATH = '/' 89 SESSION_COOKIE_NAME = 'OCFWEB_SESSIONID' 90 91 CACHES = { # sessions are stored here 92 'default': { 93 # on dev, we use a file-backed cache so that you don't get logged out 94 # every time you update code and the server restarts. 
95 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', 96 'LOCATION': os.path.expanduser('~/.ocfweb-cache'), 97 }, 98 'TIMEOUT': 60 * 60 * 12, # 12 hours 99 'OPTIONS': { 100 'MAX_ENTRIES': 1000, 101 }, 102 } 103 104 LANGUAGE_CODE = 'en-us' 105 TIME_ZONE = 'America/Los_Angeles' 106 USE_I18N = False 107 USE_L10N = False 108 USE_TZ = True 109 110 STATIC_URL = '/static/' 111 os.environ.setdefault('OCFWEB_STATIC_ROOT', '') 112 STATIC_ROOT = os.environ['OCFWEB_STATIC_ROOT'] 113 114 X_FRAME_OPTIONS = 'DENY' 115 116 # log exceptions to stderr 117 LOGGING = { 118 'version': 1, 119 'disable_existing_loggers': False, 120 'handlers': { 121 'console': { 122 'class': 'logging.StreamHandler', 123 }, 124 }, 125 'loggers': { 126 'django': { 127 'handlers': ['console'], 128 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'), 129 }, 130 }, 131 } 132 133 CELERY_BROKER = 'redis://create' 134 CELERY_BACKEND = 'redis://create' 135 136 if getuser() == 'ocfweb': 137 # not running in development, override options from config file 138 conf = configparser.ConfigParser() 139 conf.read('/etc/ocfweb/ocfweb.conf') 140 141 SECRET_KEY = conf.get('django', 'secret') 142 DEBUG = conf.getboolean('django', 'debug') 143 144 STATIC_URL = conf.get('django', 'static_url') 145 STATIC_ROOT = conf.get('django', 'static_root') 146 147 CELERY_BROKER = conf.get('celery', 'broker') 148 CELERY_BACKEND = conf.get('celery', 'backend') 149 150 # on prod, we use Redis as a cache 151 CACHES['default'] = { 152 'BACKEND': 'django_redis.cache.RedisCache', 153 'LOCATION': 'redis://localhost:6379/0', 154 'OPTIONS': { 155 'CLIENT_CLASS': 'django_redis.client.DefaultClient', 156 } 157 } 158 159 SESSION_COOKIE_SECURE = True 160 SESSION_COOKIE_DOMAIN = 'www.ocf.berkeley.edu' 161 else: 162 # running in development 163 164 # try to read celery values used by approve 165 # (only works on supernova by staff members) 166 try: 167 conf = configparser.ConfigParser() 168 conf.read('/etc/ocf-create/ocf-create.conf') 169 CELERY_BROKER = conf.get('celery', 'broker') 170 CELERY_BACKEND = conf.get('celery', 'backend') 171 except configparser.NoSectionError: 172 pass 173 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ocfweb/settings.py b/ocfweb/settings.py --- a/ocfweb/settings.py +++ b/ocfweb/settings.py @@ -82,7 +82,13 @@ SESSION_ENGINE = 'django.contrib.sessions.backends.cache' # XXX: DO NOT CHANGE -# Ensure cookies can't be read by JavaScript. +# Ensure cookies can't be read by JavaScript or users. +# Our proxy filters cookies starting with "OCFWEB_" when going to user sites, +# so it's important our cookies match this pattern. +CSRF_COOKIE_HTTPONLY = True +CSRF_COOKIE_SECURE = False +CSRF_COOKIE_PATH = '/' +CSRF_COOKIE_NAME = 'OCFWEB_CSRF_TOKEN' SESSION_COOKIE_HTTPONLY = True SESSION_COOKIE_SECURE = False SESSION_COOKIE_PATH = '/' @@ -156,6 +162,9 @@ } } + CSRF_COOKIE_SECURE = True + CSRF_COOKIE_DOMAIN = 'www.ocf.berkeley.edu' + CSRF_TRUSTED_ORIGINS = ['www.ocf.berkeley.edu'] SESSION_COOKIE_SECURE = True SESSION_COOKIE_DOMAIN = 'www.ocf.berkeley.edu' else:
{"golden_diff": "diff --git a/ocfweb/settings.py b/ocfweb/settings.py\n--- a/ocfweb/settings.py\n+++ b/ocfweb/settings.py\n@@ -82,7 +82,13 @@\n SESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n \n # XXX: DO NOT CHANGE\n-# Ensure cookies can't be read by JavaScript.\n+# Ensure cookies can't be read by JavaScript or users.\n+# Our proxy filters cookies starting with \"OCFWEB_\" when going to user sites,\n+# so it's important our cookies match this pattern.\n+CSRF_COOKIE_HTTPONLY = True\n+CSRF_COOKIE_SECURE = False\n+CSRF_COOKIE_PATH = '/'\n+CSRF_COOKIE_NAME = 'OCFWEB_CSRF_TOKEN'\n SESSION_COOKIE_HTTPONLY = True\n SESSION_COOKIE_SECURE = False\n SESSION_COOKIE_PATH = '/'\n@@ -156,6 +162,9 @@\n }\n }\n \n+ CSRF_COOKIE_SECURE = True\n+ CSRF_COOKIE_DOMAIN = 'www.ocf.berkeley.edu'\n+ CSRF_TRUSTED_ORIGINS = ['www.ocf.berkeley.edu']\n SESSION_COOKIE_SECURE = True\n SESSION_COOKIE_DOMAIN = 'www.ocf.berkeley.edu'\n else:\n", "issue": "CSRF token PR fails in prod due to referer checks\nThe cookies seem to be working properly, but referer checks are failing?\n\n```\nJan 31 18:46:57 coma gunicorn[23653]: Forbidden (Referer checking failed - https://www.ocf.berkeley.edu/account/register/ does not match any trusted origins.): /account/register/\n```\n\nExplained in the docs:\nhttps://docs.djangoproject.com/en/dev/ref/csrf/#how-it-works\n\nMaybe the referer header is not what we think it is due to the proxying?\n\nI reverted my change in 89a8931ff0fe9e511905780a42be24a63b1d5c9a\n\n", "before_files": [{"content": "import configparser\nimport os\nfrom getpass import getuser\n\nfrom django.template.base import TemplateSyntaxError\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = 'not_a_secret'\nDEBUG = True\n\nALLOWED_HOSTS = [\n 'www.ocf.berkeley.edu',\n 'dev.ocf.berkeley.edu',\n 'dev-www.ocf.berkeley.edu',\n 'ocfweb.ocf.berkeley.edu',\n]\n\nINSTALLED_APPS = (\n 'bootstrapform',\n 'django.contrib.humanize',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'mathfilters',\n 'ocfweb',\n 'ocfweb.about',\n 'ocfweb.account',\n 'ocfweb.docs',\n 'ocfweb.login',\n 'ocfweb.main',\n 'ocfweb.middleware',\n 'ocfweb.stats',\n 'ocfweb.test',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'ocfweb.middleware.errors.OcflibErrorMiddleware',\n)\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nROOT_URLCONF = 'ocfweb.urls'\n\n\nclass InvalidReferenceInTemplate(str):\n \"\"\"Raise exceptions on invalid references in templates.\n\n By default Django just replaces references to undefined variables with\n empty strings. 
This is a horrible idea, so we instead hack it to raise an\n exception.\n \"\"\"\n\n def __mod__(self, ref):\n raise TemplateSyntaxError('Invalid reference in template: {}'.format(ref))\n\n\nTEMPLATES = [{\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'ocfweb.context_processors.ocf_template_processor',\n ],\n 'string_if_invalid': InvalidReferenceInTemplate('%s'),\n },\n}]\n\nWSGI_APPLICATION = 'ocfweb.wsgi.application'\n\nDATABASES = {}\n\n# store sessions in the cache\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n# XXX: DO NOT CHANGE\n# Ensure cookies can't be read by JavaScript.\nSESSION_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SECURE = False\nSESSION_COOKIE_PATH = '/'\nSESSION_COOKIE_NAME = 'OCFWEB_SESSIONID'\n\nCACHES = { # sessions are stored here\n 'default': {\n # on dev, we use a file-backed cache so that you don't get logged out\n # every time you update code and the server restarts.\n 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n 'LOCATION': os.path.expanduser('~/.ocfweb-cache'),\n },\n 'TIMEOUT': 60 * 60 * 12, # 12 hours\n 'OPTIONS': {\n 'MAX_ENTRIES': 1000,\n },\n}\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/Los_Angeles'\nUSE_I18N = False\nUSE_L10N = False\nUSE_TZ = True\n\nSTATIC_URL = '/static/'\nos.environ.setdefault('OCFWEB_STATIC_ROOT', '')\nSTATIC_ROOT = os.environ['OCFWEB_STATIC_ROOT']\n\nX_FRAME_OPTIONS = 'DENY'\n\n# log exceptions to stderr\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nCELERY_BROKER = 'redis://create'\nCELERY_BACKEND = 'redis://create'\n\nif getuser() == 'ocfweb':\n # not running in development, override options from config file\n conf = configparser.ConfigParser()\n conf.read('/etc/ocfweb/ocfweb.conf')\n\n SECRET_KEY = conf.get('django', 'secret')\n DEBUG = conf.getboolean('django', 'debug')\n\n STATIC_URL = conf.get('django', 'static_url')\n STATIC_ROOT = conf.get('django', 'static_root')\n\n CELERY_BROKER = conf.get('celery', 'broker')\n CELERY_BACKEND = conf.get('celery', 'backend')\n\n # on prod, we use Redis as a cache\n CACHES['default'] = {\n 'BACKEND': 'django_redis.cache.RedisCache',\n 'LOCATION': 'redis://localhost:6379/0',\n 'OPTIONS': {\n 'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n }\n }\n\n SESSION_COOKIE_SECURE = True\n SESSION_COOKIE_DOMAIN = 'www.ocf.berkeley.edu'\nelse:\n # running in development\n\n # try to read celery values used by approve\n # (only works on supernova by staff members)\n try:\n conf = configparser.ConfigParser()\n conf.read('/etc/ocf-create/ocf-create.conf')\n CELERY_BROKER = conf.get('celery', 'broker')\n CELERY_BACKEND = conf.get('celery', 'backend')\n except configparser.NoSectionError:\n pass\n", "path": "ocfweb/settings.py"}], "after_files": [{"content": "import configparser\nimport os\nfrom getpass import getuser\n\nfrom django.template.base import TemplateSyntaxError\n\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = 'not_a_secret'\nDEBUG = True\n\nALLOWED_HOSTS = [\n 'www.ocf.berkeley.edu',\n 'dev.ocf.berkeley.edu',\n 'dev-www.ocf.berkeley.edu',\n 'ocfweb.ocf.berkeley.edu',\n]\n\nINSTALLED_APPS = (\n 
'bootstrapform',\n 'django.contrib.humanize',\n 'django.contrib.messages',\n 'django.contrib.sessions',\n 'django.contrib.staticfiles',\n 'mathfilters',\n 'ocfweb',\n 'ocfweb.about',\n 'ocfweb.account',\n 'ocfweb.docs',\n 'ocfweb.login',\n 'ocfweb.main',\n 'ocfweb.middleware',\n 'ocfweb.stats',\n 'ocfweb.test',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'ocfweb.middleware.errors.OcflibErrorMiddleware',\n)\n\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nROOT_URLCONF = 'ocfweb.urls'\n\n\nclass InvalidReferenceInTemplate(str):\n \"\"\"Raise exceptions on invalid references in templates.\n\n By default Django just replaces references to undefined variables with\n empty strings. This is a horrible idea, so we instead hack it to raise an\n exception.\n \"\"\"\n\n def __mod__(self, ref):\n raise TemplateSyntaxError('Invalid reference in template: {}'.format(ref))\n\n\nTEMPLATES = [{\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.request',\n 'django.contrib.messages.context_processors.messages',\n 'ocfweb.context_processors.ocf_template_processor',\n ],\n 'string_if_invalid': InvalidReferenceInTemplate('%s'),\n },\n}]\n\nWSGI_APPLICATION = 'ocfweb.wsgi.application'\n\nDATABASES = {}\n\n# store sessions in the cache\nSESSION_ENGINE = 'django.contrib.sessions.backends.cache'\n\n# XXX: DO NOT CHANGE\n# Ensure cookies can't be read by JavaScript or users.\n# Our proxy filters cookies starting with \"OCFWEB_\" when going to user sites,\n# so it's important our cookies match this pattern.\nCSRF_COOKIE_HTTPONLY = True\nCSRF_COOKIE_SECURE = False\nCSRF_COOKIE_PATH = '/'\nCSRF_COOKIE_NAME = 'OCFWEB_CSRF_TOKEN'\nSESSION_COOKIE_HTTPONLY = True\nSESSION_COOKIE_SECURE = False\nSESSION_COOKIE_PATH = '/'\nSESSION_COOKIE_NAME = 'OCFWEB_SESSIONID'\n\nCACHES = { # sessions are stored here\n 'default': {\n # on dev, we use a file-backed cache so that you don't get logged out\n # every time you update code and the server restarts.\n 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',\n 'LOCATION': os.path.expanduser('~/.ocfweb-cache'),\n },\n 'TIMEOUT': 60 * 60 * 12, # 12 hours\n 'OPTIONS': {\n 'MAX_ENTRIES': 1000,\n },\n}\n\nLANGUAGE_CODE = 'en-us'\nTIME_ZONE = 'America/Los_Angeles'\nUSE_I18N = False\nUSE_L10N = False\nUSE_TZ = True\n\nSTATIC_URL = '/static/'\nos.environ.setdefault('OCFWEB_STATIC_ROOT', '')\nSTATIC_ROOT = os.environ['OCFWEB_STATIC_ROOT']\n\nX_FRAME_OPTIONS = 'DENY'\n\n# log exceptions to stderr\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n },\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),\n },\n },\n}\n\nCELERY_BROKER = 'redis://create'\nCELERY_BACKEND = 'redis://create'\n\nif getuser() == 'ocfweb':\n # not running in development, override options from config file\n conf = configparser.ConfigParser()\n conf.read('/etc/ocfweb/ocfweb.conf')\n\n SECRET_KEY = conf.get('django', 'secret')\n DEBUG = conf.getboolean('django', 'debug')\n\n STATIC_URL = conf.get('django', 'static_url')\n STATIC_ROOT = conf.get('django', 'static_root')\n\n 
CELERY_BROKER = conf.get('celery', 'broker')\n CELERY_BACKEND = conf.get('celery', 'backend')\n\n # on prod, we use Redis as a cache\n CACHES['default'] = {\n 'BACKEND': 'django_redis.cache.RedisCache',\n 'LOCATION': 'redis://localhost:6379/0',\n 'OPTIONS': {\n 'CLIENT_CLASS': 'django_redis.client.DefaultClient',\n }\n }\n\n CSRF_COOKIE_SECURE = True\n CSRF_COOKIE_DOMAIN = 'www.ocf.berkeley.edu'\n CSRF_TRUSTED_ORIGINS = ['www.ocf.berkeley.edu']\n SESSION_COOKIE_SECURE = True\n SESSION_COOKIE_DOMAIN = 'www.ocf.berkeley.edu'\nelse:\n # running in development\n\n # try to read celery values used by approve\n # (only works on supernova by staff members)\n try:\n conf = configparser.ConfigParser()\n conf.read('/etc/ocf-create/ocf-create.conf')\n CELERY_BROKER = conf.get('celery', 'broker')\n CELERY_BACKEND = conf.get('celery', 'backend')\n except configparser.NoSectionError:\n pass\n", "path": "ocfweb/settings.py"}]}
num_tokens: 2,042
num_tokens_diff: 259
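A minimal sketch related to the ocfweb row above: the golden diff makes the CSRF cookie follow the same `OCFWEB_` naming pattern as the session cookie so the proxy's cookie filter keeps matching, and registers the production host in `CSRF_TRUSTED_ORIGINS`. The helper below is an illustrative assumption (not part of ocfweb) that checks a Django-style settings mapping for that prefix convention.

```python
# Illustrative check (assumed helper, not ocfweb code): verify that every
# *_COOKIE_NAME value in a settings mapping carries the proxy's prefix.
COOKIE_PREFIX = "OCFWEB_"

PATCHED_COOKIE_SETTINGS = {
    "SESSION_COOKIE_NAME": "OCFWEB_SESSIONID",
    "CSRF_COOKIE_NAME": "OCFWEB_CSRF_TOKEN",
}

def cookies_match_proxy_filter(settings: dict, prefix: str = COOKIE_PREFIX) -> bool:
    """True when all *_COOKIE_NAME values start with the prefix the proxy filters."""
    return all(
        value.startswith(prefix)
        for key, value in settings.items()
        if key.endswith("_COOKIE_NAME")
    )

if __name__ == "__main__":
    assert cookies_match_proxy_filter(PATCHED_COOKIE_SETTINGS)
    print("cookie names follow the OCFWEB_ prefix convention")
```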
problem_id: gh_patches_debug_23596
source: rasdani/github-patches
task_type: git_diff
in_source_id: rasterio__rasterio-1851
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- WarpedVRT does not permit boundless reads (sample.py) ## Expected behavior and actual behavior. ``` def test_rasterio_vrt(self): import rasterio # tmp_file default crs is UTM: CRS({'init': 'epsg:32618'} with create_tmp_geotiff() as (tmp_file, expected): with rasterio.open(tmp_file) as src: with rasterio.vrt.WarpedVRT(src, crs="epsg:4326") as vrt: expected_shape = (vrt.width, vrt.height) expected_crs = vrt.crs expected_res = vrt.res # Value of single pixel in center of image lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2) > expected_val = next(vrt.sample([(lon, lat)])) test/integration/test_integration__io.py:799: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../../miniconda/envs/test/lib/python3.7/site-packages/rasterio/sample.py:43: in sample_gen data = read(indexes, window=window, masked=masked, boundless=True) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ > ??? E ValueError: WarpedVRT does not permit boundless reads ``` ## Operating system For example: Mac OS, Windows, Linux ## Rasterio version and provenance 1.1.1 from conda-forge --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rasterio/sample.py` Content: ``` 1 # Workaround for issue #378. A pure Python generator. 2 3 import numpy 4 5 from rasterio.windows import Window 6 7 8 def sample_gen(dataset, xy, indexes=None, masked=False): 9 """Sample pixels from a dataset 10 11 Parameters 12 ---------- 13 dataset : rasterio Dataset 14 Opened in "r" mode. 15 xy : iterable 16 Pairs of x, y coordinates in the dataset's reference system. 17 indexes : int or list of int 18 Indexes of dataset bands to sample. 19 masked : bool, default: False 20 Whether to mask samples that fall outside the extent of the 21 dataset. 22 23 Yields 24 ------ 25 array 26 A array of length equal to the number of specified indexes 27 containing the dataset values for the bands corresponding to 28 those indexes. 29 30 """ 31 index = dataset.index 32 read = dataset.read 33 34 if isinstance(indexes, int): 35 indexes = [indexes] 36 37 for x, y in xy: 38 row_off, col_off = index(x, y) 39 # if row_off < 0 or col_off < 0: 40 # yield numpy.ones((dataset.count,), dtype=dataset.dtypes[0]) * dataset.nodata 41 # else: 42 window = Window(col_off, row_off, 1, 1) 43 data = read(indexes, window=window, masked=masked, boundless=True) 44 yield data[:, 0, 0] 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rasterio/sample.py b/rasterio/sample.py --- a/rasterio/sample.py +++ b/rasterio/sample.py @@ -2,6 +2,7 @@ import numpy +from rasterio.enums import MaskFlags from rasterio.windows import Window @@ -31,14 +32,24 @@ index = dataset.index read = dataset.read - if isinstance(indexes, int): + if indexes is None: + indexes = dataset.indexes + elif isinstance(indexes, int): indexes = [indexes] for x, y in xy: + row_off, col_off = index(x, y) -# if row_off < 0 or col_off < 0: -# yield numpy.ones((dataset.count,), dtype=dataset.dtypes[0]) * dataset.nodata -# else: - window = Window(col_off, row_off, 1, 1) - data = read(indexes, window=window, masked=masked, boundless=True) - yield data[:, 0, 0] + + if row_off < 0 or col_off < 0 or row_off >= dataset.height or col_off >= dataset.width: + data = numpy.ones((len(indexes),), dtype=dataset.dtypes[0]) * (dataset.nodata or 0) + if masked: + mask = [False if MaskFlags.all_valid in dataset.mask_flag_enums[i - 1] else True for i in indexes] + yield numpy.ma.array(data, mask=mask) + else: + yield data + + else: + window = Window(col_off, row_off, 1, 1) + data = read(indexes, window=window, masked=masked) + yield data[:, 0, 0]
{"golden_diff": "diff --git a/rasterio/sample.py b/rasterio/sample.py\n--- a/rasterio/sample.py\n+++ b/rasterio/sample.py\n@@ -2,6 +2,7 @@\n \n import numpy\n \n+from rasterio.enums import MaskFlags\n from rasterio.windows import Window\n \n \n@@ -31,14 +32,24 @@\n index = dataset.index\n read = dataset.read\n \n- if isinstance(indexes, int):\n+ if indexes is None:\n+ indexes = dataset.indexes\n+ elif isinstance(indexes, int):\n indexes = [indexes]\n \n for x, y in xy:\n+\n row_off, col_off = index(x, y)\n-# if row_off < 0 or col_off < 0:\n-# yield numpy.ones((dataset.count,), dtype=dataset.dtypes[0]) * dataset.nodata\n-# else:\n- window = Window(col_off, row_off, 1, 1)\n- data = read(indexes, window=window, masked=masked, boundless=True)\n- yield data[:, 0, 0]\n+\n+ if row_off < 0 or col_off < 0 or row_off >= dataset.height or col_off >= dataset.width:\n+ data = numpy.ones((len(indexes),), dtype=dataset.dtypes[0]) * (dataset.nodata or 0)\n+ if masked:\n+ mask = [False if MaskFlags.all_valid in dataset.mask_flag_enums[i - 1] else True for i in indexes]\n+ yield numpy.ma.array(data, mask=mask)\n+ else:\n+ yield data\n+\n+ else:\n+ window = Window(col_off, row_off, 1, 1)\n+ data = read(indexes, window=window, masked=masked)\n+ yield data[:, 0, 0]\n", "issue": "WarpedVRT does not permit boundless reads (sample.py)\n## Expected behavior and actual behavior.\r\n\r\n```\r\n def test_rasterio_vrt(self):\r\n\r\n import rasterio\r\n\r\n \r\n\r\n # tmp_file default crs is UTM: CRS({'init': 'epsg:32618'}\r\n\r\n with create_tmp_geotiff() as (tmp_file, expected):\r\n\r\n with rasterio.open(tmp_file) as src:\r\n\r\n with rasterio.vrt.WarpedVRT(src, crs=\"epsg:4326\") as vrt:\r\n\r\n expected_shape = (vrt.width, vrt.height)\r\n\r\n expected_crs = vrt.crs\r\n\r\n expected_res = vrt.res\r\n\r\n # Value of single pixel in center of image\r\n\r\n lon, lat = vrt.xy(vrt.width // 2, vrt.height // 2)\r\n\r\n> expected_val = next(vrt.sample([(lon, lat)]))\r\n\r\ntest/integration/test_integration__io.py:799: \r\n\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\n../../../miniconda/envs/test/lib/python3.7/site-packages/rasterio/sample.py:43: in sample_gen\r\n\r\n data = read(indexes, window=window, masked=masked, boundless=True)\r\n\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\n> ???\r\n\r\nE ValueError: WarpedVRT does not permit boundless reads\r\n```\r\n\r\n## Operating system\r\n\r\nFor example: Mac OS, Windows, Linux\r\n\r\n## Rasterio version and provenance\r\n\r\n1.1.1 from conda-forge\n", "before_files": [{"content": "# Workaround for issue #378. 
A pure Python generator.\n\nimport numpy\n\nfrom rasterio.windows import Window\n\n\ndef sample_gen(dataset, xy, indexes=None, masked=False):\n \"\"\"Sample pixels from a dataset\n\n Parameters\n ----------\n dataset : rasterio Dataset\n Opened in \"r\" mode.\n xy : iterable\n Pairs of x, y coordinates in the dataset's reference system.\n indexes : int or list of int\n Indexes of dataset bands to sample.\n masked : bool, default: False\n Whether to mask samples that fall outside the extent of the\n dataset.\n\n Yields\n ------\n array\n A array of length equal to the number of specified indexes\n containing the dataset values for the bands corresponding to\n those indexes.\n\n \"\"\"\n index = dataset.index\n read = dataset.read\n\n if isinstance(indexes, int):\n indexes = [indexes]\n\n for x, y in xy:\n row_off, col_off = index(x, y)\n# if row_off < 0 or col_off < 0:\n# yield numpy.ones((dataset.count,), dtype=dataset.dtypes[0]) * dataset.nodata\n# else:\n window = Window(col_off, row_off, 1, 1)\n data = read(indexes, window=window, masked=masked, boundless=True)\n yield data[:, 0, 0]\n", "path": "rasterio/sample.py"}], "after_files": [{"content": "# Workaround for issue #378. A pure Python generator.\n\nimport numpy\n\nfrom rasterio.enums import MaskFlags\nfrom rasterio.windows import Window\n\n\ndef sample_gen(dataset, xy, indexes=None, masked=False):\n \"\"\"Sample pixels from a dataset\n\n Parameters\n ----------\n dataset : rasterio Dataset\n Opened in \"r\" mode.\n xy : iterable\n Pairs of x, y coordinates in the dataset's reference system.\n indexes : int or list of int\n Indexes of dataset bands to sample.\n masked : bool, default: False\n Whether to mask samples that fall outside the extent of the\n dataset.\n\n Yields\n ------\n array\n A array of length equal to the number of specified indexes\n containing the dataset values for the bands corresponding to\n those indexes.\n\n \"\"\"\n index = dataset.index\n read = dataset.read\n\n if indexes is None:\n indexes = dataset.indexes\n elif isinstance(indexes, int):\n indexes = [indexes]\n\n for x, y in xy:\n\n row_off, col_off = index(x, y)\n\n if row_off < 0 or col_off < 0 or row_off >= dataset.height or col_off >= dataset.width:\n data = numpy.ones((len(indexes),), dtype=dataset.dtypes[0]) * (dataset.nodata or 0)\n if masked:\n mask = [False if MaskFlags.all_valid in dataset.mask_flag_enums[i - 1] else True for i in indexes]\n yield numpy.ma.array(data, mask=mask)\n else:\n yield data\n\n else:\n window = Window(col_off, row_off, 1, 1)\n data = read(indexes, window=window, masked=masked)\n yield data[:, 0, 0]\n", "path": "rasterio/sample.py"}]}
num_tokens: 1,043
num_tokens_diff: 403
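A standalone sketch of the idea behind the rasterio fix above: since a `WarpedVRT` refuses boundless reads, the patched `sample_gen` checks the row/column offsets itself and substitutes a nodata fill for out-of-bounds points. The function below is a simplified stand-in operating on a plain NumPy array, not the rasterio API.

```python
# Simplified stand-in (not the rasterio API) for the bounds check that the
# patched sample_gen performs instead of requesting a boundless read.
import numpy as np

def sample_points(data: np.ndarray, rows_cols, nodata=0):
    """Yield a 1-element array per (row, col); out-of-bounds points get nodata."""
    height, width = data.shape
    for row, col in rows_cols:
        if row < 0 or col < 0 or row >= height or col >= width:
            yield np.array([nodata], dtype=data.dtype)
        else:
            yield data[row:row + 1, col]

if __name__ == "__main__":
    grid = np.arange(12, dtype=np.int32).reshape(3, 4)
    samples = list(sample_points(grid, [(1, 2), (-1, 0), (2, 3)], nodata=-9999))
    print([s.tolist() for s in samples])  # [[6], [-9999], [11]]
```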
problem_id: gh_patches_debug_38933
source: rasdani/github-patches
task_type: git_diff
in_source_id: alltheplaces__alltheplaces-2802
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider anthonys_restaurants is broken During the global build at 2021-06-16-14-42-20, spider **anthonys_restaurants** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/anthonys_restaurants.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/anthonys_restaurants.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/anthonys_restaurants.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/anthonys_restaurants.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 from locations.items import GeojsonPointItem 4 5 6 class AnthonysRestaurantsSpiders(scrapy.Spider): 7 name = "anthonys_restaurants" 8 item_attributes = { 'brand': "Anthony's" } 9 allowed_domains = ["www.anthonys.com"] 10 start_urls = ( 11 'https://www.anthonys.com/restaurants/search/47.6062095/-122.3320708/2000', 12 ) 13 14 def parse(self, response): 15 for match in response.xpath("//markers/marker"): 16 fullAddress=match.xpath('.//@address').extract_first().replace('<br />', ',') 17 18 # Accounts for cases with second address line 19 if(len(fullAddress.split(",")) == 4): 20 cityString = fullAddress.split(",")[2].strip() 21 stateString = fullAddress.split(",")[3].strip().split(" ")[0].strip() 22 postString = fullAddress.split(",")[3].strip().split(" ")[1].strip() 23 addrLineOne = fullAddress.split(",")[0].strip() 24 addrLineTwo = fullAddress.split(",")[1].strip() 25 addrString = addrLineOne + ", " + addrLineTwo 26 else: 27 cityString = fullAddress.split(",")[1].strip() 28 stateString = fullAddress.split(",")[2].strip().split(" ")[0].strip() 29 postString = fullAddress.split(",")[2].strip().split(" ")[1].strip() 30 addrString = fullAddress.split(",")[0] 31 32 yield GeojsonPointItem( 33 ref=match.xpath('.//@title').extract_first().strip(), 34 lat=match.xpath('.//@lat').extract_first().strip(), 35 lon=match.xpath('.//@lng').extract_first().strip(), 36 addr_full=addrString, 37 city=cityString, 38 state=stateString, 39 postcode=postString, 40 phone=match.xpath('.//@phone').extract_first().replace(" ", ""), 41 ) 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/anthonys_restaurants.py b/locations/spiders/anthonys_restaurants.py --- a/locations/spiders/anthonys_restaurants.py +++ b/locations/spiders/anthonys_restaurants.py @@ -1,41 +1,49 @@ # -*- coding: utf-8 -*- +import json +import re + import scrapy + from locations.items import GeojsonPointItem class AnthonysRestaurantsSpiders(scrapy.Spider): name = "anthonys_restaurants" - item_attributes = { 'brand': "Anthony's" } + item_attributes = {"brand": "Anthony's"} allowed_domains = ["www.anthonys.com"] - start_urls = ( - 'https://www.anthonys.com/restaurants/search/47.6062095/-122.3320708/2000', - ) + start_urls = ("https://www.anthonys.com/restaurants/",) def parse(self, response): - for match in response.xpath("//markers/marker"): - fullAddress=match.xpath('.//@address').extract_first().replace('<br />', ',') - - # Accounts for cases with second address line - if(len(fullAddress.split(",")) == 4): - cityString = fullAddress.split(",")[2].strip() - stateString = fullAddress.split(",")[3].strip().split(" ")[0].strip() - postString = fullAddress.split(",")[3].strip().split(" ")[1].strip() - addrLineOne = fullAddress.split(",")[0].strip() - addrLineTwo = fullAddress.split(",")[1].strip() - addrString = addrLineOne + ", " + addrLineTwo - else: - cityString = fullAddress.split(",")[1].strip() - stateString = fullAddress.split(",")[2].strip().split(" ")[0].strip() - postString = fullAddress.split(",")[2].strip().split(" ")[1].strip() - addrString = fullAddress.split(",")[0] - - yield GeojsonPointItem( - ref=match.xpath('.//@title').extract_first().strip(), - lat=match.xpath('.//@lat').extract_first().strip(), - lon=match.xpath('.//@lng').extract_first().strip(), - addr_full=addrString, - city=cityString, - state=stateString, - postcode=postString, - phone=match.xpath('.//@phone').extract_first().replace(" ", ""), - ) + script = response.css("#acf-block-locations-map-script-js-extra::text").get() + j = json.loads(script[script.find("{") : 1 + script.rfind("}")]) + for row in j["restaurants"]: + meta = {"json": row} + yield scrapy.Request(row["link"], meta=meta, callback=self.parse_location) + + def parse_location(self, response): + json_data = response.meta["json"] + address = json_data["address"] + # decode entities + name = scrapy.Selector(text=json_data["name"]).xpath("//text()").get() + + # These are weird enough that there's no hope of parsing them, but + # clean the text up + hours = response.xpath('//strong[text()="Hours:"]/../text()').extract() + hours = ';'.join(s.strip().replace('\xa0', ' ') for s in hours) + + properties = { + "ref": re.search(r"postid-(\d+)", response.css("body").attrib["class"])[1], + "lat": address["latitude"], + "lon": address["longitude"], + "addr_full": address["address"], + "city": address["city"], + "state": address["state"], + "postcode": address["zip_code"], + "name": name, + "website": response.url, + "phone": ( + response.xpath("//*[starts-with(@href, 'tel:')]/@href").get() or "" + )[4:], + "opening_hours": hours, + } + return GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/anthonys_restaurants.py b/locations/spiders/anthonys_restaurants.py\n--- a/locations/spiders/anthonys_restaurants.py\n+++ b/locations/spiders/anthonys_restaurants.py\n@@ -1,41 +1,49 @@\n # -*- coding: utf-8 -*-\n+import json\n+import re\n+\n import scrapy\n+\n from locations.items import GeojsonPointItem\n \n \n class AnthonysRestaurantsSpiders(scrapy.Spider):\n name = \"anthonys_restaurants\"\n- item_attributes = { 'brand': \"Anthony's\" }\n+ item_attributes = {\"brand\": \"Anthony's\"}\n allowed_domains = [\"www.anthonys.com\"]\n- start_urls = (\n- 'https://www.anthonys.com/restaurants/search/47.6062095/-122.3320708/2000',\n- )\n+ start_urls = (\"https://www.anthonys.com/restaurants/\",)\n \n def parse(self, response):\n- for match in response.xpath(\"//markers/marker\"):\n- fullAddress=match.xpath('.//@address').extract_first().replace('<br />', ',')\n-\n- # Accounts for cases with second address line\n- if(len(fullAddress.split(\",\")) == 4):\n- cityString = fullAddress.split(\",\")[2].strip()\n- stateString = fullAddress.split(\",\")[3].strip().split(\" \")[0].strip()\n- postString = fullAddress.split(\",\")[3].strip().split(\" \")[1].strip()\n- addrLineOne = fullAddress.split(\",\")[0].strip()\n- addrLineTwo = fullAddress.split(\",\")[1].strip()\n- addrString = addrLineOne + \", \" + addrLineTwo\n- else:\n- cityString = fullAddress.split(\",\")[1].strip()\n- stateString = fullAddress.split(\",\")[2].strip().split(\" \")[0].strip()\n- postString = fullAddress.split(\",\")[2].strip().split(\" \")[1].strip()\n- addrString = fullAddress.split(\",\")[0]\n-\n- yield GeojsonPointItem(\n- ref=match.xpath('.//@title').extract_first().strip(),\n- lat=match.xpath('.//@lat').extract_first().strip(),\n- lon=match.xpath('.//@lng').extract_first().strip(),\n- addr_full=addrString,\n- city=cityString,\n- state=stateString,\n- postcode=postString,\n- phone=match.xpath('.//@phone').extract_first().replace(\" \", \"\"),\n- )\n+ script = response.css(\"#acf-block-locations-map-script-js-extra::text\").get()\n+ j = json.loads(script[script.find(\"{\") : 1 + script.rfind(\"}\")])\n+ for row in j[\"restaurants\"]:\n+ meta = {\"json\": row}\n+ yield scrapy.Request(row[\"link\"], meta=meta, callback=self.parse_location)\n+\n+ def parse_location(self, response):\n+ json_data = response.meta[\"json\"]\n+ address = json_data[\"address\"]\n+ # decode entities\n+ name = scrapy.Selector(text=json_data[\"name\"]).xpath(\"//text()\").get()\n+\n+ # These are weird enough that there's no hope of parsing them, but\n+ # clean the text up\n+ hours = response.xpath('//strong[text()=\"Hours:\"]/../text()').extract()\n+ hours = ';'.join(s.strip().replace('\\xa0', ' ') for s in hours)\n+\n+ properties = {\n+ \"ref\": re.search(r\"postid-(\\d+)\", response.css(\"body\").attrib[\"class\"])[1],\n+ \"lat\": address[\"latitude\"],\n+ \"lon\": address[\"longitude\"],\n+ \"addr_full\": address[\"address\"],\n+ \"city\": address[\"city\"],\n+ \"state\": address[\"state\"],\n+ \"postcode\": address[\"zip_code\"],\n+ \"name\": name,\n+ \"website\": response.url,\n+ \"phone\": (\n+ response.xpath(\"//*[starts-with(@href, 'tel:')]/@href\").get() or \"\"\n+ )[4:],\n+ \"opening_hours\": hours,\n+ }\n+ return GeojsonPointItem(**properties)\n", "issue": "Spider anthonys_restaurants is broken\nDuring the global build at 2021-06-16-14-42-20, spider **anthonys_restaurants** failed with **0 features** and **0 errors**.\n\nHere's [the 
log](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/logs/anthonys_restaurants.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/anthonys_restaurants.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-16-14-42-20/output/anthonys_restaurants.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom locations.items import GeojsonPointItem\n\n\nclass AnthonysRestaurantsSpiders(scrapy.Spider):\n name = \"anthonys_restaurants\"\n item_attributes = { 'brand': \"Anthony's\" }\n allowed_domains = [\"www.anthonys.com\"]\n start_urls = (\n 'https://www.anthonys.com/restaurants/search/47.6062095/-122.3320708/2000',\n )\n\n def parse(self, response):\n for match in response.xpath(\"//markers/marker\"):\n fullAddress=match.xpath('.//@address').extract_first().replace('<br />', ',')\n\n # Accounts for cases with second address line\n if(len(fullAddress.split(\",\")) == 4):\n cityString = fullAddress.split(\",\")[2].strip()\n stateString = fullAddress.split(\",\")[3].strip().split(\" \")[0].strip()\n postString = fullAddress.split(\",\")[3].strip().split(\" \")[1].strip()\n addrLineOne = fullAddress.split(\",\")[0].strip()\n addrLineTwo = fullAddress.split(\",\")[1].strip()\n addrString = addrLineOne + \", \" + addrLineTwo\n else:\n cityString = fullAddress.split(\",\")[1].strip()\n stateString = fullAddress.split(\",\")[2].strip().split(\" \")[0].strip()\n postString = fullAddress.split(\",\")[2].strip().split(\" \")[1].strip()\n addrString = fullAddress.split(\",\")[0]\n\n yield GeojsonPointItem(\n ref=match.xpath('.//@title').extract_first().strip(),\n lat=match.xpath('.//@lat').extract_first().strip(),\n lon=match.xpath('.//@lng').extract_first().strip(),\n addr_full=addrString,\n city=cityString,\n state=stateString,\n postcode=postString,\n phone=match.xpath('.//@phone').extract_first().replace(\" \", \"\"),\n )\n", "path": "locations/spiders/anthonys_restaurants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport re\n\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass AnthonysRestaurantsSpiders(scrapy.Spider):\n name = \"anthonys_restaurants\"\n item_attributes = {\"brand\": \"Anthony's\"}\n allowed_domains = [\"www.anthonys.com\"]\n start_urls = (\"https://www.anthonys.com/restaurants/\",)\n\n def parse(self, response):\n script = response.css(\"#acf-block-locations-map-script-js-extra::text\").get()\n j = json.loads(script[script.find(\"{\") : 1 + script.rfind(\"}\")])\n for row in j[\"restaurants\"]:\n meta = {\"json\": row}\n yield scrapy.Request(row[\"link\"], meta=meta, callback=self.parse_location)\n\n def parse_location(self, response):\n json_data = response.meta[\"json\"]\n address = json_data[\"address\"]\n # decode entities\n name = scrapy.Selector(text=json_data[\"name\"]).xpath(\"//text()\").get()\n\n # These are weird enough that there's no hope of parsing them, but\n # clean the text up\n hours = response.xpath('//strong[text()=\"Hours:\"]/../text()').extract()\n hours = ';'.join(s.strip().replace('\\xa0', ' ') for s in hours)\n\n properties = {\n \"ref\": re.search(r\"postid-(\\d+)\", response.css(\"body\").attrib[\"class\"])[1],\n \"lat\": address[\"latitude\"],\n \"lon\": address[\"longitude\"],\n \"addr_full\": address[\"address\"],\n \"city\": address[\"city\"],\n \"state\": address[\"state\"],\n \"postcode\": address[\"zip_code\"],\n \"name\": name,\n \"website\": response.url,\n \"phone\": (\n 
response.xpath(\"//*[starts-with(@href, 'tel:')]/@href\").get() or \"\"\n )[4:],\n \"opening_hours\": hours,\n }\n return GeojsonPointItem(**properties)\n", "path": "locations/spiders/anthonys_restaurants.py"}]}
num_tokens: 967
num_tokens_diff: 927
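A small sketch of the technique the patched anthonys_restaurants spider relies on above: the store list now lives in a JSON blob inside an inline script tag, so the spider slices the text between the first `{` and the last `}` and parses it with `json.loads`. The sample script string below is invented for illustration.

```python
# Sketch of extracting an embedded JSON object from inline <script> text,
# mirroring the slicing approach used in the patched spider.
import json

def extract_embedded_json(script_text: str) -> dict:
    """Return the JSON object embedded in an inline script payload."""
    start = script_text.find("{")
    end = script_text.rfind("}") + 1
    if start == -1 or end == 0:
        raise ValueError("no JSON object found in script text")
    return json.loads(script_text[start:end])

if __name__ == "__main__":
    sample = 'var locations = {"restaurants": [{"name": "Demo", "link": "https://example.com"}]};'
    data = extract_embedded_json(sample)
    print(data["restaurants"][0]["link"])  # https://example.com
```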
problem_id: gh_patches_debug_11635
source: rasdani/github-patches
task_type: git_diff
in_source_id: hpcaitech__ColossalAI-4320
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG]: Multi-rank on same device ### 🐛 Describe the bug When I use colossalai CLI with 2 node, I got an error "rank 8 and rank 0 both on CUDA device d000" I have examined my scripts and command. And torchrun works well. The error msg is: ``` Error: failed to run torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=192.168.0.64 --master_port=29500 benchmark.py -c 7b --plugin zero --zero 1 -l 2048 -g -b 10 on 192.168.0.64, is localhost: True, exception: I/O operation on closed file Error: failed to run torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=192.168.0.64 --master_port=29500 benchmark.py -c 7b --plugin zero --zero 1 -l 2048 -g -b 10 on 192.168.0.189, is localhost: True, exception: I/O operation on closed file ``` ### Environment _No response_ [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/cli/launcher/hostinfo.py` Content: ``` 1 import socket 2 from typing import List 3 4 5 class HostInfo: 6 """ 7 A data class to store host connection-related data. 8 9 Args: 10 hostname (str): name or IP address of the host 11 port (str): the port for ssh connection 12 """ 13 14 def __init__( 15 self, 16 hostname: str, 17 port: str = None, 18 ): 19 self.hostname = hostname 20 self.port = port 21 self.is_local_host = HostInfo.is_host_localhost(hostname, port) 22 23 @staticmethod 24 def is_host_localhost(hostname: str, port: str = None) -> None: 25 """ 26 Check if the host refers to the local machine. 27 28 Args: 29 hostname (str): name or IP address of the host 30 port (str): the port for ssh connection 31 32 Returns: 33 bool: True if it is local, False otherwise 34 """ 35 36 if port is None: 37 port = 22 # no port specified, lets just use the ssh port 38 39 # socket.getfqdn("127.0.0.1") does not return localhost 40 # on some users' machines 41 # thus, we directly return True if hostname is localhost, 127.0.0.1 or 0.0.0.0 42 if hostname in ("localhost", "127.0.0.1", "0.0.0.0"): 43 return True 44 45 hostname = socket.getfqdn(hostname) 46 localhost = socket.gethostname() 47 localaddrs = socket.getaddrinfo(localhost, port) 48 targetaddrs = socket.getaddrinfo(hostname, port) 49 for (family, socktype, proto, canonname, sockaddr) in localaddrs: 50 for (rfamily, rsocktype, rproto, rcanonname, rsockaddr) in targetaddrs: 51 if rsockaddr[0] == sockaddr[0]: 52 return True 53 return False 54 55 def __str__(self): 56 return f'hostname: {self.hostname}, port: {self.port}' 57 58 def __repr__(self): 59 return self.__str__() 60 61 62 class HostInfoList: 63 """ 64 A data class to store a list of HostInfo objects. 65 """ 66 67 def __init__(self): 68 self.hostinfo_list = [] 69 70 def append(self, hostinfo: HostInfo) -> None: 71 """ 72 Add an HostInfo object to the list. 73 74 Args: 75 hostinfo (HostInfo): host information 76 """ 77 78 self.hostinfo_list.append(hostinfo) 79 80 def remove(self, hostname: str) -> None: 81 """ 82 Add an HostInfo object to the list. 83 84 Args: 85 hostname (str): the name of the host 86 """ 87 88 hostinfo = self.get_hostinfo(hostname) 89 self.hostinfo_list.remove(hostinfo) 90 91 def get_hostinfo(self, hostname: str) -> HostInfo: 92 """ 93 Return the HostInfo object which matches with the hostname. 
94 95 Args: 96 hostname (str): the name of the host 97 98 Returns: 99 hostinfo (HostInfo): the HostInfo object which matches with the hostname 100 """ 101 102 for hostinfo in self.hostinfo_list: 103 if hostinfo.hostname == hostname: 104 return hostinfo 105 106 raise Exception(f"Hostname {hostname} is not found") 107 108 def has(self, hostname: str) -> bool: 109 """ 110 Check if the hostname has been added. 111 112 Args: 113 hostname (str): the name of the host 114 115 Returns: 116 bool: True if added, False otherwise 117 """ 118 for hostinfo in self.hostinfo_list: 119 if hostinfo.hostname == hostname: 120 return True 121 return False 122 123 def __iter__(self): 124 return iter(self.hostinfo_list) 125 126 def __len__(self): 127 return len(self.hostinfo_list) 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/cli/launcher/hostinfo.py b/colossalai/cli/launcher/hostinfo.py --- a/colossalai/cli/launcher/hostinfo.py +++ b/colossalai/cli/launcher/hostinfo.py @@ -46,11 +46,8 @@ localhost = socket.gethostname() localaddrs = socket.getaddrinfo(localhost, port) targetaddrs = socket.getaddrinfo(hostname, port) - for (family, socktype, proto, canonname, sockaddr) in localaddrs: - for (rfamily, rsocktype, rproto, rcanonname, rsockaddr) in targetaddrs: - if rsockaddr[0] == sockaddr[0]: - return True - return False + + return localaddrs == targetaddrs def __str__(self): return f'hostname: {self.hostname}, port: {self.port}'
{"golden_diff": "diff --git a/colossalai/cli/launcher/hostinfo.py b/colossalai/cli/launcher/hostinfo.py\n--- a/colossalai/cli/launcher/hostinfo.py\n+++ b/colossalai/cli/launcher/hostinfo.py\n@@ -46,11 +46,8 @@\n localhost = socket.gethostname()\n localaddrs = socket.getaddrinfo(localhost, port)\n targetaddrs = socket.getaddrinfo(hostname, port)\n- for (family, socktype, proto, canonname, sockaddr) in localaddrs:\n- for (rfamily, rsocktype, rproto, rcanonname, rsockaddr) in targetaddrs:\n- if rsockaddr[0] == sockaddr[0]:\n- return True\n- return False\n+\n+ return localaddrs == targetaddrs\n \n def __str__(self):\n return f'hostname: {self.hostname}, port: {self.port}'\n", "issue": "[BUG]: Multi-rank on same device\n### \ud83d\udc1b Describe the bug\n\nWhen I use colossalai CLI with 2 node, I got an error \"rank 8 and rank 0 both on CUDA device d000\"\r\nI have examined my scripts and command. And torchrun works well.\r\n\r\nThe error msg is:\r\n```\r\nError: failed to run torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --master_addr=192.168.0.64 --master_port=29500 benchmark.py -c 7b --plugin zero --zero 1 -l 2048 -g -b 10 on 192.168.0.64, is localhost: True, exception: I/O operation on closed file\r\nError: failed to run torchrun --nproc_per_node=8 --nnodes=2 --node_rank=1 --master_addr=192.168.0.64 --master_port=29500 benchmark.py -c 7b --plugin zero --zero 1 -l 2048 -g -b 10 on 192.168.0.189, is localhost: True, exception: I/O operation on closed file\r\n```\r\n\n\n### Environment\n\n_No response_\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import socket\nfrom typing import List\n\n\nclass HostInfo:\n \"\"\"\n A data class to store host connection-related data.\n\n Args:\n hostname (str): name or IP address of the host\n port (str): the port for ssh connection\n \"\"\"\n\n def __init__(\n self,\n hostname: str,\n port: str = None,\n ):\n self.hostname = hostname\n self.port = port\n self.is_local_host = HostInfo.is_host_localhost(hostname, port)\n\n @staticmethod\n def is_host_localhost(hostname: str, port: str = None) -> None:\n \"\"\"\n Check if the host refers to the local machine.\n\n Args:\n hostname (str): name or IP address of the host\n port (str): the port for ssh connection\n\n Returns:\n bool: True if it is local, False otherwise\n \"\"\"\n\n if port is None:\n port = 22 # no port specified, lets just use the ssh port\n\n # socket.getfqdn(\"127.0.0.1\") does not return localhost\n # on some users' machines\n # thus, we directly return True if hostname is localhost, 127.0.0.1 or 0.0.0.0\n if hostname in (\"localhost\", \"127.0.0.1\", \"0.0.0.0\"):\n return True\n\n hostname = socket.getfqdn(hostname)\n localhost = socket.gethostname()\n localaddrs = socket.getaddrinfo(localhost, port)\n targetaddrs = socket.getaddrinfo(hostname, port)\n for (family, socktype, proto, canonname, sockaddr) in localaddrs:\n for (rfamily, rsocktype, rproto, rcanonname, rsockaddr) in targetaddrs:\n if rsockaddr[0] == sockaddr[0]:\n return True\n return False\n\n def __str__(self):\n return f'hostname: {self.hostname}, port: {self.port}'\n\n def __repr__(self):\n return self.__str__()\n\n\nclass HostInfoList:\n \"\"\"\n A data class to store a list of HostInfo objects.\n \"\"\"\n\n def __init__(self):\n self.hostinfo_list = []\n\n def append(self, hostinfo: HostInfo) -> None:\n \"\"\"\n Add an HostInfo object to the list.\n\n Args:\n hostinfo (HostInfo): host information\n \"\"\"\n\n self.hostinfo_list.append(hostinfo)\n\n def remove(self, 
hostname: str) -> None:\n \"\"\"\n Add an HostInfo object to the list.\n\n Args:\n hostname (str): the name of the host\n \"\"\"\n\n hostinfo = self.get_hostinfo(hostname)\n self.hostinfo_list.remove(hostinfo)\n\n def get_hostinfo(self, hostname: str) -> HostInfo:\n \"\"\"\n Return the HostInfo object which matches with the hostname.\n\n Args:\n hostname (str): the name of the host\n\n Returns:\n hostinfo (HostInfo): the HostInfo object which matches with the hostname\n \"\"\"\n\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return hostinfo\n\n raise Exception(f\"Hostname {hostname} is not found\")\n\n def has(self, hostname: str) -> bool:\n \"\"\"\n Check if the hostname has been added.\n\n Args:\n hostname (str): the name of the host\n\n Returns:\n bool: True if added, False otherwise\n \"\"\"\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return True\n return False\n\n def __iter__(self):\n return iter(self.hostinfo_list)\n\n def __len__(self):\n return len(self.hostinfo_list)\n", "path": "colossalai/cli/launcher/hostinfo.py"}], "after_files": [{"content": "import socket\nfrom typing import List\n\n\nclass HostInfo:\n \"\"\"\n A data class to store host connection-related data.\n\n Args:\n hostname (str): name or IP address of the host\n port (str): the port for ssh connection\n \"\"\"\n\n def __init__(\n self,\n hostname: str,\n port: str = None,\n ):\n self.hostname = hostname\n self.port = port\n self.is_local_host = HostInfo.is_host_localhost(hostname, port)\n\n @staticmethod\n def is_host_localhost(hostname: str, port: str = None) -> None:\n \"\"\"\n Check if the host refers to the local machine.\n\n Args:\n hostname (str): name or IP address of the host\n port (str): the port for ssh connection\n\n Returns:\n bool: True if it is local, False otherwise\n \"\"\"\n\n if port is None:\n port = 22 # no port specified, lets just use the ssh port\n\n # socket.getfqdn(\"127.0.0.1\") does not return localhost\n # on some users' machines\n # thus, we directly return True if hostname is localhost, 127.0.0.1 or 0.0.0.0\n if hostname in (\"localhost\", \"127.0.0.1\", \"0.0.0.0\"):\n return True\n\n hostname = socket.getfqdn(hostname)\n localhost = socket.gethostname()\n localaddrs = socket.getaddrinfo(localhost, port)\n targetaddrs = socket.getaddrinfo(hostname, port)\n\n return localaddrs == targetaddrs\n\n def __str__(self):\n return f'hostname: {self.hostname}, port: {self.port}'\n\n def __repr__(self):\n return self.__str__()\n\n\nclass HostInfoList:\n \"\"\"\n A data class to store a list of HostInfo objects.\n \"\"\"\n\n def __init__(self):\n self.hostinfo_list = []\n\n def append(self, hostinfo: HostInfo) -> None:\n \"\"\"\n Add an HostInfo object to the list.\n\n Args:\n hostinfo (HostInfo): host information\n \"\"\"\n\n self.hostinfo_list.append(hostinfo)\n\n def remove(self, hostname: str) -> None:\n \"\"\"\n Add an HostInfo object to the list.\n\n Args:\n hostname (str): the name of the host\n \"\"\"\n\n hostinfo = self.get_hostinfo(hostname)\n self.hostinfo_list.remove(hostinfo)\n\n def get_hostinfo(self, hostname: str) -> HostInfo:\n \"\"\"\n Return the HostInfo object which matches with the hostname.\n\n Args:\n hostname (str): the name of the host\n\n Returns:\n hostinfo (HostInfo): the HostInfo object which matches with the hostname\n \"\"\"\n\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return hostinfo\n\n raise Exception(f\"Hostname {hostname} is not found\")\n\n def has(self, hostname: 
str) -> bool:\n \"\"\"\n Check if the hostname has been added.\n\n Args:\n hostname (str): the name of the host\n\n Returns:\n bool: True if added, False otherwise\n \"\"\"\n for hostinfo in self.hostinfo_list:\n if hostinfo.hostname == hostname:\n return True\n return False\n\n def __iter__(self):\n return iter(self.hostinfo_list)\n\n def __len__(self):\n return len(self.hostinfo_list)\n", "path": "colossalai/cli/launcher/hostinfo.py"}]}
num_tokens: 1,683
num_tokens_diff: 207
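The golden diff above replaces the nested address comparison in `HostInfo.is_host_localhost` with a direct equality check between the two `getaddrinfo` results. A condensed, runnable version of that patched check is sketched below; whether the second call prints True depends on local DNS configuration.

```python
# Condensed version of the patched localhost check: compare the getaddrinfo
# results for the local machine and the target host directly.
import socket

def is_host_localhost(hostname: str, port: int = 22) -> bool:
    if hostname in ("localhost", "127.0.0.1", "0.0.0.0"):
        return True
    hostname = socket.getfqdn(hostname)
    localaddrs = socket.getaddrinfo(socket.gethostname(), port)
    targetaddrs = socket.getaddrinfo(hostname, port)
    return localaddrs == targetaddrs

if __name__ == "__main__":
    print(is_host_localhost("127.0.0.1"))           # True
    print(is_host_localhost(socket.gethostname()))  # typically True
```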
problem_id: gh_patches_debug_891
source: rasdani/github-patches
task_type: git_diff
in_source_id: openvinotoolkit__datumaro-743
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong annotated return type in Registry class https://github.com/openvinotoolkit/datumaro/blob/0d4a73d3bbe3a93585af7a0148a0e344fd1106b3/datumaro/components/environment.py#L41-L42 In the referenced code the return type of the method appears to be wrong. Either it should be `Iterator[str]` since iteration over a dict returns its keys which are of type `str` or the return statement should be `return iter(self.items.values())`. When using the library with static type checkers this annotation causes type check errors. When removing the annotation, type checkers correctly infer the type `Iterator[str]`. Wrong annotated return type in Registry class https://github.com/openvinotoolkit/datumaro/blob/0d4a73d3bbe3a93585af7a0148a0e344fd1106b3/datumaro/components/environment.py#L41-L42 In the referenced code the return type of the method appears to be wrong. Either it should be `Iterator[str]` since iteration over a dict returns its keys which are of type `str` or the return statement should be `return iter(self.items.values())`. When using the library with static type checkers this annotation causes type check errors. When removing the annotation, type checkers correctly infer the type `Iterator[str]`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `datumaro/components/environment.py` Content: ``` 1 # Copyright (C) 2020-2022 Intel Corporation 2 # 3 # SPDX-License-Identifier: MIT 4 5 import glob 6 import importlib 7 import logging as log 8 import os.path as osp 9 from functools import partial 10 from inspect import isclass 11 from typing import Callable, Dict, Generic, Iterable, Iterator, List, Optional, Type, TypeVar 12 13 from datumaro.components.cli_plugin import CliPlugin, plugin_types 14 from datumaro.components.format_detection import RejectionReason, detect_dataset_format 15 from datumaro.util.os_util import import_foreign_module, split_path 16 17 T = TypeVar("T") 18 19 20 class Registry(Generic[T]): 21 def __init__(self): 22 self.items: Dict[str, T] = {} 23 24 def register(self, name: str, value: T) -> T: 25 self.items[name] = value 26 return value 27 28 def unregister(self, name: str) -> Optional[T]: 29 return self.items.pop(name, None) 30 31 def get(self, key: str): 32 """Returns a class or a factory function""" 33 return self.items[key] 34 35 def __getitem__(self, key: str) -> T: 36 return self.get(key) 37 38 def __contains__(self, key) -> bool: 39 return key in self.items 40 41 def __iter__(self) -> Iterator[T]: 42 return iter(self.items) 43 44 45 class PluginRegistry(Registry[Type[CliPlugin]]): 46 def __init__( 47 self, filter: Callable[[Type[CliPlugin]], bool] = None 48 ): # pylint: disable=redefined-builtin 49 super().__init__() 50 self._filter = filter 51 52 def batch_register(self, values: Iterable[CliPlugin]): 53 for v in values: 54 if self._filter and not self._filter(v): 55 continue 56 57 self.register(v.NAME, v) 58 59 60 class Environment: 61 _builtin_plugins = None 62 63 @classmethod 64 def _make_filter(cls, accept, skip=None): 65 accept = (accept,) if isclass(accept) else tuple(accept) 66 skip = {skip} if isclass(skip) else set(skip or []) 67 skip = tuple(skip | set(accept)) 68 return partial(cls._check_type, accept=accept, skip=skip) 69 70 @staticmethod 71 def _check_type(t, *, accept, skip): 72 return issubclass(t, accept) and t not in skip 73 74 def __init__(self): 75 
from datumaro.components.converter import Converter 76 from datumaro.components.dataset_generator import DatasetGenerator 77 from datumaro.components.extractor import ( 78 Extractor, 79 Importer, 80 ItemTransform, 81 SourceExtractor, 82 Transform, 83 ) 84 from datumaro.components.launcher import Launcher 85 from datumaro.components.validator import Validator 86 87 _filter = self._make_filter 88 self._extractors = PluginRegistry(_filter(Extractor, skip=SourceExtractor)) 89 self._importers = PluginRegistry(_filter(Importer)) 90 self._launchers = PluginRegistry(_filter(Launcher)) 91 self._converters = PluginRegistry(_filter(Converter)) 92 self._generators = PluginRegistry(_filter(DatasetGenerator)) 93 self._transforms = PluginRegistry(_filter(Transform, skip=ItemTransform)) 94 self._validators = PluginRegistry(_filter(Validator)) 95 self._builtins_initialized = False 96 97 def _get_plugin_registry(self, name): 98 if not self._builtins_initialized: 99 self._builtins_initialized = True 100 self._register_builtin_plugins() 101 return getattr(self, name) 102 103 @property 104 def extractors(self) -> PluginRegistry: 105 return self._get_plugin_registry("_extractors") 106 107 @property 108 def importers(self) -> PluginRegistry: 109 return self._get_plugin_registry("_importers") 110 111 @property 112 def launchers(self) -> PluginRegistry: 113 return self._get_plugin_registry("_launchers") 114 115 @property 116 def converters(self) -> PluginRegistry: 117 return self._get_plugin_registry("_converters") 118 119 @property 120 def generators(self) -> PluginRegistry: 121 return self._get_plugin_registry("_generators") 122 123 @property 124 def transforms(self) -> PluginRegistry: 125 return self._get_plugin_registry("_transforms") 126 127 @property 128 def validators(self) -> PluginRegistry: 129 return self._get_plugin_registry("_validators") 130 131 @staticmethod 132 def _find_plugins(plugins_dir): 133 plugins = [] 134 135 for pattern in ("*.py", "*/*.py"): 136 for path in glob.glob(osp.join(glob.escape(plugins_dir), pattern)): 137 if not osp.isfile(path): 138 continue 139 140 path_rel = osp.relpath(path, plugins_dir) 141 name_parts = split_path(osp.splitext(path_rel)[0]) 142 143 # a module with a dot in the name won't load correctly 144 if any("." 
in part for part in name_parts): 145 log.warning( 146 "Python file '%s' in directory '%s' can't be imported " 147 "due to a dot in the name; skipping.", 148 path_rel, 149 plugins_dir, 150 ) 151 continue 152 plugins.append(".".join(name_parts)) 153 154 return plugins 155 156 @classmethod 157 def _get_plugin_exports(cls, module, types): 158 exports = [] 159 if hasattr(module, "exports"): 160 exports = module.exports 161 else: 162 for symbol in dir(module): 163 if symbol.startswith("_"): 164 continue 165 exports.append(getattr(module, symbol)) 166 167 exports = [s for s in exports if isclass(s) and issubclass(s, types) and not s in types] 168 169 return exports 170 171 @classmethod 172 def _load_plugins(cls, module_names, *, importer, types=None): 173 types = tuple(types or plugin_types()) 174 175 all_exports = [] 176 for module_name in module_names: 177 try: 178 module = importer(module_name) 179 exports = cls._get_plugin_exports(module, types) 180 except Exception as e: 181 module_search_error = ModuleNotFoundError 182 183 message = ["Failed to import module '%s': %s", module_name, e] 184 if isinstance(e, module_search_error): 185 log.debug(*message) 186 else: 187 log.warning(*message) 188 continue 189 190 log.debug( 191 "Imported the following symbols from %s: %s" 192 % (module_name, ", ".join(s.__name__ for s in exports)) 193 ) 194 all_exports.extend(exports) 195 196 return all_exports 197 198 @classmethod 199 def _load_builtin_plugins(cls): 200 if cls._builtin_plugins is None: 201 import datumaro.plugins 202 203 plugins_dir = osp.dirname(datumaro.plugins.__file__) 204 module_names = [ 205 datumaro.plugins.__name__ + "." + name for name in cls._find_plugins(plugins_dir) 206 ] 207 cls._builtin_plugins = cls._load_plugins(module_names, importer=importlib.import_module) 208 return cls._builtin_plugins 209 210 def load_plugins(self, plugins_dir): 211 module_names = self._find_plugins(plugins_dir) 212 plugins = self._load_plugins( 213 module_names, importer=partial(import_foreign_module, path=plugins_dir) 214 ) 215 self._register_plugins(plugins) 216 217 def _register_builtin_plugins(self): 218 self._register_plugins(self._load_builtin_plugins()) 219 220 def _register_plugins(self, plugins): 221 self.extractors.batch_register(plugins) 222 self.importers.batch_register(plugins) 223 self.launchers.batch_register(plugins) 224 self.converters.batch_register(plugins) 225 self.generators.batch_register(plugins) 226 self.transforms.batch_register(plugins) 227 self.validators.batch_register(plugins) 228 229 def make_extractor(self, name, *args, **kwargs): 230 return self.extractors.get(name)(*args, **kwargs) 231 232 def make_importer(self, name, *args, **kwargs): 233 return self.importers.get(name)(*args, **kwargs) 234 235 def make_launcher(self, name, *args, **kwargs): 236 return self.launchers.get(name)(*args, **kwargs) 237 238 def make_converter(self, name, *args, **kwargs): 239 result = self.converters.get(name) 240 if isclass(result): 241 result = result.convert 242 return partial(result, *args, **kwargs) 243 244 def make_transform(self, name, *args, **kwargs): 245 return partial(self.transforms.get(name), *args, **kwargs) 246 247 def is_format_known(self, name): 248 return name in self.importers or name in self.extractors 249 250 def detect_dataset( 251 self, 252 path: str, 253 depth: int = 1, 254 rejection_callback: Optional[Callable[[str, RejectionReason, str], None]] = None, 255 ) -> List[str]: 256 ignore_dirs = {"__MSOSX", "__MACOSX"} 257 matched_formats = set() 258 for _ in range(depth + 
1): 259 detected_formats = detect_dataset_format( 260 ( 261 (format_name, importer.detect) 262 for format_name, importer in self.importers.items.items() 263 ), 264 path, 265 rejection_callback=rejection_callback, 266 ) 267 268 if detected_formats and len(detected_formats) == 1: 269 return detected_formats 270 elif detected_formats: 271 matched_formats |= set(detected_formats) 272 273 paths = glob.glob(osp.join(path, "*")) 274 path = "" if len(paths) != 1 else paths[0] 275 if not osp.isdir(path) or osp.basename(path) in ignore_dirs: 276 break 277 278 return list(matched_formats) 279 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/datumaro/components/environment.py b/datumaro/components/environment.py --- a/datumaro/components/environment.py +++ b/datumaro/components/environment.py @@ -38,7 +38,7 @@ def __contains__(self, key) -> bool: return key in self.items - def __iter__(self) -> Iterator[T]: + def __iter__(self) -> Iterator[str]: return iter(self.items)
{"golden_diff": "diff --git a/datumaro/components/environment.py b/datumaro/components/environment.py\n--- a/datumaro/components/environment.py\n+++ b/datumaro/components/environment.py\n@@ -38,7 +38,7 @@\n def __contains__(self, key) -> bool:\n return key in self.items\n \n- def __iter__(self) -> Iterator[T]:\n+ def __iter__(self) -> Iterator[str]:\n return iter(self.items)\n", "issue": "Wrong annotated return type in Registry class\nhttps://github.com/openvinotoolkit/datumaro/blob/0d4a73d3bbe3a93585af7a0148a0e344fd1106b3/datumaro/components/environment.py#L41-L42\r\nIn the referenced code the return type of the method appears to be wrong. \r\n\r\nEither it should be `Iterator[str]` since iteration over a dict returns its keys which are of type `str` or the return statement should be `return iter(self.items.values())`.\r\n\r\nWhen using the library with static type checkers this annotation causes type check errors. When removing the annotation, type checkers correctly infer the type `Iterator[str]`.\nWrong annotated return type in Registry class\nhttps://github.com/openvinotoolkit/datumaro/blob/0d4a73d3bbe3a93585af7a0148a0e344fd1106b3/datumaro/components/environment.py#L41-L42\r\nIn the referenced code the return type of the method appears to be wrong. \r\n\r\nEither it should be `Iterator[str]` since iteration over a dict returns its keys which are of type `str` or the return statement should be `return iter(self.items.values())`.\r\n\r\nWhen using the library with static type checkers this annotation causes type check errors. When removing the annotation, type checkers correctly infer the type `Iterator[str]`.\n", "before_files": [{"content": "# Copyright (C) 2020-2022 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport glob\nimport importlib\nimport logging as log\nimport os.path as osp\nfrom functools import partial\nfrom inspect import isclass\nfrom typing import Callable, Dict, Generic, Iterable, Iterator, List, Optional, Type, TypeVar\n\nfrom datumaro.components.cli_plugin import CliPlugin, plugin_types\nfrom datumaro.components.format_detection import RejectionReason, detect_dataset_format\nfrom datumaro.util.os_util import import_foreign_module, split_path\n\nT = TypeVar(\"T\")\n\n\nclass Registry(Generic[T]):\n def __init__(self):\n self.items: Dict[str, T] = {}\n\n def register(self, name: str, value: T) -> T:\n self.items[name] = value\n return value\n\n def unregister(self, name: str) -> Optional[T]:\n return self.items.pop(name, None)\n\n def get(self, key: str):\n \"\"\"Returns a class or a factory function\"\"\"\n return self.items[key]\n\n def __getitem__(self, key: str) -> T:\n return self.get(key)\n\n def __contains__(self, key) -> bool:\n return key in self.items\n\n def __iter__(self) -> Iterator[T]:\n return iter(self.items)\n\n\nclass PluginRegistry(Registry[Type[CliPlugin]]):\n def __init__(\n self, filter: Callable[[Type[CliPlugin]], bool] = None\n ): # pylint: disable=redefined-builtin\n super().__init__()\n self._filter = filter\n\n def batch_register(self, values: Iterable[CliPlugin]):\n for v in values:\n if self._filter and not self._filter(v):\n continue\n\n self.register(v.NAME, v)\n\n\nclass Environment:\n _builtin_plugins = None\n\n @classmethod\n def _make_filter(cls, accept, skip=None):\n accept = (accept,) if isclass(accept) else tuple(accept)\n skip = {skip} if isclass(skip) else set(skip or [])\n skip = tuple(skip | set(accept))\n return partial(cls._check_type, accept=accept, skip=skip)\n\n @staticmethod\n def _check_type(t, *, accept, 
skip):\n return issubclass(t, accept) and t not in skip\n\n def __init__(self):\n from datumaro.components.converter import Converter\n from datumaro.components.dataset_generator import DatasetGenerator\n from datumaro.components.extractor import (\n Extractor,\n Importer,\n ItemTransform,\n SourceExtractor,\n Transform,\n )\n from datumaro.components.launcher import Launcher\n from datumaro.components.validator import Validator\n\n _filter = self._make_filter\n self._extractors = PluginRegistry(_filter(Extractor, skip=SourceExtractor))\n self._importers = PluginRegistry(_filter(Importer))\n self._launchers = PluginRegistry(_filter(Launcher))\n self._converters = PluginRegistry(_filter(Converter))\n self._generators = PluginRegistry(_filter(DatasetGenerator))\n self._transforms = PluginRegistry(_filter(Transform, skip=ItemTransform))\n self._validators = PluginRegistry(_filter(Validator))\n self._builtins_initialized = False\n\n def _get_plugin_registry(self, name):\n if not self._builtins_initialized:\n self._builtins_initialized = True\n self._register_builtin_plugins()\n return getattr(self, name)\n\n @property\n def extractors(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_extractors\")\n\n @property\n def importers(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_importers\")\n\n @property\n def launchers(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_launchers\")\n\n @property\n def converters(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_converters\")\n\n @property\n def generators(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_generators\")\n\n @property\n def transforms(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_transforms\")\n\n @property\n def validators(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_validators\")\n\n @staticmethod\n def _find_plugins(plugins_dir):\n plugins = []\n\n for pattern in (\"*.py\", \"*/*.py\"):\n for path in glob.glob(osp.join(glob.escape(plugins_dir), pattern)):\n if not osp.isfile(path):\n continue\n\n path_rel = osp.relpath(path, plugins_dir)\n name_parts = split_path(osp.splitext(path_rel)[0])\n\n # a module with a dot in the name won't load correctly\n if any(\".\" in part for part in name_parts):\n log.warning(\n \"Python file '%s' in directory '%s' can't be imported \"\n \"due to a dot in the name; skipping.\",\n path_rel,\n plugins_dir,\n )\n continue\n plugins.append(\".\".join(name_parts))\n\n return plugins\n\n @classmethod\n def _get_plugin_exports(cls, module, types):\n exports = []\n if hasattr(module, \"exports\"):\n exports = module.exports\n else:\n for symbol in dir(module):\n if symbol.startswith(\"_\"):\n continue\n exports.append(getattr(module, symbol))\n\n exports = [s for s in exports if isclass(s) and issubclass(s, types) and not s in types]\n\n return exports\n\n @classmethod\n def _load_plugins(cls, module_names, *, importer, types=None):\n types = tuple(types or plugin_types())\n\n all_exports = []\n for module_name in module_names:\n try:\n module = importer(module_name)\n exports = cls._get_plugin_exports(module, types)\n except Exception as e:\n module_search_error = ModuleNotFoundError\n\n message = [\"Failed to import module '%s': %s\", module_name, e]\n if isinstance(e, module_search_error):\n log.debug(*message)\n else:\n log.warning(*message)\n continue\n\n log.debug(\n \"Imported the following symbols from %s: %s\"\n % (module_name, \", \".join(s.__name__ for s in exports))\n )\n 
all_exports.extend(exports)\n\n return all_exports\n\n @classmethod\n def _load_builtin_plugins(cls):\n if cls._builtin_plugins is None:\n import datumaro.plugins\n\n plugins_dir = osp.dirname(datumaro.plugins.__file__)\n module_names = [\n datumaro.plugins.__name__ + \".\" + name for name in cls._find_plugins(plugins_dir)\n ]\n cls._builtin_plugins = cls._load_plugins(module_names, importer=importlib.import_module)\n return cls._builtin_plugins\n\n def load_plugins(self, plugins_dir):\n module_names = self._find_plugins(plugins_dir)\n plugins = self._load_plugins(\n module_names, importer=partial(import_foreign_module, path=plugins_dir)\n )\n self._register_plugins(plugins)\n\n def _register_builtin_plugins(self):\n self._register_plugins(self._load_builtin_plugins())\n\n def _register_plugins(self, plugins):\n self.extractors.batch_register(plugins)\n self.importers.batch_register(plugins)\n self.launchers.batch_register(plugins)\n self.converters.batch_register(plugins)\n self.generators.batch_register(plugins)\n self.transforms.batch_register(plugins)\n self.validators.batch_register(plugins)\n\n def make_extractor(self, name, *args, **kwargs):\n return self.extractors.get(name)(*args, **kwargs)\n\n def make_importer(self, name, *args, **kwargs):\n return self.importers.get(name)(*args, **kwargs)\n\n def make_launcher(self, name, *args, **kwargs):\n return self.launchers.get(name)(*args, **kwargs)\n\n def make_converter(self, name, *args, **kwargs):\n result = self.converters.get(name)\n if isclass(result):\n result = result.convert\n return partial(result, *args, **kwargs)\n\n def make_transform(self, name, *args, **kwargs):\n return partial(self.transforms.get(name), *args, **kwargs)\n\n def is_format_known(self, name):\n return name in self.importers or name in self.extractors\n\n def detect_dataset(\n self,\n path: str,\n depth: int = 1,\n rejection_callback: Optional[Callable[[str, RejectionReason, str], None]] = None,\n ) -> List[str]:\n ignore_dirs = {\"__MSOSX\", \"__MACOSX\"}\n matched_formats = set()\n for _ in range(depth + 1):\n detected_formats = detect_dataset_format(\n (\n (format_name, importer.detect)\n for format_name, importer in self.importers.items.items()\n ),\n path,\n rejection_callback=rejection_callback,\n )\n\n if detected_formats and len(detected_formats) == 1:\n return detected_formats\n elif detected_formats:\n matched_formats |= set(detected_formats)\n\n paths = glob.glob(osp.join(path, \"*\"))\n path = \"\" if len(paths) != 1 else paths[0]\n if not osp.isdir(path) or osp.basename(path) in ignore_dirs:\n break\n\n return list(matched_formats)\n", "path": "datumaro/components/environment.py"}], "after_files": [{"content": "# Copyright (C) 2020-2022 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nimport glob\nimport importlib\nimport logging as log\nimport os.path as osp\nfrom functools import partial\nfrom inspect import isclass\nfrom typing import Callable, Dict, Generic, Iterable, Iterator, List, Optional, Type, TypeVar\n\nfrom datumaro.components.cli_plugin import CliPlugin, plugin_types\nfrom datumaro.components.format_detection import RejectionReason, detect_dataset_format\nfrom datumaro.util.os_util import import_foreign_module, split_path\n\nT = TypeVar(\"T\")\n\n\nclass Registry(Generic[T]):\n def __init__(self):\n self.items: Dict[str, T] = {}\n\n def register(self, name: str, value: T) -> T:\n self.items[name] = value\n return value\n\n def unregister(self, name: str) -> Optional[T]:\n return self.items.pop(name, None)\n\n def get(self, 
key: str):\n \"\"\"Returns a class or a factory function\"\"\"\n return self.items[key]\n\n def __getitem__(self, key: str) -> T:\n return self.get(key)\n\n def __contains__(self, key) -> bool:\n return key in self.items\n\n def __iter__(self) -> Iterator[str]:\n return iter(self.items)\n\n\nclass PluginRegistry(Registry[Type[CliPlugin]]):\n def __init__(\n self, filter: Callable[[Type[CliPlugin]], bool] = None\n ): # pylint: disable=redefined-builtin\n super().__init__()\n self._filter = filter\n\n def batch_register(self, values: Iterable[CliPlugin]):\n for v in values:\n if self._filter and not self._filter(v):\n continue\n\n self.register(v.NAME, v)\n\n\nclass Environment:\n _builtin_plugins = None\n\n @classmethod\n def _make_filter(cls, accept, skip=None):\n accept = (accept,) if isclass(accept) else tuple(accept)\n skip = {skip} if isclass(skip) else set(skip or [])\n skip = tuple(skip | set(accept))\n return partial(cls._check_type, accept=accept, skip=skip)\n\n @staticmethod\n def _check_type(t, *, accept, skip):\n return issubclass(t, accept) and t not in skip\n\n def __init__(self):\n from datumaro.components.converter import Converter\n from datumaro.components.dataset_generator import DatasetGenerator\n from datumaro.components.extractor import (\n Extractor,\n Importer,\n ItemTransform,\n SourceExtractor,\n Transform,\n )\n from datumaro.components.launcher import Launcher\n from datumaro.components.validator import Validator\n\n _filter = self._make_filter\n self._extractors = PluginRegistry(_filter(Extractor, skip=SourceExtractor))\n self._importers = PluginRegistry(_filter(Importer))\n self._launchers = PluginRegistry(_filter(Launcher))\n self._converters = PluginRegistry(_filter(Converter))\n self._generators = PluginRegistry(_filter(DatasetGenerator))\n self._transforms = PluginRegistry(_filter(Transform, skip=ItemTransform))\n self._validators = PluginRegistry(_filter(Validator))\n self._builtins_initialized = False\n\n def _get_plugin_registry(self, name):\n if not self._builtins_initialized:\n self._builtins_initialized = True\n self._register_builtin_plugins()\n return getattr(self, name)\n\n @property\n def extractors(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_extractors\")\n\n @property\n def importers(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_importers\")\n\n @property\n def launchers(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_launchers\")\n\n @property\n def converters(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_converters\")\n\n @property\n def generators(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_generators\")\n\n @property\n def transforms(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_transforms\")\n\n @property\n def validators(self) -> PluginRegistry:\n return self._get_plugin_registry(\"_validators\")\n\n @staticmethod\n def _find_plugins(plugins_dir):\n plugins = []\n\n for pattern in (\"*.py\", \"*/*.py\"):\n for path in glob.glob(osp.join(glob.escape(plugins_dir), pattern)):\n if not osp.isfile(path):\n continue\n\n path_rel = osp.relpath(path, plugins_dir)\n name_parts = split_path(osp.splitext(path_rel)[0])\n\n # a module with a dot in the name won't load correctly\n if any(\".\" in part for part in name_parts):\n log.warning(\n \"Python file '%s' in directory '%s' can't be imported \"\n \"due to a dot in the name; skipping.\",\n path_rel,\n plugins_dir,\n )\n continue\n plugins.append(\".\".join(name_parts))\n\n return plugins\n\n 
@classmethod\n def _get_plugin_exports(cls, module, types):\n exports = []\n if hasattr(module, \"exports\"):\n exports = module.exports\n else:\n for symbol in dir(module):\n if symbol.startswith(\"_\"):\n continue\n exports.append(getattr(module, symbol))\n\n exports = [s for s in exports if isclass(s) and issubclass(s, types) and not s in types]\n\n return exports\n\n @classmethod\n def _load_plugins(cls, module_names, *, importer, types=None):\n types = tuple(types or plugin_types())\n\n all_exports = []\n for module_name in module_names:\n try:\n module = importer(module_name)\n exports = cls._get_plugin_exports(module, types)\n except Exception as e:\n module_search_error = ModuleNotFoundError\n\n message = [\"Failed to import module '%s': %s\", module_name, e]\n if isinstance(e, module_search_error):\n log.debug(*message)\n else:\n log.warning(*message)\n continue\n\n log.debug(\n \"Imported the following symbols from %s: %s\"\n % (module_name, \", \".join(s.__name__ for s in exports))\n )\n all_exports.extend(exports)\n\n return all_exports\n\n @classmethod\n def _load_builtin_plugins(cls):\n if cls._builtin_plugins is None:\n import datumaro.plugins\n\n plugins_dir = osp.dirname(datumaro.plugins.__file__)\n module_names = [\n datumaro.plugins.__name__ + \".\" + name for name in cls._find_plugins(plugins_dir)\n ]\n cls._builtin_plugins = cls._load_plugins(module_names, importer=importlib.import_module)\n return cls._builtin_plugins\n\n def load_plugins(self, plugins_dir):\n module_names = self._find_plugins(plugins_dir)\n plugins = self._load_plugins(\n module_names, importer=partial(import_foreign_module, path=plugins_dir)\n )\n self._register_plugins(plugins)\n\n def _register_builtin_plugins(self):\n self._register_plugins(self._load_builtin_plugins())\n\n def _register_plugins(self, plugins):\n self.extractors.batch_register(plugins)\n self.importers.batch_register(plugins)\n self.launchers.batch_register(plugins)\n self.converters.batch_register(plugins)\n self.generators.batch_register(plugins)\n self.transforms.batch_register(plugins)\n self.validators.batch_register(plugins)\n\n def make_extractor(self, name, *args, **kwargs):\n return self.extractors.get(name)(*args, **kwargs)\n\n def make_importer(self, name, *args, **kwargs):\n return self.importers.get(name)(*args, **kwargs)\n\n def make_launcher(self, name, *args, **kwargs):\n return self.launchers.get(name)(*args, **kwargs)\n\n def make_converter(self, name, *args, **kwargs):\n result = self.converters.get(name)\n if isclass(result):\n result = result.convert\n return partial(result, *args, **kwargs)\n\n def make_transform(self, name, *args, **kwargs):\n return partial(self.transforms.get(name), *args, **kwargs)\n\n def is_format_known(self, name):\n return name in self.importers or name in self.extractors\n\n def detect_dataset(\n self,\n path: str,\n depth: int = 1,\n rejection_callback: Optional[Callable[[str, RejectionReason, str], None]] = None,\n ) -> List[str]:\n ignore_dirs = {\"__MSOSX\", \"__MACOSX\"}\n matched_formats = set()\n for _ in range(depth + 1):\n detected_formats = detect_dataset_format(\n (\n (format_name, importer.detect)\n for format_name, importer in self.importers.items.items()\n ),\n path,\n rejection_callback=rejection_callback,\n )\n\n if detected_formats and len(detected_formats) == 1:\n return detected_formats\n elif detected_formats:\n matched_formats |= set(detected_formats)\n\n paths = glob.glob(osp.join(path, \"*\"))\n path = \"\" if len(paths) != 1 else paths[0]\n if not 
osp.isdir(path) or osp.basename(path) in ignore_dirs:\n break\n\n return list(matched_formats)\n", "path": "datumaro/components/environment.py"}]}
3,331
97
gh_patches_debug_25454
rasdani/github-patches
git_diff
buildbot__buildbot-3179
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RolesFromEmails doesn't work with GitHub auth (and maybe others) I've got a setup like this: ``` auth = util.GitHubAuth(CLIENT_ID, CLIENT_SECRET) authz = util.Authz( allowRules = [ util.AnyControlEndpointMatcher(role = "admins") ], roleMatchers = [ util.RolesFromEmails( admins = [ "[email protected]" ] ) ] ) ``` `[email protected]` is my primary email address. This doesn't work; I have to use my username, `samizzo`, as the email address in the `RolesFromEmails`. Looking through the code, I can't see how this has ever worked. The authentication mechanism ends up calling [`UserInfoProviderBase.getUserInfo`](https://github.com/buildbot/buildbot/blob/master/master/buildbot/www/auth.py#L83) which returns the username as the email address in the user info. I'm not sure what the right fix for this is; I don't know the buildbot code very well. I've switched over to using `RolesFromUsername` which is more convenient anyway. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `master/buildbot/www/auth.py` Content: ``` 1 # This file is part of Buildbot. Buildbot is free software: you can 2 # redistribute it and/or modify it under the terms of the GNU General Public 3 # License as published by the Free Software Foundation, version 2. 4 # 5 # This program is distributed in the hope that it will be useful, but WITHOUT 6 # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 7 # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more 8 # details. 9 # 10 # You should have received a copy of the GNU General Public License along with 11 # this program; if not, write to the Free Software Foundation, Inc., 51 12 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
13 # 14 # Copyright Buildbot Team Members 15 16 from __future__ import absolute_import 17 from __future__ import print_function 18 19 import re 20 21 from twisted.cred.checkers import FilePasswordDB 22 from twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse 23 from twisted.cred.portal import IRealm 24 from twisted.cred.portal import Portal 25 from twisted.internet import defer 26 from twisted.web.error import Error 27 from twisted.web.guard import BasicCredentialFactory 28 from twisted.web.guard import DigestCredentialFactory 29 from twisted.web.guard import HTTPAuthSessionWrapper 30 from twisted.web.resource import IResource 31 from zope.interface import implementer 32 33 from buildbot.util import bytes2NativeString 34 from buildbot.util import config 35 from buildbot.www import resource 36 37 38 class AuthRootResource(resource.Resource): 39 40 def getChild(self, path, request): 41 # return dynamically generated resources 42 if path == b'login': 43 return self.master.www.auth.getLoginResource() 44 elif path == b'logout': 45 return self.master.www.auth.getLogoutResource() 46 return resource.Resource.getChild(self, path, request) 47 48 49 class AuthBase(config.ConfiguredMixin): 50 51 def __init__(self, userInfoProvider=None): 52 if userInfoProvider is None: 53 userInfoProvider = UserInfoProviderBase() 54 self.userInfoProvider = userInfoProvider 55 56 def reconfigAuth(self, master, new_config): 57 self.master = master 58 59 def maybeAutoLogin(self, request): 60 return defer.succeed(None) 61 62 def getLoginResource(self): 63 raise Error(501, "not implemented") 64 65 def getLogoutResource(self): 66 return LogoutResource(self.master) 67 68 @defer.inlineCallbacks 69 def updateUserInfo(self, request): 70 session = request.getSession() 71 if self.userInfoProvider is not None: 72 infos = yield self.userInfoProvider.getUserInfo(session.user_info['username']) 73 session.user_info.update(infos) 74 session.updateSession(request) 75 76 def getConfigDict(self): 77 return {'name': type(self).__name__} 78 79 80 class UserInfoProviderBase(config.ConfiguredMixin): 81 name = "noinfo" 82 83 def getUserInfo(self, username): 84 return defer.succeed({'email': username}) 85 86 87 class LoginResource(resource.Resource): 88 89 def render_GET(self, request): 90 return self.asyncRenderHelper(request, self.renderLogin) 91 92 @defer.inlineCallbacks 93 def renderLogin(self, request): 94 raise NotImplementedError 95 96 97 class NoAuth(AuthBase): 98 pass 99 100 101 class RemoteUserAuth(AuthBase): 102 header = "REMOTE_USER" 103 headerRegex = re.compile(r"(?P<username>[^ @]+)@(?P<realm>[^ @]+)") 104 105 def __init__(self, header=None, headerRegex=None, **kwargs): 106 AuthBase.__init__(self, **kwargs) 107 if header is not None: 108 self.header = header 109 if headerRegex is not None: 110 self.headerRegex = re.compile(headerRegex) 111 112 @defer.inlineCallbacks 113 def maybeAutoLogin(self, request): 114 header = request.getHeader(self.header) 115 if header is None: 116 raise Error(403, "missing http header %s. Check your reverse proxy config!" % ( 117 self.header)) 118 res = self.headerRegex.match(header) 119 if res is None: 120 raise Error( 121 403, 'http header does not match regex! 
"%s" not matching %s' % 122 (header, self.headerRegex.pattern)) 123 session = request.getSession() 124 if session.user_info != dict(res.groupdict()): 125 session.user_info = dict(res.groupdict()) 126 yield self.updateUserInfo(request) 127 128 129 @implementer(IRealm) 130 class AuthRealm(object): 131 132 def __init__(self, master, auth): 133 self.auth = auth 134 self.master = master 135 136 def requestAvatar(self, avatarId, mind, *interfaces): 137 if IResource in interfaces: 138 return (IResource, 139 PreAuthenticatedLoginResource(self.master, avatarId), 140 lambda: None) 141 raise NotImplementedError() 142 143 144 class TwistedICredAuthBase(AuthBase): 145 146 def __init__(self, credentialFactories, checkers, **kwargs): 147 AuthBase.__init__(self, **kwargs) 148 self.credentialFactories = credentialFactories 149 self.checkers = checkers 150 151 def getLoginResource(self): 152 return HTTPAuthSessionWrapper( 153 Portal(AuthRealm(self.master, self), self.checkers), 154 self.credentialFactories) 155 156 157 class HTPasswdAuth(TwistedICredAuthBase): 158 159 def __init__(self, passwdFile, **kwargs): 160 TwistedICredAuthBase.__init__( 161 self, 162 [DigestCredentialFactory(b"md5", b"buildbot"), 163 BasicCredentialFactory(b"buildbot")], 164 [FilePasswordDB(passwdFile)], 165 **kwargs) 166 167 168 class UserPasswordAuth(TwistedICredAuthBase): 169 170 def __init__(self, users, **kwargs): 171 TwistedICredAuthBase.__init__( 172 self, 173 [DigestCredentialFactory(b"md5", b"buildbot"), 174 BasicCredentialFactory(b"buildbot")], 175 [InMemoryUsernamePasswordDatabaseDontUse(**dict(users))], 176 **kwargs) 177 178 179 def _redirect(master, request): 180 url = request.args.get("redirect", ["/"])[0] 181 return resource.Redirect(master.config.buildbotURL + "#" + url) 182 183 184 class PreAuthenticatedLoginResource(LoginResource): 185 # a LoginResource which is already authenticated via a 186 # HTTPAuthSessionWrapper 187 188 def __init__(self, master, username): 189 LoginResource.__init__(self, master) 190 self.username = username 191 192 @defer.inlineCallbacks 193 def renderLogin(self, request): 194 session = request.getSession() 195 session.user_info = dict(username=bytes2NativeString(self.username)) 196 yield self.master.www.auth.updateUserInfo(request) 197 raise _redirect(self.master, request) 198 199 200 class LogoutResource(resource.Resource): 201 202 def render_GET(self, request): 203 session = request.getSession() 204 session.expire() 205 session.updateSession(request) 206 request.redirect(_redirect(self.master, request).url) 207 return b'' 208 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/master/buildbot/www/auth.py b/master/buildbot/www/auth.py --- a/master/buildbot/www/auth.py +++ b/master/buildbot/www/auth.py @@ -49,8 +49,6 @@ class AuthBase(config.ConfiguredMixin): def __init__(self, userInfoProvider=None): - if userInfoProvider is None: - userInfoProvider = UserInfoProviderBase() self.userInfoProvider = userInfoProvider def reconfigAuth(self, master, new_config): @@ -104,6 +102,8 @@ def __init__(self, header=None, headerRegex=None, **kwargs): AuthBase.__init__(self, **kwargs) + if self.userInfoProvider is None: + self.userInfoProvider = UserInfoProviderBase() if header is not None: self.header = header if headerRegex is not None: @@ -145,6 +145,8 @@ def __init__(self, credentialFactories, checkers, **kwargs): AuthBase.__init__(self, **kwargs) + if self.userInfoProvider is None: + self.userInfoProvider = UserInfoProviderBase() self.credentialFactories = credentialFactories self.checkers = checkers
{"golden_diff": "diff --git a/master/buildbot/www/auth.py b/master/buildbot/www/auth.py\n--- a/master/buildbot/www/auth.py\n+++ b/master/buildbot/www/auth.py\n@@ -49,8 +49,6 @@\n class AuthBase(config.ConfiguredMixin):\n \n def __init__(self, userInfoProvider=None):\n- if userInfoProvider is None:\n- userInfoProvider = UserInfoProviderBase()\n self.userInfoProvider = userInfoProvider\n \n def reconfigAuth(self, master, new_config):\n@@ -104,6 +102,8 @@\n \n def __init__(self, header=None, headerRegex=None, **kwargs):\n AuthBase.__init__(self, **kwargs)\n+ if self.userInfoProvider is None:\n+ self.userInfoProvider = UserInfoProviderBase()\n if header is not None:\n self.header = header\n if headerRegex is not None:\n@@ -145,6 +145,8 @@\n \n def __init__(self, credentialFactories, checkers, **kwargs):\n AuthBase.__init__(self, **kwargs)\n+ if self.userInfoProvider is None:\n+ self.userInfoProvider = UserInfoProviderBase()\n self.credentialFactories = credentialFactories\n self.checkers = checkers\n", "issue": "RolesFromEmails doesn't work with GitHub auth (and maybe others)\nI've got a setup like this:\r\n\r\n```\r\nauth = util.GitHubAuth(CLIENT_ID, CLIENT_SECRET)\r\nauthz = util.Authz(\r\n allowRules = [ util.AnyControlEndpointMatcher(role = \"admins\") ],\r\n roleMatchers = [\r\n util.RolesFromEmails(\r\n admins = [ \"[email protected]\" ]\r\n )\r\n ]\r\n)\r\n```\r\n\r\n`[email protected]` is my primary email address. This doesn't work; I have to use my username, `samizzo`, as the email address in the `RolesFromEmails`.\r\n\r\nLooking through the code, I can't see how this has ever worked. The authentication mechanism ends up calling [`UserInfoProviderBase.getUserInfo`](https://github.com/buildbot/buildbot/blob/master/master/buildbot/www/auth.py#L83) which returns the username as the email address in the user info.\r\n\r\nI'm not sure what the right fix for this is; I don't know the buildbot code very well. I've switched over to using `RolesFromUsername` which is more convenient anyway.\n", "before_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport re\n\nfrom twisted.cred.checkers import FilePasswordDB\nfrom twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse\nfrom twisted.cred.portal import IRealm\nfrom twisted.cred.portal import Portal\nfrom twisted.internet import defer\nfrom twisted.web.error import Error\nfrom twisted.web.guard import BasicCredentialFactory\nfrom twisted.web.guard import DigestCredentialFactory\nfrom twisted.web.guard import HTTPAuthSessionWrapper\nfrom twisted.web.resource import IResource\nfrom zope.interface import implementer\n\nfrom buildbot.util import bytes2NativeString\nfrom buildbot.util import config\nfrom buildbot.www import resource\n\n\nclass AuthRootResource(resource.Resource):\n\n def getChild(self, path, request):\n # return dynamically generated resources\n if path == b'login':\n return self.master.www.auth.getLoginResource()\n elif path == b'logout':\n return self.master.www.auth.getLogoutResource()\n return resource.Resource.getChild(self, path, request)\n\n\nclass AuthBase(config.ConfiguredMixin):\n\n def __init__(self, userInfoProvider=None):\n if userInfoProvider is None:\n userInfoProvider = UserInfoProviderBase()\n self.userInfoProvider = userInfoProvider\n\n def reconfigAuth(self, master, new_config):\n self.master = master\n\n def maybeAutoLogin(self, request):\n return defer.succeed(None)\n\n def getLoginResource(self):\n raise Error(501, \"not implemented\")\n\n def getLogoutResource(self):\n return LogoutResource(self.master)\n\n @defer.inlineCallbacks\n def updateUserInfo(self, request):\n session = request.getSession()\n if self.userInfoProvider is not None:\n infos = yield self.userInfoProvider.getUserInfo(session.user_info['username'])\n session.user_info.update(infos)\n session.updateSession(request)\n\n def getConfigDict(self):\n return {'name': type(self).__name__}\n\n\nclass UserInfoProviderBase(config.ConfiguredMixin):\n name = \"noinfo\"\n\n def getUserInfo(self, username):\n return defer.succeed({'email': username})\n\n\nclass LoginResource(resource.Resource):\n\n def render_GET(self, request):\n return self.asyncRenderHelper(request, self.renderLogin)\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n raise NotImplementedError\n\n\nclass NoAuth(AuthBase):\n pass\n\n\nclass RemoteUserAuth(AuthBase):\n header = \"REMOTE_USER\"\n headerRegex = re.compile(r\"(?P<username>[^ @]+)@(?P<realm>[^ @]+)\")\n\n def __init__(self, header=None, headerRegex=None, **kwargs):\n AuthBase.__init__(self, **kwargs)\n if header is not None:\n self.header = header\n if headerRegex is not None:\n self.headerRegex = re.compile(headerRegex)\n\n @defer.inlineCallbacks\n def maybeAutoLogin(self, request):\n header = request.getHeader(self.header)\n if header is None:\n raise Error(403, \"missing http header %s. Check your reverse proxy config!\" % (\n self.header))\n res = self.headerRegex.match(header)\n if res is None:\n raise Error(\n 403, 'http header does not match regex! 
\"%s\" not matching %s' %\n (header, self.headerRegex.pattern))\n session = request.getSession()\n if session.user_info != dict(res.groupdict()):\n session.user_info = dict(res.groupdict())\n yield self.updateUserInfo(request)\n\n\n@implementer(IRealm)\nclass AuthRealm(object):\n\n def __init__(self, master, auth):\n self.auth = auth\n self.master = master\n\n def requestAvatar(self, avatarId, mind, *interfaces):\n if IResource in interfaces:\n return (IResource,\n PreAuthenticatedLoginResource(self.master, avatarId),\n lambda: None)\n raise NotImplementedError()\n\n\nclass TwistedICredAuthBase(AuthBase):\n\n def __init__(self, credentialFactories, checkers, **kwargs):\n AuthBase.__init__(self, **kwargs)\n self.credentialFactories = credentialFactories\n self.checkers = checkers\n\n def getLoginResource(self):\n return HTTPAuthSessionWrapper(\n Portal(AuthRealm(self.master, self), self.checkers),\n self.credentialFactories)\n\n\nclass HTPasswdAuth(TwistedICredAuthBase):\n\n def __init__(self, passwdFile, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [FilePasswordDB(passwdFile)],\n **kwargs)\n\n\nclass UserPasswordAuth(TwistedICredAuthBase):\n\n def __init__(self, users, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [InMemoryUsernamePasswordDatabaseDontUse(**dict(users))],\n **kwargs)\n\n\ndef _redirect(master, request):\n url = request.args.get(\"redirect\", [\"/\"])[0]\n return resource.Redirect(master.config.buildbotURL + \"#\" + url)\n\n\nclass PreAuthenticatedLoginResource(LoginResource):\n # a LoginResource which is already authenticated via a\n # HTTPAuthSessionWrapper\n\n def __init__(self, master, username):\n LoginResource.__init__(self, master)\n self.username = username\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n session = request.getSession()\n session.user_info = dict(username=bytes2NativeString(self.username))\n yield self.master.www.auth.updateUserInfo(request)\n raise _redirect(self.master, request)\n\n\nclass LogoutResource(resource.Resource):\n\n def render_GET(self, request):\n session = request.getSession()\n session.expire()\n session.updateSession(request)\n request.redirect(_redirect(self.master, request).url)\n return b''\n", "path": "master/buildbot/www/auth.py"}], "after_files": [{"content": "# This file is part of Buildbot. Buildbot is free software: you can\n# redistribute it and/or modify it under the terms of the GNU General Public\n# License as published by the Free Software Foundation, version 2.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.\n#\n# Copyright Buildbot Team Members\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport re\n\nfrom twisted.cred.checkers import FilePasswordDB\nfrom twisted.cred.checkers import InMemoryUsernamePasswordDatabaseDontUse\nfrom twisted.cred.portal import IRealm\nfrom twisted.cred.portal import Portal\nfrom twisted.internet import defer\nfrom twisted.web.error import Error\nfrom twisted.web.guard import BasicCredentialFactory\nfrom twisted.web.guard import DigestCredentialFactory\nfrom twisted.web.guard import HTTPAuthSessionWrapper\nfrom twisted.web.resource import IResource\nfrom zope.interface import implementer\n\nfrom buildbot.util import bytes2NativeString\nfrom buildbot.util import config\nfrom buildbot.www import resource\n\n\nclass AuthRootResource(resource.Resource):\n\n def getChild(self, path, request):\n # return dynamically generated resources\n if path == b'login':\n return self.master.www.auth.getLoginResource()\n elif path == b'logout':\n return self.master.www.auth.getLogoutResource()\n return resource.Resource.getChild(self, path, request)\n\n\nclass AuthBase(config.ConfiguredMixin):\n\n def __init__(self, userInfoProvider=None):\n self.userInfoProvider = userInfoProvider\n\n def reconfigAuth(self, master, new_config):\n self.master = master\n\n def maybeAutoLogin(self, request):\n return defer.succeed(None)\n\n def getLoginResource(self):\n raise Error(501, \"not implemented\")\n\n def getLogoutResource(self):\n return LogoutResource(self.master)\n\n @defer.inlineCallbacks\n def updateUserInfo(self, request):\n session = request.getSession()\n if self.userInfoProvider is not None:\n infos = yield self.userInfoProvider.getUserInfo(session.user_info['username'])\n session.user_info.update(infos)\n session.updateSession(request)\n\n def getConfigDict(self):\n return {'name': type(self).__name__}\n\n\nclass UserInfoProviderBase(config.ConfiguredMixin):\n name = \"noinfo\"\n\n def getUserInfo(self, username):\n return defer.succeed({'email': username})\n\n\nclass LoginResource(resource.Resource):\n\n def render_GET(self, request):\n return self.asyncRenderHelper(request, self.renderLogin)\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n raise NotImplementedError\n\n\nclass NoAuth(AuthBase):\n pass\n\n\nclass RemoteUserAuth(AuthBase):\n header = \"REMOTE_USER\"\n headerRegex = re.compile(r\"(?P<username>[^ @]+)@(?P<realm>[^ @]+)\")\n\n def __init__(self, header=None, headerRegex=None, **kwargs):\n AuthBase.__init__(self, **kwargs)\n if self.userInfoProvider is None:\n self.userInfoProvider = UserInfoProviderBase()\n if header is not None:\n self.header = header\n if headerRegex is not None:\n self.headerRegex = re.compile(headerRegex)\n\n @defer.inlineCallbacks\n def maybeAutoLogin(self, request):\n header = request.getHeader(self.header)\n if header is None:\n raise Error(403, \"missing http header %s. Check your reverse proxy config!\" % (\n self.header))\n res = self.headerRegex.match(header)\n if res is None:\n raise Error(\n 403, 'http header does not match regex! 
\"%s\" not matching %s' %\n (header, self.headerRegex.pattern))\n session = request.getSession()\n if session.user_info != dict(res.groupdict()):\n session.user_info = dict(res.groupdict())\n yield self.updateUserInfo(request)\n\n\n@implementer(IRealm)\nclass AuthRealm(object):\n\n def __init__(self, master, auth):\n self.auth = auth\n self.master = master\n\n def requestAvatar(self, avatarId, mind, *interfaces):\n if IResource in interfaces:\n return (IResource,\n PreAuthenticatedLoginResource(self.master, avatarId),\n lambda: None)\n raise NotImplementedError()\n\n\nclass TwistedICredAuthBase(AuthBase):\n\n def __init__(self, credentialFactories, checkers, **kwargs):\n AuthBase.__init__(self, **kwargs)\n if self.userInfoProvider is None:\n self.userInfoProvider = UserInfoProviderBase()\n self.credentialFactories = credentialFactories\n self.checkers = checkers\n\n def getLoginResource(self):\n return HTTPAuthSessionWrapper(\n Portal(AuthRealm(self.master, self), self.checkers),\n self.credentialFactories)\n\n\nclass HTPasswdAuth(TwistedICredAuthBase):\n\n def __init__(self, passwdFile, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [FilePasswordDB(passwdFile)],\n **kwargs)\n\n\nclass UserPasswordAuth(TwistedICredAuthBase):\n\n def __init__(self, users, **kwargs):\n TwistedICredAuthBase.__init__(\n self,\n [DigestCredentialFactory(b\"md5\", b\"buildbot\"),\n BasicCredentialFactory(b\"buildbot\")],\n [InMemoryUsernamePasswordDatabaseDontUse(**dict(users))],\n **kwargs)\n\n\ndef _redirect(master, request):\n url = request.args.get(\"redirect\", [\"/\"])[0]\n return resource.Redirect(master.config.buildbotURL + \"#\" + url)\n\n\nclass PreAuthenticatedLoginResource(LoginResource):\n # a LoginResource which is already authenticated via a\n # HTTPAuthSessionWrapper\n\n def __init__(self, master, username):\n LoginResource.__init__(self, master)\n self.username = username\n\n @defer.inlineCallbacks\n def renderLogin(self, request):\n session = request.getSession()\n session.user_info = dict(username=bytes2NativeString(self.username))\n yield self.master.www.auth.updateUserInfo(request)\n raise _redirect(self.master, request)\n\n\nclass LogoutResource(resource.Resource):\n\n def render_GET(self, request):\n session = request.getSession()\n session.expire()\n session.updateSession(request)\n request.redirect(_redirect(self.master, request).url)\n return b''\n", "path": "master/buildbot/www/auth.py"}]}
2,481
269
gh_patches_debug_39826
rasdani/github-patches
git_diff
mozmeao__basket-508
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- TypeError when SFMC error is returned SFMC response.content is bytes, but the sfmc.py module tries to concat that message with strings to throw NewsletterExceptions. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `basket/news/backends/sfmc.py` Content: ``` 1 """ 2 API Client Library for Salesforce Marketing Cloud (SFMC) 3 Formerly ExactTarget 4 """ 5 from random import randint 6 from time import time 7 8 from django.conf import settings 9 from django.core.cache import cache 10 11 import requests 12 from django_statsd.clients import statsd 13 from FuelSDK import ET_Client, ET_DataExtension_Row, ET_TriggeredSend 14 15 from basket.news.backends.common import get_timer_decorator, NewsletterException, \ 16 NewsletterNoResultsException 17 18 19 time_request = get_timer_decorator('news.backends.sfmc') 20 21 22 HERD_TIMEOUT = 60 23 AUTH_BUFFER = 300 # 5 min 24 MAX_BUFFER = HERD_TIMEOUT + AUTH_BUFFER 25 26 27 class ETRefreshClient(ET_Client): 28 token_cache_key = 'backends:sfmc:auth:tokens' 29 authTokenExpiresIn = None 30 token_property_names = [ 31 'authToken', 32 'authTokenExpiration', 33 'internalAuthToken', 34 'refreshKey', 35 ] 36 _old_authToken = None 37 38 def __init__(self, get_server_wsdl=False, debug=False, params=None): 39 # setting this manually as it has thrown errors and doesn't change 40 if settings.USE_SANDBOX_BACKEND: 41 self.endpoint = 'https://webservice.test.exacttarget.com/Service.asmx' 42 else: 43 self.endpoint = 'https://webservice.s4.exacttarget.com/Service.asmx' 44 45 super(ETRefreshClient, self).__init__(get_server_wsdl, debug, params) 46 47 def token_is_expired(self): 48 """Report token is expired between 5 and 6 minutes early 49 50 Having the expiration be random helps prevent multiple basket 51 instances simultaneously requesting a new token from SFMC, 52 a.k.a. the Thundering Herd problem. 
53 """ 54 if self.authTokenExpiration is None: 55 return True 56 57 time_buffer = randint(1, HERD_TIMEOUT) + AUTH_BUFFER 58 return time() + time_buffer > self.authTokenExpiration 59 60 def refresh_auth_tokens_from_cache(self): 61 """Refresh the auth token and other values from cache""" 62 if self.authToken is not None and time() + MAX_BUFFER < self.authTokenExpiration: 63 # no need to refresh if the current tokens are still good 64 return 65 66 tokens = cache.get(self.token_cache_key) 67 if tokens: 68 if not isinstance(tokens, dict): 69 # something wrong was cached 70 cache.delete(self.token_cache_key) 71 return 72 73 for prop, value in tokens.items(): 74 if prop in self.token_property_names: 75 setattr(self, prop, value) 76 77 # set the value so we can detect if it changed later 78 self._old_authToken = self.authToken 79 self.build_soap_client() 80 81 def cache_auth_tokens(self): 82 if self.authToken is not None and self.authToken != self._old_authToken: 83 new_tokens = {prop: getattr(self, prop) for prop in self.token_property_names} 84 # 10 min longer than expiration so that refreshKey can be used 85 cache.set(self.token_cache_key, new_tokens, self.authTokenExpiresIn + 600) 86 87 def request_token(self, payload): 88 r = requests.post(self.auth_url, json=payload) 89 try: 90 token_response = r.json() 91 except ValueError: 92 raise NewsletterException('SFMC Error During Auth: ' + r.content, 93 status_code=r.status_code) 94 95 if 'accessToken' in token_response: 96 return token_response 97 98 # try again without refreshToken 99 if 'refreshToken' in payload: 100 # not strictly required, makes testing easier 101 payload = payload.copy() 102 del payload['refreshToken'] 103 return self.request_token(payload) 104 105 raise NewsletterException('SFMC Error During Auth: ' + r.content, 106 status_code=r.status_code) 107 108 def refresh_token(self, force_refresh=False): 109 """ 110 Called from many different places right before executing a SOAP call 111 """ 112 # If we don't already have a token or the token expires within 5 min(300 seconds), get one 113 self.refresh_auth_tokens_from_cache() 114 if force_refresh or self.authToken is None or self.token_is_expired(): 115 payload = { 116 'clientId': self.client_id, 117 'clientSecret': self.client_secret, 118 'accessType': 'offline', 119 } 120 if self.refreshKey: 121 payload['refreshToken'] = self.refreshKey 122 123 token_response = self.request_token(payload) 124 statsd.incr('news.backends.sfmc.auth_token_refresh') 125 self.authToken = token_response['accessToken'] 126 self.authTokenExpiresIn = token_response['expiresIn'] 127 self.authTokenExpiration = time() + self.authTokenExpiresIn 128 self.internalAuthToken = token_response['legacyToken'] 129 if 'refreshToken' in token_response: 130 self.refreshKey = token_response['refreshToken'] 131 132 self.build_soap_client() 133 self.cache_auth_tokens() 134 135 136 def assert_response(resp): 137 if not resp.status: 138 raise NewsletterException(str(resp.results)) 139 140 141 def assert_results(resp): 142 assert_response(resp) 143 if not resp.results: 144 raise NewsletterNoResultsException() 145 146 147 def build_attributes(data): 148 return [{'Name': key, 'Value': value} for key, value in data.items()] 149 150 151 class SFMC(object): 152 _client = None 153 sms_api_url = 'https://www.exacttargetapis.com/sms/v1/messageContact/{}/send' 154 rowset_api_url = 'https://www.exacttargetapis.com/hub/v1/dataevents/key:{}/rowset' 155 156 @property 157 def client(self): 158 if self._client is None and 'clientid' in 
settings.SFMC_SETTINGS: 159 self._client = ETRefreshClient(False, settings.SFMC_DEBUG, settings.SFMC_SETTINGS) 160 161 return self._client 162 163 @property 164 def auth_header(self): 165 self.client.refresh_token() 166 return {'Authorization': 'Bearer {0}'.format(self.client.authToken)} 167 168 def _get_row_obj(self, de_name, props): 169 row = ET_DataExtension_Row() 170 row.auth_stub = self.client 171 row.CustomerKey = row.Name = de_name 172 row.props = props 173 return row 174 175 @time_request 176 def get_row(self, de_name, fields, token=None, email=None): 177 """ 178 Get the values of `fields` from a data extension. Either token or email is required. 179 180 @param de_name: name of the data extension 181 @param fields: list of column names 182 @param token: the user's token 183 @param email: the user's email address 184 @return: dict of user data 185 """ 186 assert token or email, 'token or email required' 187 row = self._get_row_obj(de_name, fields) 188 if token: 189 row.search_filter = { 190 'Property': 'TOKEN', 191 'SimpleOperator': 'equals', 192 'Value': token, 193 } 194 elif email: 195 row.search_filter = { 196 'Property': 'EMAIL_ADDRESS_', 197 'SimpleOperator': 'equals', 198 'Value': email, 199 } 200 201 resp = row.get() 202 assert_results(resp) 203 # TODO do something if more than 1 result is returned 204 return dict((p.Name, p.Value) 205 for p in resp.results[0].Properties.Property) 206 207 @time_request 208 def add_row(self, de_name, values): 209 """ 210 Add a row to a data extension. 211 212 @param de_name: name of the data extension 213 @param values: dict containing the COLUMN: VALUE pairs 214 @return: None 215 """ 216 row = self._get_row_obj(de_name, values) 217 resp = row.post() 218 assert_response(resp) 219 220 @time_request 221 def update_row(self, de_name, values): 222 """ 223 Update a row in a data extension. 224 225 @param de_name: name of the data extension 226 @param values: dict containing the COLUMN: VALUE pairs. 227 Must contain TOKEN or EMAIL_ADDRESS_. 228 @return: None 229 """ 230 row = self._get_row_obj(de_name, values) 231 resp = row.patch() 232 assert_response(resp) 233 234 @time_request 235 def upsert_row(self, de_name, values): 236 """ 237 Add or update a row in a data extension. 238 239 @param de_name: name of the data extension 240 @param values: dict containing the COLUMN: VALUE pairs. 241 Must contain TOKEN or EMAIL_ADDRESS_. 242 @return: None 243 """ 244 row = self._get_row_obj(de_name, values) 245 resp = row.patch(True) 246 assert_response(resp) 247 248 @time_request 249 def delete_row(self, de_name, column, value): 250 """ 251 Delete a row from a data extension. Either token or email are required. 252 253 @param de_name: name of the data extension 254 @param token: user's token 255 @param email: user's email address 256 @return: None 257 """ 258 row = self._get_row_obj(de_name, {column: value}) 259 resp = row.delete() 260 assert_response(resp) 261 262 @time_request 263 def send_mail(self, ts_name, email, subscriber_key, token=None): 264 """ 265 Send an email message to a user (Triggered Send). 
266 267 @param ts_name: the name of the message to send 268 @param email: the email address of the user 269 @param subscriber_key: the key for the user in SFMC 270 @param format: T or H for Text or HTML 271 @param token: optional token if a recovery message 272 @return: None 273 """ 274 ts = ET_TriggeredSend() 275 ts.auth_stub = self.client 276 ts.props = {'CustomerKey': ts_name} 277 subscriber = { 278 'EmailAddress': email, 279 'SubscriberKey': subscriber_key, 280 } 281 if token: 282 ts.attributes = build_attributes({ 283 'Token__c': token, 284 }) 285 subscriber['Attributes'] = ts.attributes 286 ts.subscribers = [subscriber] 287 resp = ts.send() 288 assert_response(resp) 289 290 @time_request 291 def send_sms(self, phone_numbers, message_id): 292 if isinstance(phone_numbers, str): 293 phone_numbers = [phone_numbers] 294 295 phone_numbers = [pn.lstrip('+') for pn in phone_numbers] 296 data = { 297 'mobileNumbers': phone_numbers, 298 'Subscribe': True, 299 'Resubscribe': True, 300 'keyword': 'FFDROID', # TODO: Set keyword in arguments. 301 } 302 url = self.sms_api_url.format(message_id) 303 response = requests.post(url, json=data, headers=self.auth_header, timeout=10) 304 if response.status_code >= 500: 305 raise NewsletterException('SFMC Server Error: {}'.format(response.content), 306 status_code=response.status_code) 307 308 if response.status_code >= 400: 309 raise NewsletterException('SFMC Request Error: {}'.format(response.content), 310 status_code=response.status_code) 311 312 @time_request 313 def bulk_upsert_rows(self, de_name, values): 314 url = self.rowset_api_url.format(de_name) 315 response = requests.post(url, json=values, headers=self.auth_header, timeout=30) 316 if response.status_code >= 500: 317 raise NewsletterException('SFMC Server Error: {}'.format(response.content), 318 status_code=response.status_code) 319 320 if response.status_code >= 400: 321 raise NewsletterException(response.content, status_code=response.status_code) 322 323 324 sfmc = SFMC() 325 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/basket/news/backends/sfmc.py b/basket/news/backends/sfmc.py --- a/basket/news/backends/sfmc.py +++ b/basket/news/backends/sfmc.py @@ -7,6 +7,7 @@ from django.conf import settings from django.core.cache import cache +from django.utils.encoding import force_str import requests from django_statsd.clients import statsd @@ -89,7 +90,7 @@ try: token_response = r.json() except ValueError: - raise NewsletterException('SFMC Error During Auth: ' + r.content, + raise NewsletterException('SFMC Error During Auth: ' + force_str(r.content), status_code=r.status_code) if 'accessToken' in token_response: @@ -102,7 +103,7 @@ del payload['refreshToken'] return self.request_token(payload) - raise NewsletterException('SFMC Error During Auth: ' + r.content, + raise NewsletterException('SFMC Error During Auth: ' + force_str(r.content), status_code=r.status_code) def refresh_token(self, force_refresh=False): @@ -302,11 +303,11 @@ url = self.sms_api_url.format(message_id) response = requests.post(url, json=data, headers=self.auth_header, timeout=10) if response.status_code >= 500: - raise NewsletterException('SFMC Server Error: {}'.format(response.content), + raise NewsletterException('SFMC Server Error: {}'.format(force_str(response.content)), status_code=response.status_code) if response.status_code >= 400: - raise NewsletterException('SFMC Request Error: {}'.format(response.content), + raise NewsletterException('SFMC Request Error: {}'.format(force_str(response.content)), status_code=response.status_code) @time_request @@ -314,11 +315,11 @@ url = self.rowset_api_url.format(de_name) response = requests.post(url, json=values, headers=self.auth_header, timeout=30) if response.status_code >= 500: - raise NewsletterException('SFMC Server Error: {}'.format(response.content), + raise NewsletterException('SFMC Server Error: {}'.format(force_str(response.content)), status_code=response.status_code) if response.status_code >= 400: - raise NewsletterException(response.content, status_code=response.status_code) + raise NewsletterException(force_str(response.content), status_code=response.status_code) sfmc = SFMC()
{"golden_diff": "diff --git a/basket/news/backends/sfmc.py b/basket/news/backends/sfmc.py\n--- a/basket/news/backends/sfmc.py\n+++ b/basket/news/backends/sfmc.py\n@@ -7,6 +7,7 @@\n \n from django.conf import settings\n from django.core.cache import cache\n+from django.utils.encoding import force_str\n \n import requests\n from django_statsd.clients import statsd\n@@ -89,7 +90,7 @@\n try:\n token_response = r.json()\n except ValueError:\n- raise NewsletterException('SFMC Error During Auth: ' + r.content,\n+ raise NewsletterException('SFMC Error During Auth: ' + force_str(r.content),\n status_code=r.status_code)\n \n if 'accessToken' in token_response:\n@@ -102,7 +103,7 @@\n del payload['refreshToken']\n return self.request_token(payload)\n \n- raise NewsletterException('SFMC Error During Auth: ' + r.content,\n+ raise NewsletterException('SFMC Error During Auth: ' + force_str(r.content),\n status_code=r.status_code)\n \n def refresh_token(self, force_refresh=False):\n@@ -302,11 +303,11 @@\n url = self.sms_api_url.format(message_id)\n response = requests.post(url, json=data, headers=self.auth_header, timeout=10)\n if response.status_code >= 500:\n- raise NewsletterException('SFMC Server Error: {}'.format(response.content),\n+ raise NewsletterException('SFMC Server Error: {}'.format(force_str(response.content)),\n status_code=response.status_code)\n \n if response.status_code >= 400:\n- raise NewsletterException('SFMC Request Error: {}'.format(response.content),\n+ raise NewsletterException('SFMC Request Error: {}'.format(force_str(response.content)),\n status_code=response.status_code)\n \n @time_request\n@@ -314,11 +315,11 @@\n url = self.rowset_api_url.format(de_name)\n response = requests.post(url, json=values, headers=self.auth_header, timeout=30)\n if response.status_code >= 500:\n- raise NewsletterException('SFMC Server Error: {}'.format(response.content),\n+ raise NewsletterException('SFMC Server Error: {}'.format(force_str(response.content)),\n status_code=response.status_code)\n \n if response.status_code >= 400:\n- raise NewsletterException(response.content, status_code=response.status_code)\n+ raise NewsletterException(force_str(response.content), status_code=response.status_code)\n \n \n sfmc = SFMC()\n", "issue": "TypeError when SFMC error is returned\nSFMC response.content is bytes, but the sfmc.py module tries to concat that message with strings to throw NewsletterExceptions. 
\n", "before_files": [{"content": "\"\"\"\nAPI Client Library for Salesforce Marketing Cloud (SFMC)\nFormerly ExactTarget\n\"\"\"\nfrom random import randint\nfrom time import time\n\nfrom django.conf import settings\nfrom django.core.cache import cache\n\nimport requests\nfrom django_statsd.clients import statsd\nfrom FuelSDK import ET_Client, ET_DataExtension_Row, ET_TriggeredSend\n\nfrom basket.news.backends.common import get_timer_decorator, NewsletterException, \\\n NewsletterNoResultsException\n\n\ntime_request = get_timer_decorator('news.backends.sfmc')\n\n\nHERD_TIMEOUT = 60\nAUTH_BUFFER = 300 # 5 min\nMAX_BUFFER = HERD_TIMEOUT + AUTH_BUFFER\n\n\nclass ETRefreshClient(ET_Client):\n token_cache_key = 'backends:sfmc:auth:tokens'\n authTokenExpiresIn = None\n token_property_names = [\n 'authToken',\n 'authTokenExpiration',\n 'internalAuthToken',\n 'refreshKey',\n ]\n _old_authToken = None\n\n def __init__(self, get_server_wsdl=False, debug=False, params=None):\n # setting this manually as it has thrown errors and doesn't change\n if settings.USE_SANDBOX_BACKEND:\n self.endpoint = 'https://webservice.test.exacttarget.com/Service.asmx'\n else:\n self.endpoint = 'https://webservice.s4.exacttarget.com/Service.asmx'\n\n super(ETRefreshClient, self).__init__(get_server_wsdl, debug, params)\n\n def token_is_expired(self):\n \"\"\"Report token is expired between 5 and 6 minutes early\n\n Having the expiration be random helps prevent multiple basket\n instances simultaneously requesting a new token from SFMC,\n a.k.a. the Thundering Herd problem.\n \"\"\"\n if self.authTokenExpiration is None:\n return True\n\n time_buffer = randint(1, HERD_TIMEOUT) + AUTH_BUFFER\n return time() + time_buffer > self.authTokenExpiration\n\n def refresh_auth_tokens_from_cache(self):\n \"\"\"Refresh the auth token and other values from cache\"\"\"\n if self.authToken is not None and time() + MAX_BUFFER < self.authTokenExpiration:\n # no need to refresh if the current tokens are still good\n return\n\n tokens = cache.get(self.token_cache_key)\n if tokens:\n if not isinstance(tokens, dict):\n # something wrong was cached\n cache.delete(self.token_cache_key)\n return\n\n for prop, value in tokens.items():\n if prop in self.token_property_names:\n setattr(self, prop, value)\n\n # set the value so we can detect if it changed later\n self._old_authToken = self.authToken\n self.build_soap_client()\n\n def cache_auth_tokens(self):\n if self.authToken is not None and self.authToken != self._old_authToken:\n new_tokens = {prop: getattr(self, prop) for prop in self.token_property_names}\n # 10 min longer than expiration so that refreshKey can be used\n cache.set(self.token_cache_key, new_tokens, self.authTokenExpiresIn + 600)\n\n def request_token(self, payload):\n r = requests.post(self.auth_url, json=payload)\n try:\n token_response = r.json()\n except ValueError:\n raise NewsletterException('SFMC Error During Auth: ' + r.content,\n status_code=r.status_code)\n\n if 'accessToken' in token_response:\n return token_response\n\n # try again without refreshToken\n if 'refreshToken' in payload:\n # not strictly required, makes testing easier\n payload = payload.copy()\n del payload['refreshToken']\n return self.request_token(payload)\n\n raise NewsletterException('SFMC Error During Auth: ' + r.content,\n status_code=r.status_code)\n\n def refresh_token(self, force_refresh=False):\n \"\"\"\n Called from many different places right before executing a SOAP call\n \"\"\"\n # If we don't already have a token or the token expires 
within 5 min(300 seconds), get one\n self.refresh_auth_tokens_from_cache()\n if force_refresh or self.authToken is None or self.token_is_expired():\n payload = {\n 'clientId': self.client_id,\n 'clientSecret': self.client_secret,\n 'accessType': 'offline',\n }\n if self.refreshKey:\n payload['refreshToken'] = self.refreshKey\n\n token_response = self.request_token(payload)\n statsd.incr('news.backends.sfmc.auth_token_refresh')\n self.authToken = token_response['accessToken']\n self.authTokenExpiresIn = token_response['expiresIn']\n self.authTokenExpiration = time() + self.authTokenExpiresIn\n self.internalAuthToken = token_response['legacyToken']\n if 'refreshToken' in token_response:\n self.refreshKey = token_response['refreshToken']\n\n self.build_soap_client()\n self.cache_auth_tokens()\n\n\ndef assert_response(resp):\n if not resp.status:\n raise NewsletterException(str(resp.results))\n\n\ndef assert_results(resp):\n assert_response(resp)\n if not resp.results:\n raise NewsletterNoResultsException()\n\n\ndef build_attributes(data):\n return [{'Name': key, 'Value': value} for key, value in data.items()]\n\n\nclass SFMC(object):\n _client = None\n sms_api_url = 'https://www.exacttargetapis.com/sms/v1/messageContact/{}/send'\n rowset_api_url = 'https://www.exacttargetapis.com/hub/v1/dataevents/key:{}/rowset'\n\n @property\n def client(self):\n if self._client is None and 'clientid' in settings.SFMC_SETTINGS:\n self._client = ETRefreshClient(False, settings.SFMC_DEBUG, settings.SFMC_SETTINGS)\n\n return self._client\n\n @property\n def auth_header(self):\n self.client.refresh_token()\n return {'Authorization': 'Bearer {0}'.format(self.client.authToken)}\n\n def _get_row_obj(self, de_name, props):\n row = ET_DataExtension_Row()\n row.auth_stub = self.client\n row.CustomerKey = row.Name = de_name\n row.props = props\n return row\n\n @time_request\n def get_row(self, de_name, fields, token=None, email=None):\n \"\"\"\n Get the values of `fields` from a data extension. 
Either token or email is required.\n\n @param de_name: name of the data extension\n @param fields: list of column names\n @param token: the user's token\n @param email: the user's email address\n @return: dict of user data\n \"\"\"\n assert token or email, 'token or email required'\n row = self._get_row_obj(de_name, fields)\n if token:\n row.search_filter = {\n 'Property': 'TOKEN',\n 'SimpleOperator': 'equals',\n 'Value': token,\n }\n elif email:\n row.search_filter = {\n 'Property': 'EMAIL_ADDRESS_',\n 'SimpleOperator': 'equals',\n 'Value': email,\n }\n\n resp = row.get()\n assert_results(resp)\n # TODO do something if more than 1 result is returned\n return dict((p.Name, p.Value)\n for p in resp.results[0].Properties.Property)\n\n @time_request\n def add_row(self, de_name, values):\n \"\"\"\n Add a row to a data extension.\n\n @param de_name: name of the data extension\n @param values: dict containing the COLUMN: VALUE pairs\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, values)\n resp = row.post()\n assert_response(resp)\n\n @time_request\n def update_row(self, de_name, values):\n \"\"\"\n Update a row in a data extension.\n\n @param de_name: name of the data extension\n @param values: dict containing the COLUMN: VALUE pairs.\n Must contain TOKEN or EMAIL_ADDRESS_.\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, values)\n resp = row.patch()\n assert_response(resp)\n\n @time_request\n def upsert_row(self, de_name, values):\n \"\"\"\n Add or update a row in a data extension.\n\n @param de_name: name of the data extension\n @param values: dict containing the COLUMN: VALUE pairs.\n Must contain TOKEN or EMAIL_ADDRESS_.\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, values)\n resp = row.patch(True)\n assert_response(resp)\n\n @time_request\n def delete_row(self, de_name, column, value):\n \"\"\"\n Delete a row from a data extension. 
Either token or email are required.\n\n @param de_name: name of the data extension\n @param token: user's token\n @param email: user's email address\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, {column: value})\n resp = row.delete()\n assert_response(resp)\n\n @time_request\n def send_mail(self, ts_name, email, subscriber_key, token=None):\n \"\"\"\n Send an email message to a user (Triggered Send).\n\n @param ts_name: the name of the message to send\n @param email: the email address of the user\n @param subscriber_key: the key for the user in SFMC\n @param format: T or H for Text or HTML\n @param token: optional token if a recovery message\n @return: None\n \"\"\"\n ts = ET_TriggeredSend()\n ts.auth_stub = self.client\n ts.props = {'CustomerKey': ts_name}\n subscriber = {\n 'EmailAddress': email,\n 'SubscriberKey': subscriber_key,\n }\n if token:\n ts.attributes = build_attributes({\n 'Token__c': token,\n })\n subscriber['Attributes'] = ts.attributes\n ts.subscribers = [subscriber]\n resp = ts.send()\n assert_response(resp)\n\n @time_request\n def send_sms(self, phone_numbers, message_id):\n if isinstance(phone_numbers, str):\n phone_numbers = [phone_numbers]\n\n phone_numbers = [pn.lstrip('+') for pn in phone_numbers]\n data = {\n 'mobileNumbers': phone_numbers,\n 'Subscribe': True,\n 'Resubscribe': True,\n 'keyword': 'FFDROID', # TODO: Set keyword in arguments.\n }\n url = self.sms_api_url.format(message_id)\n response = requests.post(url, json=data, headers=self.auth_header, timeout=10)\n if response.status_code >= 500:\n raise NewsletterException('SFMC Server Error: {}'.format(response.content),\n status_code=response.status_code)\n\n if response.status_code >= 400:\n raise NewsletterException('SFMC Request Error: {}'.format(response.content),\n status_code=response.status_code)\n\n @time_request\n def bulk_upsert_rows(self, de_name, values):\n url = self.rowset_api_url.format(de_name)\n response = requests.post(url, json=values, headers=self.auth_header, timeout=30)\n if response.status_code >= 500:\n raise NewsletterException('SFMC Server Error: {}'.format(response.content),\n status_code=response.status_code)\n\n if response.status_code >= 400:\n raise NewsletterException(response.content, status_code=response.status_code)\n\n\nsfmc = SFMC()\n", "path": "basket/news/backends/sfmc.py"}], "after_files": [{"content": "\"\"\"\nAPI Client Library for Salesforce Marketing Cloud (SFMC)\nFormerly ExactTarget\n\"\"\"\nfrom random import randint\nfrom time import time\n\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.utils.encoding import force_str\n\nimport requests\nfrom django_statsd.clients import statsd\nfrom FuelSDK import ET_Client, ET_DataExtension_Row, ET_TriggeredSend\n\nfrom basket.news.backends.common import get_timer_decorator, NewsletterException, \\\n NewsletterNoResultsException\n\n\ntime_request = get_timer_decorator('news.backends.sfmc')\n\n\nHERD_TIMEOUT = 60\nAUTH_BUFFER = 300 # 5 min\nMAX_BUFFER = HERD_TIMEOUT + AUTH_BUFFER\n\n\nclass ETRefreshClient(ET_Client):\n token_cache_key = 'backends:sfmc:auth:tokens'\n authTokenExpiresIn = None\n token_property_names = [\n 'authToken',\n 'authTokenExpiration',\n 'internalAuthToken',\n 'refreshKey',\n ]\n _old_authToken = None\n\n def __init__(self, get_server_wsdl=False, debug=False, params=None):\n # setting this manually as it has thrown errors and doesn't change\n if settings.USE_SANDBOX_BACKEND:\n self.endpoint = 'https://webservice.test.exacttarget.com/Service.asmx'\n 
else:\n self.endpoint = 'https://webservice.s4.exacttarget.com/Service.asmx'\n\n super(ETRefreshClient, self).__init__(get_server_wsdl, debug, params)\n\n def token_is_expired(self):\n \"\"\"Report token is expired between 5 and 6 minutes early\n\n Having the expiration be random helps prevent multiple basket\n instances simultaneously requesting a new token from SFMC,\n a.k.a. the Thundering Herd problem.\n \"\"\"\n if self.authTokenExpiration is None:\n return True\n\n time_buffer = randint(1, HERD_TIMEOUT) + AUTH_BUFFER\n return time() + time_buffer > self.authTokenExpiration\n\n def refresh_auth_tokens_from_cache(self):\n \"\"\"Refresh the auth token and other values from cache\"\"\"\n if self.authToken is not None and time() + MAX_BUFFER < self.authTokenExpiration:\n # no need to refresh if the current tokens are still good\n return\n\n tokens = cache.get(self.token_cache_key)\n if tokens:\n if not isinstance(tokens, dict):\n # something wrong was cached\n cache.delete(self.token_cache_key)\n return\n\n for prop, value in tokens.items():\n if prop in self.token_property_names:\n setattr(self, prop, value)\n\n # set the value so we can detect if it changed later\n self._old_authToken = self.authToken\n self.build_soap_client()\n\n def cache_auth_tokens(self):\n if self.authToken is not None and self.authToken != self._old_authToken:\n new_tokens = {prop: getattr(self, prop) for prop in self.token_property_names}\n # 10 min longer than expiration so that refreshKey can be used\n cache.set(self.token_cache_key, new_tokens, self.authTokenExpiresIn + 600)\n\n def request_token(self, payload):\n r = requests.post(self.auth_url, json=payload)\n try:\n token_response = r.json()\n except ValueError:\n raise NewsletterException('SFMC Error During Auth: ' + force_str(r.content),\n status_code=r.status_code)\n\n if 'accessToken' in token_response:\n return token_response\n\n # try again without refreshToken\n if 'refreshToken' in payload:\n # not strictly required, makes testing easier\n payload = payload.copy()\n del payload['refreshToken']\n return self.request_token(payload)\n\n raise NewsletterException('SFMC Error During Auth: ' + force_str(r.content),\n status_code=r.status_code)\n\n def refresh_token(self, force_refresh=False):\n \"\"\"\n Called from many different places right before executing a SOAP call\n \"\"\"\n # If we don't already have a token or the token expires within 5 min(300 seconds), get one\n self.refresh_auth_tokens_from_cache()\n if force_refresh or self.authToken is None or self.token_is_expired():\n payload = {\n 'clientId': self.client_id,\n 'clientSecret': self.client_secret,\n 'accessType': 'offline',\n }\n if self.refreshKey:\n payload['refreshToken'] = self.refreshKey\n\n token_response = self.request_token(payload)\n statsd.incr('news.backends.sfmc.auth_token_refresh')\n self.authToken = token_response['accessToken']\n self.authTokenExpiresIn = token_response['expiresIn']\n self.authTokenExpiration = time() + self.authTokenExpiresIn\n self.internalAuthToken = token_response['legacyToken']\n if 'refreshToken' in token_response:\n self.refreshKey = token_response['refreshToken']\n\n self.build_soap_client()\n self.cache_auth_tokens()\n\n\ndef assert_response(resp):\n if not resp.status:\n raise NewsletterException(str(resp.results))\n\n\ndef assert_results(resp):\n assert_response(resp)\n if not resp.results:\n raise NewsletterNoResultsException()\n\n\ndef build_attributes(data):\n return [{'Name': key, 'Value': value} for key, value in data.items()]\n\n\nclass 
SFMC(object):\n _client = None\n sms_api_url = 'https://www.exacttargetapis.com/sms/v1/messageContact/{}/send'\n rowset_api_url = 'https://www.exacttargetapis.com/hub/v1/dataevents/key:{}/rowset'\n\n @property\n def client(self):\n if self._client is None and 'clientid' in settings.SFMC_SETTINGS:\n self._client = ETRefreshClient(False, settings.SFMC_DEBUG, settings.SFMC_SETTINGS)\n\n return self._client\n\n @property\n def auth_header(self):\n self.client.refresh_token()\n return {'Authorization': 'Bearer {0}'.format(self.client.authToken)}\n\n def _get_row_obj(self, de_name, props):\n row = ET_DataExtension_Row()\n row.auth_stub = self.client\n row.CustomerKey = row.Name = de_name\n row.props = props\n return row\n\n @time_request\n def get_row(self, de_name, fields, token=None, email=None):\n \"\"\"\n Get the values of `fields` from a data extension. Either token or email is required.\n\n @param de_name: name of the data extension\n @param fields: list of column names\n @param token: the user's token\n @param email: the user's email address\n @return: dict of user data\n \"\"\"\n assert token or email, 'token or email required'\n row = self._get_row_obj(de_name, fields)\n if token:\n row.search_filter = {\n 'Property': 'TOKEN',\n 'SimpleOperator': 'equals',\n 'Value': token,\n }\n elif email:\n row.search_filter = {\n 'Property': 'EMAIL_ADDRESS_',\n 'SimpleOperator': 'equals',\n 'Value': email,\n }\n\n resp = row.get()\n assert_results(resp)\n # TODO do something if more than 1 result is returned\n return dict((p.Name, p.Value)\n for p in resp.results[0].Properties.Property)\n\n @time_request\n def add_row(self, de_name, values):\n \"\"\"\n Add a row to a data extension.\n\n @param de_name: name of the data extension\n @param values: dict containing the COLUMN: VALUE pairs\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, values)\n resp = row.post()\n assert_response(resp)\n\n @time_request\n def update_row(self, de_name, values):\n \"\"\"\n Update a row in a data extension.\n\n @param de_name: name of the data extension\n @param values: dict containing the COLUMN: VALUE pairs.\n Must contain TOKEN or EMAIL_ADDRESS_.\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, values)\n resp = row.patch()\n assert_response(resp)\n\n @time_request\n def upsert_row(self, de_name, values):\n \"\"\"\n Add or update a row in a data extension.\n\n @param de_name: name of the data extension\n @param values: dict containing the COLUMN: VALUE pairs.\n Must contain TOKEN or EMAIL_ADDRESS_.\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, values)\n resp = row.patch(True)\n assert_response(resp)\n\n @time_request\n def delete_row(self, de_name, column, value):\n \"\"\"\n Delete a row from a data extension. 
Either token or email are required.\n\n @param de_name: name of the data extension\n @param token: user's token\n @param email: user's email address\n @return: None\n \"\"\"\n row = self._get_row_obj(de_name, {column: value})\n resp = row.delete()\n assert_response(resp)\n\n @time_request\n def send_mail(self, ts_name, email, subscriber_key, token=None):\n \"\"\"\n Send an email message to a user (Triggered Send).\n\n @param ts_name: the name of the message to send\n @param email: the email address of the user\n @param subscriber_key: the key for the user in SFMC\n @param format: T or H for Text or HTML\n @param token: optional token if a recovery message\n @return: None\n \"\"\"\n ts = ET_TriggeredSend()\n ts.auth_stub = self.client\n ts.props = {'CustomerKey': ts_name}\n subscriber = {\n 'EmailAddress': email,\n 'SubscriberKey': subscriber_key,\n }\n if token:\n ts.attributes = build_attributes({\n 'Token__c': token,\n })\n subscriber['Attributes'] = ts.attributes\n ts.subscribers = [subscriber]\n resp = ts.send()\n assert_response(resp)\n\n @time_request\n def send_sms(self, phone_numbers, message_id):\n if isinstance(phone_numbers, str):\n phone_numbers = [phone_numbers]\n\n phone_numbers = [pn.lstrip('+') for pn in phone_numbers]\n data = {\n 'mobileNumbers': phone_numbers,\n 'Subscribe': True,\n 'Resubscribe': True,\n 'keyword': 'FFDROID', # TODO: Set keyword in arguments.\n }\n url = self.sms_api_url.format(message_id)\n response = requests.post(url, json=data, headers=self.auth_header, timeout=10)\n if response.status_code >= 500:\n raise NewsletterException('SFMC Server Error: {}'.format(force_str(response.content)),\n status_code=response.status_code)\n\n if response.status_code >= 400:\n raise NewsletterException('SFMC Request Error: {}'.format(force_str(response.content)),\n status_code=response.status_code)\n\n @time_request\n def bulk_upsert_rows(self, de_name, values):\n url = self.rowset_api_url.format(de_name)\n response = requests.post(url, json=values, headers=self.auth_header, timeout=30)\n if response.status_code >= 500:\n raise NewsletterException('SFMC Server Error: {}'.format(force_str(response.content)),\n status_code=response.status_code)\n\n if response.status_code >= 400:\n raise NewsletterException(force_str(response.content), status_code=response.status_code)\n\n\nsfmc = SFMC()\n", "path": "basket/news/backends/sfmc.py"}]}
3,637
563
gh_patches_debug_22899
rasdani/github-patches
git_diff
mlflow__mlflow-8879
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- clean and transparent store registration These lines https://github.com/mlflow/mlflow/blob/152063e0b6fbadfbb2baecaf7d0ac7ca1b304b31/mlflow/tracking/_tracking_service/utils.py#L194C1-L206C1 initialize a global variable holding important information about store endpoints. But the style is not good: - should be moved to the top and marked clearly - perhaps it would be beneficial to wrap them to be recycled per user request (re-register / re-initialize) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlflow/tracking/_tracking_service/utils.py` Content: ``` 1 import os 2 from functools import partial 3 import logging 4 from pathlib import Path 5 from typing import Union 6 from contextlib import contextmanager 7 8 from mlflow.environment_variables import ( 9 MLFLOW_TRACKING_AWS_SIGV4, 10 MLFLOW_TRACKING_URI, 11 MLFLOW_TRACKING_TOKEN, 12 MLFLOW_TRACKING_INSECURE_TLS, 13 MLFLOW_TRACKING_CLIENT_CERT_PATH, 14 MLFLOW_TRACKING_SERVER_CERT_PATH, 15 ) 16 from mlflow.store.tracking import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH 17 from mlflow.store.db.db_types import DATABASE_ENGINES 18 from mlflow.store.tracking.file_store import FileStore 19 from mlflow.store.tracking.rest_store import RestStore 20 from mlflow.tracking._tracking_service.registry import TrackingStoreRegistry 21 from mlflow.utils import rest_utils 22 from mlflow.utils.file_utils import path_to_local_file_uri 23 from mlflow.utils.databricks_utils import get_databricks_host_creds 24 from mlflow.utils.uri import _DATABRICKS_UNITY_CATALOG_SCHEME 25 from mlflow.utils.credentials import read_mlflow_creds 26 27 _logger = logging.getLogger(__name__) 28 _tracking_uri = None 29 30 31 def is_tracking_uri_set(): 32 """Returns True if the tracking URI has been set, False otherwise.""" 33 if _tracking_uri or MLFLOW_TRACKING_URI.get(): 34 return True 35 return False 36 37 38 def set_tracking_uri(uri: Union[str, Path]) -> None: 39 """ 40 Set the tracking server URI. This does not affect the 41 currently active run (if one exists), but takes effect for successive runs. 42 43 :param uri: 44 45 - An empty string, or a local file path, prefixed with ``file:/``. Data is stored 46 locally at the provided file (or ``./mlruns`` if empty). 47 - An HTTP URI like ``https://my-tracking-server:5000``. 48 - A Databricks workspace, provided as the string "databricks" or, to use a 49 Databricks CLI 50 `profile <https://github.com/databricks/databricks-cli#installation>`_, 51 "databricks://<profileName>". 52 - A :py:class:`pathlib.Path` instance 53 54 .. test-code-block:: python 55 :caption: Example 56 57 import mlflow 58 59 mlflow.set_tracking_uri("file:///tmp/my_tracking") 60 tracking_uri = mlflow.get_tracking_uri() 61 print("Current tracking uri: {}".format(tracking_uri)) 62 63 .. 
code-block:: text 64 :caption: Output 65 66 Current tracking uri: file:///tmp/my_tracking 67 """ 68 if isinstance(uri, Path): 69 # On Windows with Python3.8 (https://bugs.python.org/issue38671) 70 # .resolve() doesn't return the absolute path if the directory doesn't exist 71 # so we're calling .absolute() first to get the absolute path on Windows, 72 # then .resolve() to clean the path 73 uri = uri.absolute().resolve().as_uri() 74 global _tracking_uri 75 _tracking_uri = uri 76 77 78 @contextmanager 79 def _use_tracking_uri(uri: str, local_store_root_path: str = None) -> None: 80 """ 81 Similar to `mlflow.tracking.set_tracking_uri` function but return a context manager. 82 :param uri: tracking URI to use. 83 :param local_store_root_path: the local store root path for the tracking URI. 84 """ 85 global _tracking_uri 86 cwd = os.getcwd() 87 old_tracking_uri = _tracking_uri 88 try: 89 if local_store_root_path is not None: 90 os.chdir(local_store_root_path) 91 _tracking_uri = uri 92 yield 93 finally: 94 _tracking_uri = old_tracking_uri 95 os.chdir(cwd) 96 97 98 def _resolve_tracking_uri(tracking_uri=None): 99 return tracking_uri or get_tracking_uri() 100 101 102 def get_tracking_uri() -> str: 103 """ 104 Get the current tracking URI. This may not correspond to the tracking URI of 105 the currently active run, since the tracking URI can be updated via ``set_tracking_uri``. 106 107 :return: The tracking URI. 108 109 .. code-block:: python 110 :caption: Example 111 112 import mlflow 113 114 # Get the current tracking uri 115 tracking_uri = mlflow.get_tracking_uri() 116 print("Current tracking uri: {}".format(tracking_uri)) 117 118 .. code-block:: text 119 :caption: Output 120 121 Current tracking uri: file:///.../mlruns 122 """ 123 global _tracking_uri 124 if _tracking_uri is not None: 125 return _tracking_uri 126 elif uri := MLFLOW_TRACKING_URI.get(): 127 return uri 128 else: 129 return path_to_local_file_uri(os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH)) 130 131 132 def _get_file_store(store_uri, **_): 133 return FileStore(store_uri, store_uri) 134 135 136 def _get_sqlalchemy_store(store_uri, artifact_uri): 137 from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore 138 139 if artifact_uri is None: 140 artifact_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH 141 return SqlAlchemyStore(store_uri, artifact_uri) 142 143 144 def _get_default_host_creds(store_uri): 145 creds = read_mlflow_creds() 146 return rest_utils.MlflowHostCreds( 147 host=store_uri, 148 username=creds.username, 149 password=creds.password, 150 token=MLFLOW_TRACKING_TOKEN.get(), 151 aws_sigv4=MLFLOW_TRACKING_AWS_SIGV4.get(), 152 ignore_tls_verification=MLFLOW_TRACKING_INSECURE_TLS.get(), 153 client_cert_path=MLFLOW_TRACKING_CLIENT_CERT_PATH.get(), 154 server_cert_path=MLFLOW_TRACKING_SERVER_CERT_PATH.get(), 155 ) 156 157 158 def _get_rest_store(store_uri, **_): 159 return RestStore(partial(_get_default_host_creds, store_uri)) 160 161 162 def _get_databricks_rest_store(store_uri, **_): 163 return RestStore(partial(get_databricks_host_creds, store_uri)) 164 165 166 def _get_databricks_uc_rest_store(store_uri, **_): 167 from mlflow.exceptions import MlflowException 168 from mlflow.version import VERSION 169 170 global _tracking_store_registry 171 supported_schemes = [ 172 scheme 173 for scheme in _tracking_store_registry._registry 174 if scheme != _DATABRICKS_UNITY_CATALOG_SCHEME 175 ] 176 raise MlflowException( 177 f"Detected Unity Catalog tracking URI '{store_uri}'. 
" 178 "Setting the tracking URI to a Unity Catalog backend is not supported in the current " 179 f"version of the MLflow client ({VERSION}). " 180 "Please specify a different tracking URI via mlflow.set_tracking_uri, with " 181 "one of the supported schemes: " 182 f"{supported_schemes}. If you're trying to access models in the Unity " 183 "Catalog, please upgrade to the latest version of the MLflow Python " 184 "client, then specify a Unity Catalog model registry URI via " 185 f"mlflow.set_registry_uri('{_DATABRICKS_UNITY_CATALOG_SCHEME}') or " 186 f"mlflow.set_registry_uri('{_DATABRICKS_UNITY_CATALOG_SCHEME}://profile_name'), where " 187 "'profile_name' is the name of the Databricks CLI profile to use for " 188 "authentication. Be sure to leave the tracking URI configured to use " 189 "one of the supported schemes listed above." 190 ) 191 192 193 _tracking_store_registry = TrackingStoreRegistry() 194 _tracking_store_registry.register("", _get_file_store) 195 _tracking_store_registry.register("file", _get_file_store) 196 _tracking_store_registry.register("databricks", _get_databricks_rest_store) 197 _tracking_store_registry.register(_DATABRICKS_UNITY_CATALOG_SCHEME, _get_databricks_uc_rest_store) 198 199 for scheme in ["http", "https"]: 200 _tracking_store_registry.register(scheme, _get_rest_store) 201 202 for scheme in DATABASE_ENGINES: 203 _tracking_store_registry.register(scheme, _get_sqlalchemy_store) 204 205 _tracking_store_registry.register_entrypoints() 206 207 208 def _get_store(store_uri=None, artifact_uri=None): 209 return _tracking_store_registry.get_store(store_uri, artifact_uri) 210 211 212 # TODO(sueann): move to a projects utils module 213 def _get_git_url_if_present(uri): 214 """ 215 Return the path git_uri#sub_directory if the URI passed is a local path that's part of 216 a Git repo, or returns the original URI otherwise. 217 :param uri: The expanded uri 218 :return: The git_uri#sub_directory if the uri is part of a Git repo, 219 otherwise return the original uri 220 """ 221 if "#" in uri: 222 # Already a URI in git repo format 223 return uri 224 try: 225 from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError 226 except ImportError as e: 227 _logger.warning( 228 "Failed to import Git (the git executable is probably not on your PATH)," 229 " so Git SHA is not available. Error: %s", 230 e, 231 ) 232 return uri 233 try: 234 # Check whether this is part of a git repo 235 repo = Repo(uri, search_parent_directories=True) 236 237 # Repo url 238 repo_url = "file://%s" % repo.working_tree_dir 239 240 # Sub directory 241 rlpath = uri.replace(repo.working_tree_dir, "") 242 if rlpath == "": 243 git_path = repo_url 244 elif rlpath[0] == "/": 245 git_path = repo_url + "#" + rlpath[1:] 246 else: 247 git_path = repo_url + "#" + rlpath 248 return git_path 249 except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError): 250 return uri 251 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlflow/tracking/_tracking_service/utils.py b/mlflow/tracking/_tracking_service/utils.py --- a/mlflow/tracking/_tracking_service/utils.py +++ b/mlflow/tracking/_tracking_service/utils.py @@ -191,18 +191,27 @@ _tracking_store_registry = TrackingStoreRegistry() -_tracking_store_registry.register("", _get_file_store) -_tracking_store_registry.register("file", _get_file_store) -_tracking_store_registry.register("databricks", _get_databricks_rest_store) -_tracking_store_registry.register(_DATABRICKS_UNITY_CATALOG_SCHEME, _get_databricks_uc_rest_store) -for scheme in ["http", "https"]: - _tracking_store_registry.register(scheme, _get_rest_store) -for scheme in DATABASE_ENGINES: - _tracking_store_registry.register(scheme, _get_sqlalchemy_store) +def _register_tracking_stores(): + global _tracking_store_registry + _tracking_store_registry.register("", _get_file_store) + _tracking_store_registry.register("file", _get_file_store) + _tracking_store_registry.register("databricks", _get_databricks_rest_store) + _tracking_store_registry.register( + _DATABRICKS_UNITY_CATALOG_SCHEME, _get_databricks_uc_rest_store + ) + + for scheme in ["http", "https"]: + _tracking_store_registry.register(scheme, _get_rest_store) + + for scheme in DATABASE_ENGINES: + _tracking_store_registry.register(scheme, _get_sqlalchemy_store) + + _tracking_store_registry.register_entrypoints() + -_tracking_store_registry.register_entrypoints() +_register_tracking_stores() def _get_store(store_uri=None, artifact_uri=None):
{"golden_diff": "diff --git a/mlflow/tracking/_tracking_service/utils.py b/mlflow/tracking/_tracking_service/utils.py\n--- a/mlflow/tracking/_tracking_service/utils.py\n+++ b/mlflow/tracking/_tracking_service/utils.py\n@@ -191,18 +191,27 @@\n \n \n _tracking_store_registry = TrackingStoreRegistry()\n-_tracking_store_registry.register(\"\", _get_file_store)\n-_tracking_store_registry.register(\"file\", _get_file_store)\n-_tracking_store_registry.register(\"databricks\", _get_databricks_rest_store)\n-_tracking_store_registry.register(_DATABRICKS_UNITY_CATALOG_SCHEME, _get_databricks_uc_rest_store)\n \n-for scheme in [\"http\", \"https\"]:\n- _tracking_store_registry.register(scheme, _get_rest_store)\n \n-for scheme in DATABASE_ENGINES:\n- _tracking_store_registry.register(scheme, _get_sqlalchemy_store)\n+def _register_tracking_stores():\n+ global _tracking_store_registry\n+ _tracking_store_registry.register(\"\", _get_file_store)\n+ _tracking_store_registry.register(\"file\", _get_file_store)\n+ _tracking_store_registry.register(\"databricks\", _get_databricks_rest_store)\n+ _tracking_store_registry.register(\n+ _DATABRICKS_UNITY_CATALOG_SCHEME, _get_databricks_uc_rest_store\n+ )\n+\n+ for scheme in [\"http\", \"https\"]:\n+ _tracking_store_registry.register(scheme, _get_rest_store)\n+\n+ for scheme in DATABASE_ENGINES:\n+ _tracking_store_registry.register(scheme, _get_sqlalchemy_store)\n+\n+ _tracking_store_registry.register_entrypoints()\n+\n \n-_tracking_store_registry.register_entrypoints()\n+_register_tracking_stores()\n \n \n def _get_store(store_uri=None, artifact_uri=None):\n", "issue": "clean and transparent store registration\nThese lines\r\n\r\nhttps://github.com/mlflow/mlflow/blob/152063e0b6fbadfbb2baecaf7d0ac7ca1b304b31/mlflow/tracking/_tracking_service/utils.py#L194C1-L206C1\r\n\r\ninitialize a global variable holding important information about store endpoints. \r\n\r\nBut the style is not good:\r\n- should be moved to the top and marked clearly\r\n- perhaps it would be beneficial to wrap them to be recycled per user request (re-register / re-initialize)\n", "before_files": [{"content": "import os\nfrom functools import partial\nimport logging\nfrom pathlib import Path\nfrom typing import Union\nfrom contextlib import contextmanager\n\nfrom mlflow.environment_variables import (\n MLFLOW_TRACKING_AWS_SIGV4,\n MLFLOW_TRACKING_URI,\n MLFLOW_TRACKING_TOKEN,\n MLFLOW_TRACKING_INSECURE_TLS,\n MLFLOW_TRACKING_CLIENT_CERT_PATH,\n MLFLOW_TRACKING_SERVER_CERT_PATH,\n)\nfrom mlflow.store.tracking import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH\nfrom mlflow.store.db.db_types import DATABASE_ENGINES\nfrom mlflow.store.tracking.file_store import FileStore\nfrom mlflow.store.tracking.rest_store import RestStore\nfrom mlflow.tracking._tracking_service.registry import TrackingStoreRegistry\nfrom mlflow.utils import rest_utils\nfrom mlflow.utils.file_utils import path_to_local_file_uri\nfrom mlflow.utils.databricks_utils import get_databricks_host_creds\nfrom mlflow.utils.uri import _DATABRICKS_UNITY_CATALOG_SCHEME\nfrom mlflow.utils.credentials import read_mlflow_creds\n\n_logger = logging.getLogger(__name__)\n_tracking_uri = None\n\n\ndef is_tracking_uri_set():\n \"\"\"Returns True if the tracking URI has been set, False otherwise.\"\"\"\n if _tracking_uri or MLFLOW_TRACKING_URI.get():\n return True\n return False\n\n\ndef set_tracking_uri(uri: Union[str, Path]) -> None:\n \"\"\"\n Set the tracking server URI. 
This does not affect the\n currently active run (if one exists), but takes effect for successive runs.\n\n :param uri:\n\n - An empty string, or a local file path, prefixed with ``file:/``. Data is stored\n locally at the provided file (or ``./mlruns`` if empty).\n - An HTTP URI like ``https://my-tracking-server:5000``.\n - A Databricks workspace, provided as the string \"databricks\" or, to use a\n Databricks CLI\n `profile <https://github.com/databricks/databricks-cli#installation>`_,\n \"databricks://<profileName>\".\n - A :py:class:`pathlib.Path` instance\n\n .. test-code-block:: python\n :caption: Example\n\n import mlflow\n\n mlflow.set_tracking_uri(\"file:///tmp/my_tracking\")\n tracking_uri = mlflow.get_tracking_uri()\n print(\"Current tracking uri: {}\".format(tracking_uri))\n\n .. code-block:: text\n :caption: Output\n\n Current tracking uri: file:///tmp/my_tracking\n \"\"\"\n if isinstance(uri, Path):\n # On Windows with Python3.8 (https://bugs.python.org/issue38671)\n # .resolve() doesn't return the absolute path if the directory doesn't exist\n # so we're calling .absolute() first to get the absolute path on Windows,\n # then .resolve() to clean the path\n uri = uri.absolute().resolve().as_uri()\n global _tracking_uri\n _tracking_uri = uri\n\n\n@contextmanager\ndef _use_tracking_uri(uri: str, local_store_root_path: str = None) -> None:\n \"\"\"\n Similar to `mlflow.tracking.set_tracking_uri` function but return a context manager.\n :param uri: tracking URI to use.\n :param local_store_root_path: the local store root path for the tracking URI.\n \"\"\"\n global _tracking_uri\n cwd = os.getcwd()\n old_tracking_uri = _tracking_uri\n try:\n if local_store_root_path is not None:\n os.chdir(local_store_root_path)\n _tracking_uri = uri\n yield\n finally:\n _tracking_uri = old_tracking_uri\n os.chdir(cwd)\n\n\ndef _resolve_tracking_uri(tracking_uri=None):\n return tracking_uri or get_tracking_uri()\n\n\ndef get_tracking_uri() -> str:\n \"\"\"\n Get the current tracking URI. This may not correspond to the tracking URI of\n the currently active run, since the tracking URI can be updated via ``set_tracking_uri``.\n\n :return: The tracking URI.\n\n .. code-block:: python\n :caption: Example\n\n import mlflow\n\n # Get the current tracking uri\n tracking_uri = mlflow.get_tracking_uri()\n print(\"Current tracking uri: {}\".format(tracking_uri))\n\n .. 
code-block:: text\n :caption: Output\n\n Current tracking uri: file:///.../mlruns\n \"\"\"\n global _tracking_uri\n if _tracking_uri is not None:\n return _tracking_uri\n elif uri := MLFLOW_TRACKING_URI.get():\n return uri\n else:\n return path_to_local_file_uri(os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH))\n\n\ndef _get_file_store(store_uri, **_):\n return FileStore(store_uri, store_uri)\n\n\ndef _get_sqlalchemy_store(store_uri, artifact_uri):\n from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore\n\n if artifact_uri is None:\n artifact_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH\n return SqlAlchemyStore(store_uri, artifact_uri)\n\n\ndef _get_default_host_creds(store_uri):\n creds = read_mlflow_creds()\n return rest_utils.MlflowHostCreds(\n host=store_uri,\n username=creds.username,\n password=creds.password,\n token=MLFLOW_TRACKING_TOKEN.get(),\n aws_sigv4=MLFLOW_TRACKING_AWS_SIGV4.get(),\n ignore_tls_verification=MLFLOW_TRACKING_INSECURE_TLS.get(),\n client_cert_path=MLFLOW_TRACKING_CLIENT_CERT_PATH.get(),\n server_cert_path=MLFLOW_TRACKING_SERVER_CERT_PATH.get(),\n )\n\n\ndef _get_rest_store(store_uri, **_):\n return RestStore(partial(_get_default_host_creds, store_uri))\n\n\ndef _get_databricks_rest_store(store_uri, **_):\n return RestStore(partial(get_databricks_host_creds, store_uri))\n\n\ndef _get_databricks_uc_rest_store(store_uri, **_):\n from mlflow.exceptions import MlflowException\n from mlflow.version import VERSION\n\n global _tracking_store_registry\n supported_schemes = [\n scheme\n for scheme in _tracking_store_registry._registry\n if scheme != _DATABRICKS_UNITY_CATALOG_SCHEME\n ]\n raise MlflowException(\n f\"Detected Unity Catalog tracking URI '{store_uri}'. \"\n \"Setting the tracking URI to a Unity Catalog backend is not supported in the current \"\n f\"version of the MLflow client ({VERSION}). \"\n \"Please specify a different tracking URI via mlflow.set_tracking_uri, with \"\n \"one of the supported schemes: \"\n f\"{supported_schemes}. If you're trying to access models in the Unity \"\n \"Catalog, please upgrade to the latest version of the MLflow Python \"\n \"client, then specify a Unity Catalog model registry URI via \"\n f\"mlflow.set_registry_uri('{_DATABRICKS_UNITY_CATALOG_SCHEME}') or \"\n f\"mlflow.set_registry_uri('{_DATABRICKS_UNITY_CATALOG_SCHEME}://profile_name'), where \"\n \"'profile_name' is the name of the Databricks CLI profile to use for \"\n \"authentication. 
Be sure to leave the tracking URI configured to use \"\n \"one of the supported schemes listed above.\"\n )\n\n\n_tracking_store_registry = TrackingStoreRegistry()\n_tracking_store_registry.register(\"\", _get_file_store)\n_tracking_store_registry.register(\"file\", _get_file_store)\n_tracking_store_registry.register(\"databricks\", _get_databricks_rest_store)\n_tracking_store_registry.register(_DATABRICKS_UNITY_CATALOG_SCHEME, _get_databricks_uc_rest_store)\n\nfor scheme in [\"http\", \"https\"]:\n _tracking_store_registry.register(scheme, _get_rest_store)\n\nfor scheme in DATABASE_ENGINES:\n _tracking_store_registry.register(scheme, _get_sqlalchemy_store)\n\n_tracking_store_registry.register_entrypoints()\n\n\ndef _get_store(store_uri=None, artifact_uri=None):\n return _tracking_store_registry.get_store(store_uri, artifact_uri)\n\n\n# TODO(sueann): move to a projects utils module\ndef _get_git_url_if_present(uri):\n \"\"\"\n Return the path git_uri#sub_directory if the URI passed is a local path that's part of\n a Git repo, or returns the original URI otherwise.\n :param uri: The expanded uri\n :return: The git_uri#sub_directory if the uri is part of a Git repo,\n otherwise return the original uri\n \"\"\"\n if \"#\" in uri:\n # Already a URI in git repo format\n return uri\n try:\n from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return uri\n try:\n # Check whether this is part of a git repo\n repo = Repo(uri, search_parent_directories=True)\n\n # Repo url\n repo_url = \"file://%s\" % repo.working_tree_dir\n\n # Sub directory\n rlpath = uri.replace(repo.working_tree_dir, \"\")\n if rlpath == \"\":\n git_path = repo_url\n elif rlpath[0] == \"/\":\n git_path = repo_url + \"#\" + rlpath[1:]\n else:\n git_path = repo_url + \"#\" + rlpath\n return git_path\n except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):\n return uri\n", "path": "mlflow/tracking/_tracking_service/utils.py"}], "after_files": [{"content": "import os\nfrom functools import partial\nimport logging\nfrom pathlib import Path\nfrom typing import Union\nfrom contextlib import contextmanager\n\nfrom mlflow.environment_variables import (\n MLFLOW_TRACKING_AWS_SIGV4,\n MLFLOW_TRACKING_URI,\n MLFLOW_TRACKING_TOKEN,\n MLFLOW_TRACKING_INSECURE_TLS,\n MLFLOW_TRACKING_CLIENT_CERT_PATH,\n MLFLOW_TRACKING_SERVER_CERT_PATH,\n)\nfrom mlflow.store.tracking import DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH\nfrom mlflow.store.db.db_types import DATABASE_ENGINES\nfrom mlflow.store.tracking.file_store import FileStore\nfrom mlflow.store.tracking.rest_store import RestStore\nfrom mlflow.tracking._tracking_service.registry import TrackingStoreRegistry\nfrom mlflow.utils import rest_utils\nfrom mlflow.utils.file_utils import path_to_local_file_uri\nfrom mlflow.utils.databricks_utils import get_databricks_host_creds\nfrom mlflow.utils.uri import _DATABRICKS_UNITY_CATALOG_SCHEME\nfrom mlflow.utils.credentials import read_mlflow_creds\n\n_logger = logging.getLogger(__name__)\n_tracking_uri = None\n\n\ndef is_tracking_uri_set():\n \"\"\"Returns True if the tracking URI has been set, False otherwise.\"\"\"\n if _tracking_uri or MLFLOW_TRACKING_URI.get():\n return True\n return False\n\n\ndef set_tracking_uri(uri: Union[str, Path]) -> None:\n \"\"\"\n Set the tracking server URI. 
This does not affect the\n currently active run (if one exists), but takes effect for successive runs.\n\n :param uri:\n\n - An empty string, or a local file path, prefixed with ``file:/``. Data is stored\n locally at the provided file (or ``./mlruns`` if empty).\n - An HTTP URI like ``https://my-tracking-server:5000``.\n - A Databricks workspace, provided as the string \"databricks\" or, to use a\n Databricks CLI\n `profile <https://github.com/databricks/databricks-cli#installation>`_,\n \"databricks://<profileName>\".\n - A :py:class:`pathlib.Path` instance\n\n .. test-code-block:: python\n :caption: Example\n\n import mlflow\n\n mlflow.set_tracking_uri(\"file:///tmp/my_tracking\")\n tracking_uri = mlflow.get_tracking_uri()\n print(\"Current tracking uri: {}\".format(tracking_uri))\n\n .. code-block:: text\n :caption: Output\n\n Current tracking uri: file:///tmp/my_tracking\n \"\"\"\n if isinstance(uri, Path):\n # On Windows with Python3.8 (https://bugs.python.org/issue38671)\n # .resolve() doesn't return the absolute path if the directory doesn't exist\n # so we're calling .absolute() first to get the absolute path on Windows,\n # then .resolve() to clean the path\n uri = uri.absolute().resolve().as_uri()\n global _tracking_uri\n _tracking_uri = uri\n\n\n@contextmanager\ndef _use_tracking_uri(uri: str, local_store_root_path: str = None) -> None:\n \"\"\"\n Similar to `mlflow.tracking.set_tracking_uri` function but return a context manager.\n :param uri: tracking URI to use.\n :param local_store_root_path: the local store root path for the tracking URI.\n \"\"\"\n global _tracking_uri\n cwd = os.getcwd()\n old_tracking_uri = _tracking_uri\n try:\n if local_store_root_path is not None:\n os.chdir(local_store_root_path)\n _tracking_uri = uri\n yield\n finally:\n _tracking_uri = old_tracking_uri\n os.chdir(cwd)\n\n\ndef _resolve_tracking_uri(tracking_uri=None):\n return tracking_uri or get_tracking_uri()\n\n\ndef get_tracking_uri() -> str:\n \"\"\"\n Get the current tracking URI. This may not correspond to the tracking URI of\n the currently active run, since the tracking URI can be updated via ``set_tracking_uri``.\n\n :return: The tracking URI.\n\n .. code-block:: python\n :caption: Example\n\n import mlflow\n\n # Get the current tracking uri\n tracking_uri = mlflow.get_tracking_uri()\n print(\"Current tracking uri: {}\".format(tracking_uri))\n\n .. 
code-block:: text\n :caption: Output\n\n Current tracking uri: file:///.../mlruns\n \"\"\"\n global _tracking_uri\n if _tracking_uri is not None:\n return _tracking_uri\n elif uri := MLFLOW_TRACKING_URI.get():\n return uri\n else:\n return path_to_local_file_uri(os.path.abspath(DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH))\n\n\ndef _get_file_store(store_uri, **_):\n return FileStore(store_uri, store_uri)\n\n\ndef _get_sqlalchemy_store(store_uri, artifact_uri):\n from mlflow.store.tracking.sqlalchemy_store import SqlAlchemyStore\n\n if artifact_uri is None:\n artifact_uri = DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH\n return SqlAlchemyStore(store_uri, artifact_uri)\n\n\ndef _get_default_host_creds(store_uri):\n creds = read_mlflow_creds()\n return rest_utils.MlflowHostCreds(\n host=store_uri,\n username=creds.username,\n password=creds.password,\n token=MLFLOW_TRACKING_TOKEN.get(),\n aws_sigv4=MLFLOW_TRACKING_AWS_SIGV4.get(),\n ignore_tls_verification=MLFLOW_TRACKING_INSECURE_TLS.get(),\n client_cert_path=MLFLOW_TRACKING_CLIENT_CERT_PATH.get(),\n server_cert_path=MLFLOW_TRACKING_SERVER_CERT_PATH.get(),\n )\n\n\ndef _get_rest_store(store_uri, **_):\n return RestStore(partial(_get_default_host_creds, store_uri))\n\n\ndef _get_databricks_rest_store(store_uri, **_):\n return RestStore(partial(get_databricks_host_creds, store_uri))\n\n\ndef _get_databricks_uc_rest_store(store_uri, **_):\n from mlflow.exceptions import MlflowException\n from mlflow.version import VERSION\n\n global _tracking_store_registry\n supported_schemes = [\n scheme\n for scheme in _tracking_store_registry._registry\n if scheme != _DATABRICKS_UNITY_CATALOG_SCHEME\n ]\n raise MlflowException(\n f\"Detected Unity Catalog tracking URI '{store_uri}'. \"\n \"Setting the tracking URI to a Unity Catalog backend is not supported in the current \"\n f\"version of the MLflow client ({VERSION}). \"\n \"Please specify a different tracking URI via mlflow.set_tracking_uri, with \"\n \"one of the supported schemes: \"\n f\"{supported_schemes}. If you're trying to access models in the Unity \"\n \"Catalog, please upgrade to the latest version of the MLflow Python \"\n \"client, then specify a Unity Catalog model registry URI via \"\n f\"mlflow.set_registry_uri('{_DATABRICKS_UNITY_CATALOG_SCHEME}') or \"\n f\"mlflow.set_registry_uri('{_DATABRICKS_UNITY_CATALOG_SCHEME}://profile_name'), where \"\n \"'profile_name' is the name of the Databricks CLI profile to use for \"\n \"authentication. 
Be sure to leave the tracking URI configured to use \"\n \"one of the supported schemes listed above.\"\n )\n\n\n_tracking_store_registry = TrackingStoreRegistry()\n\n\ndef _register_tracking_stores():\n global _tracking_store_registry\n _tracking_store_registry.register(\"\", _get_file_store)\n _tracking_store_registry.register(\"file\", _get_file_store)\n _tracking_store_registry.register(\"databricks\", _get_databricks_rest_store)\n _tracking_store_registry.register(\n _DATABRICKS_UNITY_CATALOG_SCHEME, _get_databricks_uc_rest_store\n )\n\n for scheme in [\"http\", \"https\"]:\n _tracking_store_registry.register(scheme, _get_rest_store)\n\n for scheme in DATABASE_ENGINES:\n _tracking_store_registry.register(scheme, _get_sqlalchemy_store)\n\n _tracking_store_registry.register_entrypoints()\n\n\n_register_tracking_stores()\n\n\ndef _get_store(store_uri=None, artifact_uri=None):\n return _tracking_store_registry.get_store(store_uri, artifact_uri)\n\n\n# TODO(sueann): move to a projects utils module\ndef _get_git_url_if_present(uri):\n \"\"\"\n Return the path git_uri#sub_directory if the URI passed is a local path that's part of\n a Git repo, or returns the original URI otherwise.\n :param uri: The expanded uri\n :return: The git_uri#sub_directory if the uri is part of a Git repo,\n otherwise return the original uri\n \"\"\"\n if \"#\" in uri:\n # Already a URI in git repo format\n return uri\n try:\n from git import Repo, InvalidGitRepositoryError, GitCommandNotFound, NoSuchPathError\n except ImportError as e:\n _logger.warning(\n \"Failed to import Git (the git executable is probably not on your PATH),\"\n \" so Git SHA is not available. Error: %s\",\n e,\n )\n return uri\n try:\n # Check whether this is part of a git repo\n repo = Repo(uri, search_parent_directories=True)\n\n # Repo url\n repo_url = \"file://%s\" % repo.working_tree_dir\n\n # Sub directory\n rlpath = uri.replace(repo.working_tree_dir, \"\")\n if rlpath == \"\":\n git_path = repo_url\n elif rlpath[0] == \"/\":\n git_path = repo_url + \"#\" + rlpath[1:]\n else:\n git_path = repo_url + \"#\" + rlpath\n return git_path\n except (InvalidGitRepositoryError, GitCommandNotFound, ValueError, NoSuchPathError):\n return uri\n", "path": "mlflow/tracking/_tracking_service/utils.py"}]}
3,134
386
gh_patches_debug_6423
rasdani/github-patches
git_diff
pytorch__examples-229
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unused import of math in time_sequence_prediction example The generate_sine_wave.py module imports math on the first line, but doesn't use it. This import should be removed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `time_sequence_prediction/generate_sine_wave.py` Content: ``` 1 import math 2 import numpy as np 3 import torch 4 T = 20 5 L = 1000 6 N = 100 7 np.random.seed(2) 8 x = np.empty((N, L), 'int64') 9 x[:] = np.array(range(L)) + np.random.randint(-4*T, 4*T, N).reshape(N, 1) 10 data = np.sin(x / 1.0 / T).astype('float64') 11 torch.save(data, open('traindata.pt', 'wb')) 12 13 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/time_sequence_prediction/generate_sine_wave.py b/time_sequence_prediction/generate_sine_wave.py --- a/time_sequence_prediction/generate_sine_wave.py +++ b/time_sequence_prediction/generate_sine_wave.py @@ -1,12 +1,13 @@ -import math import numpy as np import torch + +np.random.seed(2) + T = 20 L = 1000 N = 100 -np.random.seed(2) + x = np.empty((N, L), 'int64') -x[:] = np.array(range(L)) + np.random.randint(-4*T, 4*T, N).reshape(N, 1) +x[:] = np.array(range(L)) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1) data = np.sin(x / 1.0 / T).astype('float64') torch.save(data, open('traindata.pt', 'wb')) -
{"golden_diff": "diff --git a/time_sequence_prediction/generate_sine_wave.py b/time_sequence_prediction/generate_sine_wave.py\n--- a/time_sequence_prediction/generate_sine_wave.py\n+++ b/time_sequence_prediction/generate_sine_wave.py\n@@ -1,12 +1,13 @@\n-import math\n import numpy as np\n import torch\n+\n+np.random.seed(2)\n+\n T = 20\n L = 1000\n N = 100\n-np.random.seed(2)\n+\n x = np.empty((N, L), 'int64')\n-x[:] = np.array(range(L)) + np.random.randint(-4*T, 4*T, N).reshape(N, 1)\n+x[:] = np.array(range(L)) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1)\n data = np.sin(x / 1.0 / T).astype('float64')\n torch.save(data, open('traindata.pt', 'wb'))\n-\n", "issue": "Unused import of math in time_sequence_prediction example\nThe generate_sine_wave.py module imports math on the first line, but doesn't use it. This import should be removed.\n", "before_files": [{"content": "import math\nimport numpy as np\nimport torch\nT = 20\nL = 1000\nN = 100\nnp.random.seed(2)\nx = np.empty((N, L), 'int64')\nx[:] = np.array(range(L)) + np.random.randint(-4*T, 4*T, N).reshape(N, 1)\ndata = np.sin(x / 1.0 / T).astype('float64')\ntorch.save(data, open('traindata.pt', 'wb'))\n\n", "path": "time_sequence_prediction/generate_sine_wave.py"}], "after_files": [{"content": "import numpy as np\nimport torch\n\nnp.random.seed(2)\n\nT = 20\nL = 1000\nN = 100\n\nx = np.empty((N, L), 'int64')\nx[:] = np.array(range(L)) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1)\ndata = np.sin(x / 1.0 / T).astype('float64')\ntorch.save(data, open('traindata.pt', 'wb'))\n", "path": "time_sequence_prediction/generate_sine_wave.py"}]}
429
214
gh_patches_debug_27771
rasdani/github-patches
git_diff
translate__translate-3898
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Nested YAML dictionary is serialized as OrderedDict in PO file ### Problem If there is a nested dictionary in a YAML file, the resulting po file contains a string starting with `OrderedDict` ### Expectation The nested keys are shown correctly in the PO file ### How to reproduce The following YAML input ``` e1: - s1: Subtag 1 ``` given to `yaml2po -i test.yml -o test.po` results in the PO file content of ``` #: t1-%3E[0] msgid "OrderedDict([('s1', 'Subtag 1')])" msgstr "" ``` I would expect something like ``` #: t1-%3E[0]-%3Es1 msgid "Subtag 1" msgstr "" ``` Perhaps it may be related to #3819. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `translate/storage/yaml.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright 2016 Michal Čihař 4 # 5 # This file is part of the Translate Toolkit. 6 # 7 # This program is free software; you can redistribute it and/or modify 8 # it under the terms of the GNU General Public License as published by 9 # the Free Software Foundation; either version 2 of the License, or 10 # (at your option) any later version. 11 # 12 # This program is distributed in the hope that it will be useful, 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 # GNU General Public License for more details. 16 # 17 # You should have received a copy of the GNU General Public License 18 # along with this program; if not, see <http://www.gnu.org/licenses/>. 19 20 r"""Class that manages YAML data files for translation 21 """ 22 23 from __future__ import absolute_import 24 from __future__ import unicode_literals 25 26 import uuid 27 28 import six 29 from ruamel.yaml import YAML, YAMLError 30 from ruamel.yaml.comments import CommentedMap 31 32 from translate.lang.data import cldr_plural_categories, plural_tags 33 from translate.misc.deprecation import deprecated 34 from translate.misc.multistring import multistring 35 from translate.storage import base 36 37 38 class YAMLUnit(base.TranslationUnit): 39 """A YAML entry""" 40 41 def __init__(self, source=None, **kwargs): 42 self._id = None 43 if source: 44 self.source = source 45 super(YAMLUnit, self).__init__(source) 46 47 @property 48 def source(self): 49 return self.target 50 51 @source.setter 52 def source(self, source): 53 self.target = source 54 55 # Deprecated on 2.3.1 56 @deprecated("Use `source` property instead") 57 def getsource(self): 58 return self.source 59 60 def setid(self, value): 61 self._id = value 62 63 def getid(self): 64 # Ensure we have ID (for serialization) 65 if self._id is None: 66 self._id = str(uuid.uuid4()) 67 return self._id 68 69 def getlocations(self): 70 return [self.getid()] 71 72 73 class YAMLFile(base.TranslationStore): 74 """A YAML file""" 75 76 UnitClass = YAMLUnit 77 78 def __init__(self, inputfile=None, **kwargs): 79 """construct a YAML file, optionally reading in from inputfile.""" 80 super(YAMLFile, self).__init__(**kwargs) 81 self.filename = '' 82 self._file = u'' 83 if inputfile is not None: 84 self.parse(inputfile) 85 86 def get_root_node(self, node): 87 """Returns root node for serialize""" 88 return node 89 90 def serialize_value(self, value): 91 return value 92 93 def serialize(self, out): 94 def nested_set(target, path, value): 95 value = self.serialize_value(value) 96 if len(path) > 1: 97 if 
len(path) == 2 and path[1] and path[1][0] == '[' and path[1][-1] == ']' and path[1][1:-1].isdigit(): 98 if path[0] not in target: 99 target[path[0]] = [] 100 target[path[0]].append(value) 101 else: 102 # Add empty dict in case there is value and we 103 # expect dict 104 if path[0] not in target or not isinstance(target[path[0]], dict): 105 target[path[0]] = CommentedMap() 106 nested_set(target[path[0]], path[1:], value) 107 else: 108 target[path[0]] = value 109 110 units = CommentedMap() 111 for unit in self.unit_iter(): 112 nested_set(units, unit.getid().split('->'), unit.target) 113 yaml = YAML() 114 yaml.default_flow_style = False 115 yaml.dump(self.get_root_node(units), out) 116 117 def _parse_dict(self, data, prev): 118 for k, v in six.iteritems(data): 119 if not isinstance(k, six.string_types): 120 raise base.ParseError( 121 'Key not string: {0}/{1} ({2})'.format(prev, k, type(k)) 122 ) 123 124 for x in self._flatten(v, '->'.join((prev, k)) if prev else k): 125 yield x 126 127 def _flatten(self, data, prev=""): 128 """Flatten YAML dictionary. 129 """ 130 if isinstance(data, dict): 131 for x in self._parse_dict(data, prev): 132 yield x 133 else: 134 if isinstance(data, six.string_types): 135 yield (prev, data) 136 elif isinstance(data, (bool, int)): 137 yield (prev, str(data)) 138 elif isinstance(data, list): 139 for k, v in enumerate(data): 140 key = '[{0}]'.format(k) 141 yield ('->'.join((prev, key)), six.text_type(v)) 142 elif data is None: 143 pass 144 else: 145 raise ValueError("We don't handle these values:\n" 146 "Type: %s\n" 147 "Data: %s\n" 148 "Previous: %s" % (type(data), data, prev)) 149 150 def preprocess(self, data): 151 """Preprocess hook for child formats""" 152 return data 153 154 def parse(self, input): 155 """parse the given file or file source string""" 156 if hasattr(input, 'name'): 157 self.filename = input.name 158 elif not getattr(self, 'filename', ''): 159 self.filename = '' 160 if hasattr(input, "read"): 161 src = input.read() 162 input.close() 163 input = src 164 if isinstance(input, bytes): 165 input = input.decode('utf-8') 166 yaml = YAML() 167 try: 168 self._file = yaml.load(input) 169 except YAMLError as e: 170 message = e.problem if hasattr(e, 'problem') else e.message 171 if hasattr(e, 'problem_mark'): 172 message += ' {0}'.format(e.problem_mark) 173 raise base.ParseError(message) 174 175 self._file = self.preprocess(self._file) 176 177 for k, data in self._flatten(self._file): 178 unit = self.UnitClass(data) 179 unit.setid(k) 180 self.addunit(unit) 181 182 183 class RubyYAMLFile(YAMLFile): 184 """Ruby YAML file, it has language code as first node.""" 185 186 def preprocess(self, data): 187 if isinstance(data, CommentedMap) and len(data) == 1: 188 lang = list(data.keys())[0] 189 self.settargetlanguage(lang) 190 return data[lang] 191 return data 192 193 def get_root_node(self, node): 194 """Returns root node for serialize""" 195 if self.targetlanguage is not None: 196 result = CommentedMap() 197 result[self.targetlanguage] = node 198 return result 199 return node 200 201 def _parse_dict(self, data, prev): 202 # Does this look like a plural? 203 if data and all((x in cldr_plural_categories for x in data.keys())): 204 # Ensure we have correct plurals ordering. 
205 values = [data[item] for item in cldr_plural_categories if item in data] 206 yield (prev, multistring(values)) 207 return 208 209 # Handle normal dict 210 for x in super(RubyYAMLFile, self)._parse_dict(data, prev): 211 yield x 212 213 def serialize_value(self, value): 214 if not isinstance(value, multistring): 215 return value 216 217 tags = plural_tags.get(self.targetlanguage, plural_tags['en']) 218 219 strings = [six.text_type(s) for s in value.strings] 220 221 # Sync plural_strings elements to plural_tags count. 222 if len(strings) < len(tags): 223 strings += [''] * (len(tags) - len(strings)) 224 strings = strings[:len(tags)] 225 226 return CommentedMap(zip(tags, strings)) 227 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/translate/storage/yaml.py b/translate/storage/yaml.py --- a/translate/storage/yaml.py +++ b/translate/storage/yaml.py @@ -94,10 +94,15 @@ def nested_set(target, path, value): value = self.serialize_value(value) if len(path) > 1: - if len(path) == 2 and path[1] and path[1][0] == '[' and path[1][-1] == ']' and path[1][1:-1].isdigit(): + if len(path) >= 2 and path[1] and path[1][0] == '[' and path[1][-1] == ']' and path[1][1:-1].isdigit(): if path[0] not in target: target[path[0]] = [] - target[path[0]].append(value) + if len(path) > 2: + new_value = CommentedMap() + nested_set(new_value, path[2:], value) + target[path[0]].append(new_value) + else: + target[path[0]].append(value) else: # Add empty dict in case there is value and we # expect dict @@ -138,7 +143,8 @@ elif isinstance(data, list): for k, v in enumerate(data): key = '[{0}]'.format(k) - yield ('->'.join((prev, key)), six.text_type(v)) + for value in self._flatten(v, '->'.join((prev, key))): + yield value elif data is None: pass else:
{"golden_diff": "diff --git a/translate/storage/yaml.py b/translate/storage/yaml.py\n--- a/translate/storage/yaml.py\n+++ b/translate/storage/yaml.py\n@@ -94,10 +94,15 @@\n def nested_set(target, path, value):\n value = self.serialize_value(value)\n if len(path) > 1:\n- if len(path) == 2 and path[1] and path[1][0] == '[' and path[1][-1] == ']' and path[1][1:-1].isdigit():\n+ if len(path) >= 2 and path[1] and path[1][0] == '[' and path[1][-1] == ']' and path[1][1:-1].isdigit():\n if path[0] not in target:\n target[path[0]] = []\n- target[path[0]].append(value)\n+ if len(path) > 2:\n+ new_value = CommentedMap()\n+ nested_set(new_value, path[2:], value)\n+ target[path[0]].append(new_value)\n+ else:\n+ target[path[0]].append(value)\n else:\n # Add empty dict in case there is value and we\n # expect dict\n@@ -138,7 +143,8 @@\n elif isinstance(data, list):\n for k, v in enumerate(data):\n key = '[{0}]'.format(k)\n- yield ('->'.join((prev, key)), six.text_type(v))\n+ for value in self._flatten(v, '->'.join((prev, key))):\n+ yield value\n elif data is None:\n pass\n else:\n", "issue": "Nested YAML dictionary is serialized as OrderedDict in PO file\n### Problem\r\nIf there is a nested dictionary in a YAML file, the resulting po file contains a string starting with `OrderedDict`\r\n### Expectation\r\nThe nested keys are shown correctly in the PO file\r\n### How to reproduce\r\nThe following YAML input\r\n```\r\ne1:\r\n- s1: Subtag 1\r\n```\r\ngiven to `yaml2po -i test.yml -o test.po` results in the PO file content of\r\n```\r\n#: t1-%3E[0]\r\nmsgid \"OrderedDict([('s1', 'Subtag 1')])\"\r\nmsgstr \"\"\r\n```\r\nI would expect something like\r\n```\r\n#: t1-%3E[0]-%3Es1\r\nmsgid \"Subtag 1\"\r\nmsgstr \"\"\r\n```\r\nPerhaps it may be related to #3819.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2016 Michal \u010ciha\u0159\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\nr\"\"\"Class that manages YAML data files for translation\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport uuid\n\nimport six\nfrom ruamel.yaml import YAML, YAMLError\nfrom ruamel.yaml.comments import CommentedMap\n\nfrom translate.lang.data import cldr_plural_categories, plural_tags\nfrom translate.misc.deprecation import deprecated\nfrom translate.misc.multistring import multistring\nfrom translate.storage import base\n\n\nclass YAMLUnit(base.TranslationUnit):\n \"\"\"A YAML entry\"\"\"\n\n def __init__(self, source=None, **kwargs):\n self._id = None\n if source:\n self.source = source\n super(YAMLUnit, self).__init__(source)\n\n @property\n def source(self):\n return self.target\n\n @source.setter\n def source(self, source):\n self.target = source\n\n # Deprecated on 2.3.1\n @deprecated(\"Use `source` property instead\")\n def getsource(self):\n return self.source\n\n def setid(self, value):\n self._id = value\n\n def getid(self):\n # Ensure we have ID (for serialization)\n if self._id is None:\n self._id = str(uuid.uuid4())\n return self._id\n\n def getlocations(self):\n return [self.getid()]\n\n\nclass YAMLFile(base.TranslationStore):\n \"\"\"A YAML file\"\"\"\n\n UnitClass = YAMLUnit\n\n def __init__(self, inputfile=None, **kwargs):\n \"\"\"construct a YAML file, optionally reading in from inputfile.\"\"\"\n super(YAMLFile, self).__init__(**kwargs)\n self.filename = ''\n self._file = u''\n if inputfile is not None:\n self.parse(inputfile)\n\n def get_root_node(self, node):\n \"\"\"Returns root node for serialize\"\"\"\n return node\n\n def serialize_value(self, value):\n return value\n\n def serialize(self, out):\n def nested_set(target, path, value):\n value = self.serialize_value(value)\n if len(path) > 1:\n if len(path) == 2 and path[1] and path[1][0] == '[' and path[1][-1] == ']' and path[1][1:-1].isdigit():\n if path[0] not in target:\n target[path[0]] = []\n target[path[0]].append(value)\n else:\n # Add empty dict in case there is value and we\n # expect dict\n if path[0] not in target or not isinstance(target[path[0]], dict):\n target[path[0]] = CommentedMap()\n nested_set(target[path[0]], path[1:], value)\n else:\n target[path[0]] = value\n\n units = CommentedMap()\n for unit in self.unit_iter():\n nested_set(units, unit.getid().split('->'), unit.target)\n yaml = YAML()\n yaml.default_flow_style = False\n yaml.dump(self.get_root_node(units), out)\n\n def _parse_dict(self, data, prev):\n for k, v in six.iteritems(data):\n if not isinstance(k, six.string_types):\n raise base.ParseError(\n 'Key not string: {0}/{1} ({2})'.format(prev, k, type(k))\n )\n\n for x in self._flatten(v, '->'.join((prev, k)) if prev else k):\n yield x\n\n def _flatten(self, data, prev=\"\"):\n \"\"\"Flatten YAML dictionary.\n \"\"\"\n if isinstance(data, dict):\n for x in self._parse_dict(data, prev):\n yield x\n else:\n if isinstance(data, six.string_types):\n yield (prev, data)\n elif isinstance(data, (bool, int)):\n yield (prev, str(data))\n elif isinstance(data, list):\n for k, v in enumerate(data):\n key = '[{0}]'.format(k)\n yield ('->'.join((prev, key)), six.text_type(v))\n elif data is None:\n pass\n else:\n raise ValueError(\"We don't handle these values:\\n\"\n \"Type: %s\\n\"\n \"Data: %s\\n\"\n \"Previous: %s\" % (type(data), data, prev))\n\n 
def preprocess(self, data):\n \"\"\"Preprocess hook for child formats\"\"\"\n return data\n\n def parse(self, input):\n \"\"\"parse the given file or file source string\"\"\"\n if hasattr(input, 'name'):\n self.filename = input.name\n elif not getattr(self, 'filename', ''):\n self.filename = ''\n if hasattr(input, \"read\"):\n src = input.read()\n input.close()\n input = src\n if isinstance(input, bytes):\n input = input.decode('utf-8')\n yaml = YAML()\n try:\n self._file = yaml.load(input)\n except YAMLError as e:\n message = e.problem if hasattr(e, 'problem') else e.message\n if hasattr(e, 'problem_mark'):\n message += ' {0}'.format(e.problem_mark)\n raise base.ParseError(message)\n\n self._file = self.preprocess(self._file)\n\n for k, data in self._flatten(self._file):\n unit = self.UnitClass(data)\n unit.setid(k)\n self.addunit(unit)\n\n\nclass RubyYAMLFile(YAMLFile):\n \"\"\"Ruby YAML file, it has language code as first node.\"\"\"\n\n def preprocess(self, data):\n if isinstance(data, CommentedMap) and len(data) == 1:\n lang = list(data.keys())[0]\n self.settargetlanguage(lang)\n return data[lang]\n return data\n\n def get_root_node(self, node):\n \"\"\"Returns root node for serialize\"\"\"\n if self.targetlanguage is not None:\n result = CommentedMap()\n result[self.targetlanguage] = node\n return result\n return node\n\n def _parse_dict(self, data, prev):\n # Does this look like a plural?\n if data and all((x in cldr_plural_categories for x in data.keys())):\n # Ensure we have correct plurals ordering.\n values = [data[item] for item in cldr_plural_categories if item in data]\n yield (prev, multistring(values))\n return\n\n # Handle normal dict\n for x in super(RubyYAMLFile, self)._parse_dict(data, prev):\n yield x\n\n def serialize_value(self, value):\n if not isinstance(value, multistring):\n return value\n\n tags = plural_tags.get(self.targetlanguage, plural_tags['en'])\n\n strings = [six.text_type(s) for s in value.strings]\n\n # Sync plural_strings elements to plural_tags count.\n if len(strings) < len(tags):\n strings += [''] * (len(tags) - len(strings))\n strings = strings[:len(tags)]\n\n return CommentedMap(zip(tags, strings))\n", "path": "translate/storage/yaml.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright 2016 Michal \u010ciha\u0159\n#\n# This file is part of the Translate Toolkit.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\nr\"\"\"Class that manages YAML data files for translation\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nimport uuid\n\nimport six\nfrom ruamel.yaml import YAML, YAMLError\nfrom ruamel.yaml.comments import CommentedMap\n\nfrom translate.lang.data import cldr_plural_categories, plural_tags\nfrom translate.misc.deprecation import deprecated\nfrom translate.misc.multistring import multistring\nfrom translate.storage import base\n\n\nclass YAMLUnit(base.TranslationUnit):\n \"\"\"A YAML entry\"\"\"\n\n def __init__(self, source=None, **kwargs):\n self._id = None\n if source:\n self.source = source\n super(YAMLUnit, self).__init__(source)\n\n @property\n def source(self):\n return self.target\n\n @source.setter\n def source(self, source):\n self.target = source\n\n # Deprecated on 2.3.1\n @deprecated(\"Use `source` property instead\")\n def getsource(self):\n return self.source\n\n def setid(self, value):\n self._id = value\n\n def getid(self):\n # Ensure we have ID (for serialization)\n if self._id is None:\n self._id = str(uuid.uuid4())\n return self._id\n\n def getlocations(self):\n return [self.getid()]\n\n\nclass YAMLFile(base.TranslationStore):\n \"\"\"A YAML file\"\"\"\n\n UnitClass = YAMLUnit\n\n def __init__(self, inputfile=None, **kwargs):\n \"\"\"construct a YAML file, optionally reading in from inputfile.\"\"\"\n super(YAMLFile, self).__init__(**kwargs)\n self.filename = ''\n self._file = u''\n if inputfile is not None:\n self.parse(inputfile)\n\n def get_root_node(self, node):\n \"\"\"Returns root node for serialize\"\"\"\n return node\n\n def serialize_value(self, value):\n return value\n\n def serialize(self, out):\n def nested_set(target, path, value):\n value = self.serialize_value(value)\n if len(path) > 1:\n if len(path) >= 2 and path[1] and path[1][0] == '[' and path[1][-1] == ']' and path[1][1:-1].isdigit():\n if path[0] not in target:\n target[path[0]] = []\n if len(path) > 2:\n new_value = CommentedMap()\n nested_set(new_value, path[2:], value)\n target[path[0]].append(new_value)\n else:\n target[path[0]].append(value)\n else:\n # Add empty dict in case there is value and we\n # expect dict\n if path[0] not in target or not isinstance(target[path[0]], dict):\n target[path[0]] = CommentedMap()\n nested_set(target[path[0]], path[1:], value)\n else:\n target[path[0]] = value\n\n units = CommentedMap()\n for unit in self.unit_iter():\n nested_set(units, unit.getid().split('->'), unit.target)\n yaml = YAML()\n yaml.default_flow_style = False\n yaml.dump(self.get_root_node(units), out)\n\n def _parse_dict(self, data, prev):\n for k, v in six.iteritems(data):\n if not isinstance(k, six.string_types):\n raise base.ParseError(\n 'Key not string: {0}/{1} ({2})'.format(prev, k, type(k))\n )\n\n for x in self._flatten(v, '->'.join((prev, k)) if prev else k):\n yield x\n\n def _flatten(self, data, prev=\"\"):\n \"\"\"Flatten YAML dictionary.\n \"\"\"\n if isinstance(data, dict):\n for x in self._parse_dict(data, prev):\n yield x\n else:\n if isinstance(data, six.string_types):\n yield (prev, data)\n elif isinstance(data, (bool, int)):\n yield (prev, str(data))\n elif isinstance(data, list):\n for k, v in enumerate(data):\n key = '[{0}]'.format(k)\n for value in self._flatten(v, '->'.join((prev, key))):\n yield value\n elif data is None:\n 
pass\n else:\n raise ValueError(\"We don't handle these values:\\n\"\n \"Type: %s\\n\"\n \"Data: %s\\n\"\n \"Previous: %s\" % (type(data), data, prev))\n\n def preprocess(self, data):\n \"\"\"Preprocess hook for child formats\"\"\"\n return data\n\n def parse(self, input):\n \"\"\"parse the given file or file source string\"\"\"\n if hasattr(input, 'name'):\n self.filename = input.name\n elif not getattr(self, 'filename', ''):\n self.filename = ''\n if hasattr(input, \"read\"):\n src = input.read()\n input.close()\n input = src\n if isinstance(input, bytes):\n input = input.decode('utf-8')\n yaml = YAML()\n try:\n self._file = yaml.load(input)\n except YAMLError as e:\n message = e.problem if hasattr(e, 'problem') else e.message\n if hasattr(e, 'problem_mark'):\n message += ' {0}'.format(e.problem_mark)\n raise base.ParseError(message)\n\n self._file = self.preprocess(self._file)\n\n for k, data in self._flatten(self._file):\n unit = self.UnitClass(data)\n unit.setid(k)\n self.addunit(unit)\n\n\nclass RubyYAMLFile(YAMLFile):\n \"\"\"Ruby YAML file, it has language code as first node.\"\"\"\n\n def preprocess(self, data):\n if isinstance(data, CommentedMap) and len(data) == 1:\n lang = list(data.keys())[0]\n self.settargetlanguage(lang)\n return data[lang]\n return data\n\n def get_root_node(self, node):\n \"\"\"Returns root node for serialize\"\"\"\n if self.targetlanguage is not None:\n result = CommentedMap()\n result[self.targetlanguage] = node\n return result\n return node\n\n def _parse_dict(self, data, prev):\n # Does this look like a plural?\n if data and all((x in cldr_plural_categories for x in data.keys())):\n # Ensure we have correct plurals ordering.\n values = [data[item] for item in cldr_plural_categories if item in data]\n yield (prev, multistring(values))\n return\n\n # Handle normal dict\n for x in super(RubyYAMLFile, self)._parse_dict(data, prev):\n yield x\n\n def serialize_value(self, value):\n if not isinstance(value, multistring):\n return value\n\n tags = plural_tags.get(self.targetlanguage, plural_tags['en'])\n\n strings = [six.text_type(s) for s in value.strings]\n\n # Sync plural_strings elements to plural_tags count.\n if len(strings) < len(tags):\n strings += [''] * (len(tags) - len(strings))\n strings = strings[:len(tags)]\n\n return CommentedMap(zip(tags, strings))\n", "path": "translate/storage/yaml.py"}]}
2,691
365
gh_patches_debug_8809
rasdani/github-patches
git_diff
conan-io__conan-center-index-16928
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] xorg-makedepend/any: Homepage url incorrect ### Description In the `xorg-makedepend` recipe the homepage url is incorrectly set to "https://gitlab.freedesktop.org/xorg/util/cf" which is a different repository in the same group, the correct repository url is "https://gitlab.freedesktop.org/xorg/util/makedepend". This should be changed accordingly. To be fixed in https://github.com/conan-io/conan-center-index/blob/master/recipes/xorg-makedepend/all/conanfile.py ### Package and Environment Details * Package Name/Version: xorg-makedepend/any * Operating System+version: n/a * Compiler+version: n/a * Docker image: n/a * Conan version: n/a * Python version: n/a ### Conan profile n/a ### Steps to reproduce n/a ### Logs n/a --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/xorg-makedepend/all/conanfile.py` Content: ``` 1 from conan import ConanFile 2 from conan.errors import ConanInvalidConfiguration 3 from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, load, rmdir, save 4 from conan.tools.gnu import Autotools, AutotoolsToolchain, PkgConfigDeps 5 from conan.tools.layout import basic_layout 6 import os 7 import re 8 9 required_conan_version = ">=1.53.0" 10 11 12 class XorgMakedepend(ConanFile): 13 name = "xorg-makedepend" 14 description = "Utility to parse C source files to make dependency lists for Makefiles" 15 topics = ("xorg", "dependency", "obsolete") 16 license = "MIT" 17 homepage = "https://gitlab.freedesktop.org/xorg/util/cf" 18 url = "https://github.com/conan-io/conan-center-index" 19 settings = "os", "arch", "compiler", "build_type" 20 21 @property 22 def _settings_build(self): 23 return getattr(self, "settings_build", self.settings) 24 25 def export_sources(self): 26 export_conandata_patches(self) 27 28 def requirements(self): 29 self.requires("xorg-macros/1.19.3") 30 self.requires("xorg-proto/2022.2") 31 32 def build_requirements(self): 33 self.build_requires("pkgconf/1.7.4") 34 35 def validate(self): 36 if self.settings.os == "Windows": 37 raise ConanInvalidConfiguration("Windows is not supported by xorg-makedepend") 38 39 def configure(self): 40 self.settings.rm_safe("compiler.cppstd") 41 self.settings.rm_safe("compiler.libcxx") 42 43 def package_id(self): 44 del self.info.settings.compiler 45 46 def layout(self): 47 basic_layout(self, src_folder="src") 48 49 def source(self): 50 get(self, **self.conan_data["sources"][self.version], 51 destination=self.source_folder, strip_root=True) 52 53 @property 54 def _user_info_build(self): 55 return getattr(self, "user_info_build", self.deps_user_info) 56 57 def generate(self): 58 tc = AutotoolsToolchain(self) 59 tc.generate() 60 61 deps = PkgConfigDeps(self) 62 deps.generate() 63 64 def build(self): 65 apply_conandata_patches(self) 66 autotools = Autotools(self) 67 autotools.configure() 68 autotools.make() 69 70 def package(self): 71 copy(self, "COPYING", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) 72 def_h_text = load(self, os.path.join(self.source_folder, "def.h")) 73 license_text = next(re.finditer(r"/\*([^*]+)\*/", def_h_text)).group(1) 74 save(self, os.path.join(self.package_folder, "licenses", "LICENSE"), license_text) 75 76 autotools = Autotools(self) 77 autotools.install() 78 rmdir(self, os.path.join(self.package_folder, "share")) 79 80 def 
package_info(self): 81 self.cpp_info.libdirs = [] 82 self.cpp_info.includedirs = [] 83 84 bin_path = os.path.join(self.package_folder, "bin") 85 self.output.info("Appending PATH environment variable: {}".format(bin_path)) 86 self.env_info.PATH.append(bin_path) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/xorg-makedepend/all/conanfile.py b/recipes/xorg-makedepend/all/conanfile.py
--- a/recipes/xorg-makedepend/all/conanfile.py
+++ b/recipes/xorg-makedepend/all/conanfile.py
@@ -14,7 +14,7 @@
     description = "Utility to parse C source files to make dependency lists for Makefiles"
     topics = ("xorg", "dependency", "obsolete")
     license = "MIT"
-    homepage = "https://gitlab.freedesktop.org/xorg/util/cf"
+    homepage = "https://gitlab.freedesktop.org/xorg/util/makedepend"
     url = "https://github.com/conan-io/conan-center-index"
     settings = "os", "arch", "compiler", "build_type"
{"golden_diff": "diff --git a/recipes/xorg-makedepend/all/conanfile.py b/recipes/xorg-makedepend/all/conanfile.py\n--- a/recipes/xorg-makedepend/all/conanfile.py\n+++ b/recipes/xorg-makedepend/all/conanfile.py\n@@ -14,7 +14,7 @@\n description = \"Utility to parse C source files to make dependency lists for Makefiles\"\n topics = (\"xorg\", \"dependency\", \"obsolete\")\n license = \"MIT\"\n- homepage = \"https://gitlab.freedesktop.org/xorg/util/cf\"\n+ homepage = \"https://gitlab.freedesktop.org/xorg/util/makedepend\"\n url = \"https://github.com/conan-io/conan-center-index\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n", "issue": "[package] xorg-makedepend/any: Homepage url incorrect\n### Description\n\nIn the `xorg-makedepend` recipe the homepage url is incorrectly set to \"https://gitlab.freedesktop.org/xorg/util/cf\" which is a different repository in the same group, the correct repository url is \"https://gitlab.freedesktop.org/xorg/util/makedepend\". This should be changed accordingly.\r\n\r\nTo be fixed in https://github.com/conan-io/conan-center-index/blob/master/recipes/xorg-makedepend/all/conanfile.py\n\n### Package and Environment Details\n\n* Package Name/Version: xorg-makedepend/any\r\n* Operating System+version: n/a\r\n* Compiler+version: n/a\r\n* Docker image: n/a\r\n* Conan version: n/a\r\n* Python version: n/a\r\n\n\n### Conan profile\n\nn/a\n\n### Steps to reproduce\n\nn/a\n\n### Logs\n\nn/a\n", "before_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, load, rmdir, save\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain, PkgConfigDeps\nfrom conan.tools.layout import basic_layout\nimport os\nimport re\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass XorgMakedepend(ConanFile):\n name = \"xorg-makedepend\"\n description = \"Utility to parse C source files to make dependency lists for Makefiles\"\n topics = (\"xorg\", \"dependency\", \"obsolete\")\n license = \"MIT\"\n homepage = \"https://gitlab.freedesktop.org/xorg/util/cf\"\n url = \"https://github.com/conan-io/conan-center-index\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def requirements(self):\n self.requires(\"xorg-macros/1.19.3\")\n self.requires(\"xorg-proto/2022.2\")\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n\n def validate(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Windows is not supported by xorg-makedepend\")\n\n def configure(self):\n self.settings.rm_safe(\"compiler.cppstd\")\n self.settings.rm_safe(\"compiler.libcxx\")\n\n def package_id(self):\n del self.info.settings.compiler\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n @property\n def _user_info_build(self):\n return getattr(self, \"user_info_build\", self.deps_user_info)\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.generate()\n\n deps = PkgConfigDeps(self)\n deps.generate()\n\n def build(self):\n apply_conandata_patches(self)\n autotools = Autotools(self)\n autotools.configure()\n autotools.make()\n\n def package(self):\n copy(self, \"COPYING\", 
src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n def_h_text = load(self, os.path.join(self.source_folder, \"def.h\"))\n license_text = next(re.finditer(r\"/\\*([^*]+)\\*/\", def_h_text)).group(1)\n save(self, os.path.join(self.package_folder, \"licenses\", \"LICENSE\"), license_text)\n\n autotools = Autotools(self)\n autotools.install()\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.libdirs = []\n self.cpp_info.includedirs = []\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": "recipes/xorg-makedepend/all/conanfile.py"}], "after_files": [{"content": "from conan import ConanFile\nfrom conan.errors import ConanInvalidConfiguration\nfrom conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, load, rmdir, save\nfrom conan.tools.gnu import Autotools, AutotoolsToolchain, PkgConfigDeps\nfrom conan.tools.layout import basic_layout\nimport os\nimport re\n\nrequired_conan_version = \">=1.53.0\"\n\n\nclass XorgMakedepend(ConanFile):\n name = \"xorg-makedepend\"\n description = \"Utility to parse C source files to make dependency lists for Makefiles\"\n topics = (\"xorg\", \"dependency\", \"obsolete\")\n license = \"MIT\"\n homepage = \"https://gitlab.freedesktop.org/xorg/util/makedepend\"\n url = \"https://github.com/conan-io/conan-center-index\"\n settings = \"os\", \"arch\", \"compiler\", \"build_type\"\n\n @property\n def _settings_build(self):\n return getattr(self, \"settings_build\", self.settings)\n\n def export_sources(self):\n export_conandata_patches(self)\n\n def requirements(self):\n self.requires(\"xorg-macros/1.19.3\")\n self.requires(\"xorg-proto/2022.2\")\n\n def build_requirements(self):\n self.build_requires(\"pkgconf/1.7.4\")\n\n def validate(self):\n if self.settings.os == \"Windows\":\n raise ConanInvalidConfiguration(\"Windows is not supported by xorg-makedepend\")\n\n def configure(self):\n self.settings.rm_safe(\"compiler.cppstd\")\n self.settings.rm_safe(\"compiler.libcxx\")\n\n def package_id(self):\n del self.info.settings.compiler\n\n def layout(self):\n basic_layout(self, src_folder=\"src\")\n\n def source(self):\n get(self, **self.conan_data[\"sources\"][self.version],\n destination=self.source_folder, strip_root=True)\n\n @property\n def _user_info_build(self):\n return getattr(self, \"user_info_build\", self.deps_user_info)\n\n def generate(self):\n tc = AutotoolsToolchain(self)\n tc.generate()\n\n deps = PkgConfigDeps(self)\n deps.generate()\n\n def build(self):\n apply_conandata_patches(self)\n autotools = Autotools(self)\n autotools.configure()\n autotools.make()\n\n def package(self):\n copy(self, \"COPYING\", src=self.source_folder, dst=os.path.join(self.package_folder, \"licenses\"))\n def_h_text = load(self, os.path.join(self.source_folder, \"def.h\"))\n license_text = next(re.finditer(r\"/\\*([^*]+)\\*/\", def_h_text)).group(1)\n save(self, os.path.join(self.package_folder, \"licenses\", \"LICENSE\"), license_text)\n\n autotools = Autotools(self)\n autotools.install()\n rmdir(self, os.path.join(self.package_folder, \"share\"))\n\n def package_info(self):\n self.cpp_info.libdirs = []\n self.cpp_info.includedirs = []\n\n bin_path = os.path.join(self.package_folder, \"bin\")\n self.output.info(\"Appending PATH environment variable: {}\".format(bin_path))\n self.env_info.PATH.append(bin_path)\n", "path": 
"recipes/xorg-makedepend/all/conanfile.py"}]}
1,343
180
gh_patches_debug_17607
rasdani/github-patches
git_diff
DataDog__dd-trace-py-4246
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot call sqlite3.backup(db) on a TracedSQLite object Thanks for taking the time for reporting an issue! Before reporting an issue on dd-trace-py, please be sure to provide all necessary information. If you're hitting a bug, make sure that you're using the latest version of this library. ### Which version of dd-trace-py are you using? 1.5.0 ### Which version of pip are you using? 21.1.1 _ddtrace requires pip>=18 to install one of our pre-built wheels_ ### Which version of the libraries are you using? You can copy/paste the output of `pip freeze` here. ``` ddtrace==1.5.0 ``` ### How can we reproduce your problem? ``` from ddtrace import config, patch_all import sqlite3 config.env = "test" # the environment the application is in config.service = "app" # name of your application config.version = "v1" # version of your application patch_all() src = sqlite3.connect("1.db") dst = sqlite3.connect("2.db") with dst: src.backup(dst, pages=1) dst.close() src.close() ``` ### What is the result that you get? The following TypeError ``` TypeError: backup() argument 1 must be sqlite3.Connection, not TracedSQLite ``` ### What is the result that you expected? The function should succeed without error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/sqlite3/patch.py` Content: ``` 1 import os 2 import sqlite3 3 import sqlite3.dbapi2 4 5 from ddtrace import config 6 from ddtrace.vendor import wrapt 7 8 from ...contrib.dbapi import FetchTracedCursor 9 from ...contrib.dbapi import TracedConnection 10 from ...contrib.dbapi import TracedCursor 11 from ...internal.utils.formats import asbool 12 from ...pin import Pin 13 14 15 # Original connect method 16 _connect = sqlite3.connect 17 18 config._add( 19 "sqlite", 20 dict( 21 _default_service="sqlite", 22 _dbapi_span_name_prefix="sqlite", 23 trace_fetch_methods=asbool(os.getenv("DD_SQLITE_TRACE_FETCH_METHODS", default=False)), 24 ), 25 ) 26 27 28 def patch(): 29 wrapped = wrapt.FunctionWrapper(_connect, traced_connect) 30 31 setattr(sqlite3, "connect", wrapped) 32 setattr(sqlite3.dbapi2, "connect", wrapped) 33 34 35 def unpatch(): 36 sqlite3.connect = _connect 37 sqlite3.dbapi2.connect = _connect 38 39 40 def traced_connect(func, _, args, kwargs): 41 conn = func(*args, **kwargs) 42 return patch_conn(conn) 43 44 45 def patch_conn(conn): 46 wrapped = TracedSQLite(conn) 47 Pin().onto(wrapped) 48 return wrapped 49 50 51 class TracedSQLiteCursor(TracedCursor): 52 def executemany(self, *args, **kwargs): 53 # DEV: SQLite3 Cursor.execute always returns back the cursor instance 54 super(TracedSQLiteCursor, self).executemany(*args, **kwargs) 55 return self 56 57 def execute(self, *args, **kwargs): 58 # DEV: SQLite3 Cursor.execute always returns back the cursor instance 59 super(TracedSQLiteCursor, self).execute(*args, **kwargs) 60 return self 61 62 63 class TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor): 64 pass 65 66 67 class TracedSQLite(TracedConnection): 68 def __init__(self, conn, pin=None, cursor_cls=None): 69 if not cursor_cls: 70 # Do not trace `fetch*` methods by default 71 cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor 72 73 super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls) 74 75 def execute(self, *args, **kwargs): 76 # sqlite has a 
few extra sugar functions 77 return self.cursor().execute(*args, **kwargs) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/sqlite3/patch.py b/ddtrace/contrib/sqlite3/patch.py
--- a/ddtrace/contrib/sqlite3/patch.py
+++ b/ddtrace/contrib/sqlite3/patch.py
@@ -1,6 +1,7 @@
 import os
 import sqlite3
 import sqlite3.dbapi2
+import sys

 from ddtrace import config
 from ddtrace.vendor import wrapt
@@ -75,3 +76,13 @@
     def execute(self, *args, **kwargs):
         # sqlite has a few extra sugar functions
         return self.cursor().execute(*args, **kwargs)
+
+    # backup was added in Python 3.7
+    if sys.version_info >= (3, 7, 0):
+
+        def backup(self, target, *args, **kwargs):
+            # sqlite3 checks the type of `target`, it cannot be a wrapped connection
+            # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899
+            if isinstance(target, TracedConnection):
+                target = target.__wrapped__
+            return self.__wrapped__.backup(target, *args, **kwargs)
{"golden_diff": "diff --git a/ddtrace/contrib/sqlite3/patch.py b/ddtrace/contrib/sqlite3/patch.py\n--- a/ddtrace/contrib/sqlite3/patch.py\n+++ b/ddtrace/contrib/sqlite3/patch.py\n@@ -1,6 +1,7 @@\n import os\n import sqlite3\n import sqlite3.dbapi2\n+import sys\n \n from ddtrace import config\n from ddtrace.vendor import wrapt\n@@ -75,3 +76,13 @@\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n+\n+ # backup was added in Python 3.7\n+ if sys.version_info >= (3, 7, 0):\n+\n+ def backup(self, target, *args, **kwargs):\n+ # sqlite3 checks the type of `target`, it cannot be a wrapped connection\n+ # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899\n+ if isinstance(target, TracedConnection):\n+ target = target.__wrapped__\n+ return self.__wrapped__.backup(target, *args, **kwargs)\n", "issue": "Cannot call sqlite3.backup(db) on a TracedSQLite object\nThanks for taking the time for reporting an issue!\r\n\r\nBefore reporting an issue on dd-trace-py, please be sure to provide all\r\nnecessary information.\r\n\r\nIf you're hitting a bug, make sure that you're using the latest version of this\r\nlibrary.\r\n\r\n### Which version of dd-trace-py are you using?\r\n1.5.0\r\n### Which version of pip are you using?\r\n21.1.1\r\n_ddtrace requires pip>=18 to install one of our pre-built wheels_\r\n\r\n### Which version of the libraries are you using?\r\n\r\nYou can copy/paste the output of `pip freeze` here.\r\n\r\n```\r\nddtrace==1.5.0\r\n```\r\n\r\n### How can we reproduce your problem?\r\n\r\n```\r\nfrom ddtrace import config, patch_all\r\nimport sqlite3\r\n\r\nconfig.env = \"test\" # the environment the application is in\r\nconfig.service = \"app\" # name of your application\r\nconfig.version = \"v1\" # version of your application\r\npatch_all()\r\n\r\nsrc = sqlite3.connect(\"1.db\")\r\ndst = sqlite3.connect(\"2.db\")\r\nwith dst:\r\n src.backup(dst, pages=1)\r\ndst.close()\r\nsrc.close()\r\n```\r\n\r\n### What is the result that you get?\r\n\r\nThe following TypeError\r\n```\r\nTypeError: backup() argument 1 must be sqlite3.Connection, not TracedSQLite\r\n```\r\n\r\n### What is the result that you expected?\r\n\r\nThe function should succeed without error.\r\n\n", "before_files": [{"content": "import os\nimport sqlite3\nimport sqlite3.dbapi2\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...contrib.dbapi import FetchTracedCursor\nfrom ...contrib.dbapi import TracedConnection\nfrom ...contrib.dbapi import TracedCursor\nfrom ...internal.utils.formats import asbool\nfrom ...pin import Pin\n\n\n# Original connect method\n_connect = sqlite3.connect\n\nconfig._add(\n \"sqlite\",\n dict(\n _default_service=\"sqlite\",\n _dbapi_span_name_prefix=\"sqlite\",\n trace_fetch_methods=asbool(os.getenv(\"DD_SQLITE_TRACE_FETCH_METHODS\", default=False)),\n ),\n)\n\n\ndef patch():\n wrapped = wrapt.FunctionWrapper(_connect, traced_connect)\n\n setattr(sqlite3, \"connect\", wrapped)\n setattr(sqlite3.dbapi2, \"connect\", wrapped)\n\n\ndef unpatch():\n sqlite3.connect = _connect\n sqlite3.dbapi2.connect = _connect\n\n\ndef traced_connect(func, _, args, kwargs):\n conn = func(*args, **kwargs)\n return patch_conn(conn)\n\n\ndef patch_conn(conn):\n wrapped = TracedSQLite(conn)\n Pin().onto(wrapped)\n return wrapped\n\n\nclass TracedSQLiteCursor(TracedCursor):\n def executemany(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back 
the cursor instance\n super(TracedSQLiteCursor, self).executemany(*args, **kwargs)\n return self\n\n def execute(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back the cursor instance\n super(TracedSQLiteCursor, self).execute(*args, **kwargs)\n return self\n\n\nclass TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor):\n pass\n\n\nclass TracedSQLite(TracedConnection):\n def __init__(self, conn, pin=None, cursor_cls=None):\n if not cursor_cls:\n # Do not trace `fetch*` methods by default\n cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor\n\n super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls)\n\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n", "path": "ddtrace/contrib/sqlite3/patch.py"}], "after_files": [{"content": "import os\nimport sqlite3\nimport sqlite3.dbapi2\nimport sys\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\nfrom ...contrib.dbapi import FetchTracedCursor\nfrom ...contrib.dbapi import TracedConnection\nfrom ...contrib.dbapi import TracedCursor\nfrom ...internal.utils.formats import asbool\nfrom ...pin import Pin\n\n\n# Original connect method\n_connect = sqlite3.connect\n\nconfig._add(\n \"sqlite\",\n dict(\n _default_service=\"sqlite\",\n _dbapi_span_name_prefix=\"sqlite\",\n trace_fetch_methods=asbool(os.getenv(\"DD_SQLITE_TRACE_FETCH_METHODS\", default=False)),\n ),\n)\n\n\ndef patch():\n wrapped = wrapt.FunctionWrapper(_connect, traced_connect)\n\n setattr(sqlite3, \"connect\", wrapped)\n setattr(sqlite3.dbapi2, \"connect\", wrapped)\n\n\ndef unpatch():\n sqlite3.connect = _connect\n sqlite3.dbapi2.connect = _connect\n\n\ndef traced_connect(func, _, args, kwargs):\n conn = func(*args, **kwargs)\n return patch_conn(conn)\n\n\ndef patch_conn(conn):\n wrapped = TracedSQLite(conn)\n Pin().onto(wrapped)\n return wrapped\n\n\nclass TracedSQLiteCursor(TracedCursor):\n def executemany(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back the cursor instance\n super(TracedSQLiteCursor, self).executemany(*args, **kwargs)\n return self\n\n def execute(self, *args, **kwargs):\n # DEV: SQLite3 Cursor.execute always returns back the cursor instance\n super(TracedSQLiteCursor, self).execute(*args, **kwargs)\n return self\n\n\nclass TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor):\n pass\n\n\nclass TracedSQLite(TracedConnection):\n def __init__(self, conn, pin=None, cursor_cls=None):\n if not cursor_cls:\n # Do not trace `fetch*` methods by default\n cursor_cls = TracedSQLiteFetchCursor if config.sqlite.trace_fetch_methods else TracedSQLiteCursor\n\n super(TracedSQLite, self).__init__(conn, pin=pin, cfg=config.sqlite, cursor_cls=cursor_cls)\n\n def execute(self, *args, **kwargs):\n # sqlite has a few extra sugar functions\n return self.cursor().execute(*args, **kwargs)\n\n # backup was added in Python 3.7\n if sys.version_info >= (3, 7, 0):\n\n def backup(self, target, *args, **kwargs):\n # sqlite3 checks the type of `target`, it cannot be a wrapped connection\n # https://github.com/python/cpython/blob/4652093e1b816b78e9a585d671a807ce66427417/Modules/_sqlite/connection.c#L1897-L1899\n if isinstance(target, TracedConnection):\n target = target.__wrapped__\n return self.__wrapped__.backup(target, *args, **kwargs)\n", "path": "ddtrace/contrib/sqlite3/patch.py"}]}
1,259
307
gh_patches_debug_21357
rasdani/github-patches
git_diff
nextcloud__appstore-282
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- API: registering an app id and uploading an app release needs to check revoked certificates In order to prevent old or lost certificates from being abused we need to check if the certificate has been revoked. This has to be done before validating the certificate on app release upload and before registering a new app id. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nextcloudappstore/core/certificate/validator.py` Content: ``` 1 import logging 2 from base64 import b64decode 3 4 import pem 5 from OpenSSL.crypto import FILETYPE_PEM, load_certificate, verify, X509, \ 6 X509Store, X509StoreContext, load_crl 7 from django.conf import settings # type: ignore 8 from rest_framework.exceptions import APIException 9 10 logger = logging.getLogger(__name__) 11 12 13 class CertificateConfiguration: 14 def __init__(self) -> None: 15 self.digest = settings.CERTIFICATE_DIGEST 16 17 18 class InvalidSignatureException(APIException): 19 pass 20 21 22 class InvalidCertificateException(APIException): 23 pass 24 25 26 class CertificateAppIdMismatchException(APIException): 27 pass 28 29 30 class CertificateValidator: 31 """ 32 See https://pyopenssl.readthedocs.io/en/stable/api/crypto.html#signing 33 -and-verifying-signatures 34 """ 35 36 def __init__(self, config: CertificateConfiguration) -> None: 37 self.config = config 38 39 def validate_signature(self, certificate: str, signature: str, 40 data: bytes) -> None: 41 """ 42 Tests if a value is a valid certificate using SHA512 43 :param certificate: the certificate to use as string 44 :param signature: the signature base64 encoded string to test 45 :param data: the binary file content that was signed 46 :raises: InvalidSignatureException if the signature is invalid 47 :return: None 48 """ 49 cert = self._to_cert(certificate) 50 err_msg = 'Signature is invalid' 51 try: 52 result = verify(cert, b64decode(signature.encode()), data, 53 self.config.digest) 54 if result is not None: 55 raise InvalidSignatureException(err_msg) 56 except Exception as e: 57 raise InvalidSignatureException('%s: %s' % (err_msg, str(e))) 58 59 def validate_certificate(self, certificate: str, chain: str, 60 crl: str = None) -> None: 61 """ 62 Tests if a certificate has been signed by the chain, is not revoked 63 and has not yet been expired. 64 :param certificate: the certificate to test as string 65 :param chain: the certificate chain file content as string 66 :param crl: the certificate revocation list file content as string 67 :raises: InvalidCertificateException if the certificate is invalid 68 :return: None 69 """ 70 # root and intermediary certificate need to be split 71 cas = pem.parse(chain.encode()) 72 store = X509Store() 73 for ca in cas: 74 store.add_cert(self._to_cert(str(ca))) 75 76 cert = self._to_cert(certificate) 77 ctx = X509StoreContext(store, cert) 78 err_msg = 'Certificate is invalid' 79 80 if crl: 81 crl = load_crl(FILETYPE_PEM, crl) 82 store.add_crl(crl) 83 84 try: 85 result = ctx.verify_certificate() 86 if result is not None: 87 raise InvalidCertificateException(err_msg) 88 except Exception as e: 89 raise InvalidCertificateException('%s: %s' % (err_msg, str(e))) 90 91 def get_cn(self, certificate: str) -> str: 92 """ 93 Extracts the CN from a certificate and removes the leading 94 slash, e.g. 
/news should return news 95 :param certificate: certificate 96 :return: the certificate's subject without the leading slash 97 """ 98 cert = self._to_cert(certificate) 99 return cert.get_subject().CN 100 101 def validate_app_id(self, certificate: str, app_id: str) -> None: 102 """ 103 Validates if the CN matches the app id 104 :param certificate: app certificate 105 :param app_id: the app id 106 :raises CertificateAppIdMismatchException: if the app id and cert CN do 107 not match 108 :return: None 109 """ 110 cn = self.get_cn(certificate) 111 if cn != app_id: 112 msg = 'App id %s does not match cert CN %s' % (app_id, cn) 113 raise CertificateAppIdMismatchException(msg) 114 115 def _to_cert(self, certificate: str) -> X509: 116 return load_certificate(FILETYPE_PEM, certificate.encode()) 117 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nextcloudappstore/core/certificate/validator.py b/nextcloudappstore/core/certificate/validator.py
--- a/nextcloudappstore/core/certificate/validator.py
+++ b/nextcloudappstore/core/certificate/validator.py
@@ -3,7 +3,7 @@

 import pem
 from OpenSSL.crypto import FILETYPE_PEM, load_certificate, verify, X509, \
-    X509Store, X509StoreContext, load_crl
+    X509Store, X509StoreContext, load_crl, X509StoreFlags
 from django.conf import settings  # type: ignore
 from rest_framework.exceptions import APIException

@@ -74,12 +74,14 @@
             store.add_cert(self._to_cert(str(ca)))

         cert = self._to_cert(certificate)
-        ctx = X509StoreContext(store, cert)
-        err_msg = 'Certificate is invalid'

         if crl:
-            crl = load_crl(FILETYPE_PEM, crl)
-            store.add_crl(crl)
+            parsed_crl = load_crl(FILETYPE_PEM, crl)
+            store.set_flags(X509StoreFlags.CRL_CHECK)
+            store.add_crl(parsed_crl)
+
+        ctx = X509StoreContext(store, cert)
+        err_msg = 'Certificate is invalid'

         try:
             result = ctx.verify_certificate()
{"golden_diff": "diff --git a/nextcloudappstore/core/certificate/validator.py b/nextcloudappstore/core/certificate/validator.py\n--- a/nextcloudappstore/core/certificate/validator.py\n+++ b/nextcloudappstore/core/certificate/validator.py\n@@ -3,7 +3,7 @@\n \n import pem\n from OpenSSL.crypto import FILETYPE_PEM, load_certificate, verify, X509, \\\n- X509Store, X509StoreContext, load_crl\n+ X509Store, X509StoreContext, load_crl, X509StoreFlags\n from django.conf import settings # type: ignore\n from rest_framework.exceptions import APIException\n \n@@ -74,12 +74,14 @@\n store.add_cert(self._to_cert(str(ca)))\n \n cert = self._to_cert(certificate)\n- ctx = X509StoreContext(store, cert)\n- err_msg = 'Certificate is invalid'\n \n if crl:\n- crl = load_crl(FILETYPE_PEM, crl)\n- store.add_crl(crl)\n+ parsed_crl = load_crl(FILETYPE_PEM, crl)\n+ store.set_flags(X509StoreFlags.CRL_CHECK)\n+ store.add_crl(parsed_crl)\n+\n+ ctx = X509StoreContext(store, cert)\n+ err_msg = 'Certificate is invalid'\n \n try:\n result = ctx.verify_certificate()\n", "issue": "API: registering an app id and uploading an app release needs to check revoked certificates\nIn order to prevent old or lost certificates from being abused we need to check if the certificate has been revoked. This has to be done before validating the certificate on app release upload and before registering a new app id.\n\n", "before_files": [{"content": "import logging\nfrom base64 import b64decode\n\nimport pem\nfrom OpenSSL.crypto import FILETYPE_PEM, load_certificate, verify, X509, \\\n X509Store, X509StoreContext, load_crl\nfrom django.conf import settings # type: ignore\nfrom rest_framework.exceptions import APIException\n\nlogger = logging.getLogger(__name__)\n\n\nclass CertificateConfiguration:\n def __init__(self) -> None:\n self.digest = settings.CERTIFICATE_DIGEST\n\n\nclass InvalidSignatureException(APIException):\n pass\n\n\nclass InvalidCertificateException(APIException):\n pass\n\n\nclass CertificateAppIdMismatchException(APIException):\n pass\n\n\nclass CertificateValidator:\n \"\"\"\n See https://pyopenssl.readthedocs.io/en/stable/api/crypto.html#signing\n -and-verifying-signatures\n \"\"\"\n\n def __init__(self, config: CertificateConfiguration) -> None:\n self.config = config\n\n def validate_signature(self, certificate: str, signature: str,\n data: bytes) -> None:\n \"\"\"\n Tests if a value is a valid certificate using SHA512\n :param certificate: the certificate to use as string\n :param signature: the signature base64 encoded string to test\n :param data: the binary file content that was signed\n :raises: InvalidSignatureException if the signature is invalid\n :return: None\n \"\"\"\n cert = self._to_cert(certificate)\n err_msg = 'Signature is invalid'\n try:\n result = verify(cert, b64decode(signature.encode()), data,\n self.config.digest)\n if result is not None:\n raise InvalidSignatureException(err_msg)\n except Exception as e:\n raise InvalidSignatureException('%s: %s' % (err_msg, str(e)))\n\n def validate_certificate(self, certificate: str, chain: str,\n crl: str = None) -> None:\n \"\"\"\n Tests if a certificate has been signed by the chain, is not revoked\n and has not yet been expired.\n :param certificate: the certificate to test as string\n :param chain: the certificate chain file content as string\n :param crl: the certificate revocation list file content as string\n :raises: InvalidCertificateException if the certificate is invalid\n :return: None\n \"\"\"\n # root and intermediary certificate need to be split\n 
cas = pem.parse(chain.encode())\n store = X509Store()\n for ca in cas:\n store.add_cert(self._to_cert(str(ca)))\n\n cert = self._to_cert(certificate)\n ctx = X509StoreContext(store, cert)\n err_msg = 'Certificate is invalid'\n\n if crl:\n crl = load_crl(FILETYPE_PEM, crl)\n store.add_crl(crl)\n\n try:\n result = ctx.verify_certificate()\n if result is not None:\n raise InvalidCertificateException(err_msg)\n except Exception as e:\n raise InvalidCertificateException('%s: %s' % (err_msg, str(e)))\n\n def get_cn(self, certificate: str) -> str:\n \"\"\"\n Extracts the CN from a certificate and removes the leading\n slash, e.g. /news should return news\n :param certificate: certificate\n :return: the certificate's subject without the leading slash\n \"\"\"\n cert = self._to_cert(certificate)\n return cert.get_subject().CN\n\n def validate_app_id(self, certificate: str, app_id: str) -> None:\n \"\"\"\n Validates if the CN matches the app id\n :param certificate: app certificate\n :param app_id: the app id\n :raises CertificateAppIdMismatchException: if the app id and cert CN do\n not match\n :return: None\n \"\"\"\n cn = self.get_cn(certificate)\n if cn != app_id:\n msg = 'App id %s does not match cert CN %s' % (app_id, cn)\n raise CertificateAppIdMismatchException(msg)\n\n def _to_cert(self, certificate: str) -> X509:\n return load_certificate(FILETYPE_PEM, certificate.encode())\n", "path": "nextcloudappstore/core/certificate/validator.py"}], "after_files": [{"content": "import logging\nfrom base64 import b64decode\n\nimport pem\nfrom OpenSSL.crypto import FILETYPE_PEM, load_certificate, verify, X509, \\\n X509Store, X509StoreContext, load_crl, X509StoreFlags\nfrom django.conf import settings # type: ignore\nfrom rest_framework.exceptions import APIException\n\nlogger = logging.getLogger(__name__)\n\n\nclass CertificateConfiguration:\n def __init__(self) -> None:\n self.digest = settings.CERTIFICATE_DIGEST\n\n\nclass InvalidSignatureException(APIException):\n pass\n\n\nclass InvalidCertificateException(APIException):\n pass\n\n\nclass CertificateAppIdMismatchException(APIException):\n pass\n\n\nclass CertificateValidator:\n \"\"\"\n See https://pyopenssl.readthedocs.io/en/stable/api/crypto.html#signing\n -and-verifying-signatures\n \"\"\"\n\n def __init__(self, config: CertificateConfiguration) -> None:\n self.config = config\n\n def validate_signature(self, certificate: str, signature: str,\n data: bytes) -> None:\n \"\"\"\n Tests if a value is a valid certificate using SHA512\n :param certificate: the certificate to use as string\n :param signature: the signature base64 encoded string to test\n :param data: the binary file content that was signed\n :raises: InvalidSignatureException if the signature is invalid\n :return: None\n \"\"\"\n cert = self._to_cert(certificate)\n err_msg = 'Signature is invalid'\n try:\n result = verify(cert, b64decode(signature.encode()), data,\n self.config.digest)\n if result is not None:\n raise InvalidSignatureException(err_msg)\n except Exception as e:\n raise InvalidSignatureException('%s: %s' % (err_msg, str(e)))\n\n def validate_certificate(self, certificate: str, chain: str,\n crl: str = None) -> None:\n \"\"\"\n Tests if a certificate has been signed by the chain, is not revoked\n and has not yet been expired.\n :param certificate: the certificate to test as string\n :param chain: the certificate chain file content as string\n :param crl: the certificate revocation list file content as string\n :raises: InvalidCertificateException if the certificate is 
invalid\n :return: None\n \"\"\"\n # root and intermediary certificate need to be split\n cas = pem.parse(chain.encode())\n store = X509Store()\n for ca in cas:\n store.add_cert(self._to_cert(str(ca)))\n\n cert = self._to_cert(certificate)\n\n if crl:\n parsed_crl = load_crl(FILETYPE_PEM, crl)\n store.set_flags(X509StoreFlags.CRL_CHECK)\n store.add_crl(parsed_crl)\n\n ctx = X509StoreContext(store, cert)\n err_msg = 'Certificate is invalid'\n\n try:\n result = ctx.verify_certificate()\n if result is not None:\n raise InvalidCertificateException(err_msg)\n except Exception as e:\n raise InvalidCertificateException('%s: %s' % (err_msg, str(e)))\n\n def get_cn(self, certificate: str) -> str:\n \"\"\"\n Extracts the CN from a certificate and removes the leading\n slash, e.g. /news should return news\n :param certificate: certificate\n :return: the certificate's subject without the leading slash\n \"\"\"\n cert = self._to_cert(certificate)\n return cert.get_subject().CN\n\n def validate_app_id(self, certificate: str, app_id: str) -> None:\n \"\"\"\n Validates if the CN matches the app id\n :param certificate: app certificate\n :param app_id: the app id\n :raises CertificateAppIdMismatchException: if the app id and cert CN do\n not match\n :return: None\n \"\"\"\n cn = self.get_cn(certificate)\n if cn != app_id:\n msg = 'App id %s does not match cert CN %s' % (app_id, cn)\n raise CertificateAppIdMismatchException(msg)\n\n def _to_cert(self, certificate: str) -> X509:\n return load_certificate(FILETYPE_PEM, certificate.encode())\n", "path": "nextcloudappstore/core/certificate/validator.py"}]}
1,467
323
gh_patches_debug_5815
rasdani/github-patches
git_diff
pulp__pulpcore-4722
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- file:// sync deletes files from directory **Version** Pulpcore 3.39 **Describe the bug** When syncing file:// repositories, files are disappearing after the sync. **To Reproduce** 1) Copy these two repositories to the FS: - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1 - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2 2) Sync one, then the other 3) See that some files disappeared. - In my case, file2 lost every file except PULP_MANIFEST **Expected behavior** No files disappear. **Additional context** This also occurred with RPM content type files. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pulpcore/download/file.py` Content: ``` 1 import os 2 3 from urllib.parse import urlparse 4 5 import aiofiles 6 7 from .base import BaseDownloader, DownloadResult 8 9 10 class FileDownloader(BaseDownloader): 11 """ 12 A downloader for downloading files from the filesystem. 13 14 It provides digest and size validation along with computation of the digests needed to save the 15 file as an Artifact. It writes a new file to the disk and the return path is included in the 16 :class:`~pulpcore.plugin.download.DownloadResult`. 17 18 This downloader has all of the attributes of 19 :class:`~pulpcore.plugin.download.BaseDownloader` 20 """ 21 22 def __init__(self, url, *args, **kwargs): 23 """ 24 Download files from a url that starts with `file://` 25 26 Args: 27 url (str): The url to the file. This is expected to begin with `file://` 28 kwargs (dict): This accepts the parameters of 29 :class:`~pulpcore.plugin.download.BaseDownloader`. 30 31 Raises: 32 ValidationError: When the url starts with `file://`, but is not a subfolder of a path in 33 the ALLOWED_IMPORT_PATH setting. 34 """ 35 from pulpcore.app.serializers import RemoteSerializer 36 37 RemoteSerializer().validate_url(url) 38 p = urlparse(url) 39 self._path = os.path.abspath(os.path.join(p.netloc, p.path)) 40 super().__init__(url, *args, **kwargs) 41 42 async def _run(self, extra_data=None): 43 """ 44 Read, validate, and compute digests on the `url`. This is a coroutine. 45 46 This method provides the same return object type and documented in 47 :meth:`~pulpcore.plugin.download.BaseDownloader._run`. 48 49 Args: 50 extra_data (dict): Extra data passed to the downloader. 51 """ 52 async with aiofiles.open(self._path, "rb") as f_handle: 53 while True: 54 chunk = await f_handle.read(1048576) # 1 megabyte 55 if not chunk: 56 await self.finalize() 57 break # the reading is done 58 await self.handle_data(chunk) 59 return DownloadResult( 60 path=self._path, 61 artifact_attributes=self.artifact_attributes, 62 url=self.url, 63 headers=None, 64 ) 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()
```
diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py
--- a/pulpcore/download/file.py
+++ b/pulpcore/download/file.py
@@ -57,7 +57,7 @@
                     break  # the reading is done
                 await self.handle_data(chunk)
         return DownloadResult(
-            path=self._path,
+            path=self.path,
             artifact_attributes=self.artifact_attributes,
             url=self.url,
             headers=None,
{"golden_diff": "diff --git a/pulpcore/download/file.py b/pulpcore/download/file.py\n--- a/pulpcore/download/file.py\n+++ b/pulpcore/download/file.py\n@@ -57,7 +57,7 @@\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n- path=self._path,\n+ path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n", "issue": "file:// sync deletes files from directory\n**Version**\r\nPulpcore 3.39\r\n\r\n**Describe the bug**\r\nWhen syncing file:// repositories, files are disappearing after the sync.\r\n\r\n**To Reproduce**\r\n1) Copy these two repositories to the FS:\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file1\r\n - https://github.com/Katello/katello/tree/master/test/fixtures/test_repos/file2\r\n2) Sync one, then the other\r\n3) See that some files disappeared.\r\n - In my case, file2 lost every file except PULP_MANIFEST\r\n\r\n\r\n**Expected behavior**\r\nNo files disappear.\r\n\r\n**Additional context**\r\nThis also occurred with RPM content type files.\r\n\n", "before_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nimport aiofiles\n\nfrom .base import BaseDownloader, DownloadResult\n\n\nclass FileDownloader(BaseDownloader):\n \"\"\"\n A downloader for downloading files from the filesystem.\n\n It provides digest and size validation along with computation of the digests needed to save the\n file as an Artifact. It writes a new file to the disk and the return path is included in the\n :class:`~pulpcore.plugin.download.DownloadResult`.\n\n This downloader has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n Download files from a url that starts with `file://`\n\n Args:\n url (str): The url to the file. This is expected to begin with `file://`\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n\n Raises:\n ValidationError: When the url starts with `file://`, but is not a subfolder of a path in\n the ALLOWED_IMPORT_PATH setting.\n \"\"\"\n from pulpcore.app.serializers import RemoteSerializer\n\n RemoteSerializer().validate_url(url)\n p = urlparse(url)\n self._path = os.path.abspath(os.path.join(p.netloc, p.path))\n super().__init__(url, *args, **kwargs)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Read, validate, and compute digests on the `url`. This is a coroutine.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n \"\"\"\n async with aiofiles.open(self._path, \"rb\") as f_handle:\n while True:\n chunk = await f_handle.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self._path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n )\n", "path": "pulpcore/download/file.py"}], "after_files": [{"content": "import os\n\nfrom urllib.parse import urlparse\n\nimport aiofiles\n\nfrom .base import BaseDownloader, DownloadResult\n\n\nclass FileDownloader(BaseDownloader):\n \"\"\"\n A downloader for downloading files from the filesystem.\n\n It provides digest and size validation along with computation of the digests needed to save the\n file as an Artifact. 
It writes a new file to the disk and the return path is included in the\n :class:`~pulpcore.plugin.download.DownloadResult`.\n\n This downloader has all of the attributes of\n :class:`~pulpcore.plugin.download.BaseDownloader`\n \"\"\"\n\n def __init__(self, url, *args, **kwargs):\n \"\"\"\n Download files from a url that starts with `file://`\n\n Args:\n url (str): The url to the file. This is expected to begin with `file://`\n kwargs (dict): This accepts the parameters of\n :class:`~pulpcore.plugin.download.BaseDownloader`.\n\n Raises:\n ValidationError: When the url starts with `file://`, but is not a subfolder of a path in\n the ALLOWED_IMPORT_PATH setting.\n \"\"\"\n from pulpcore.app.serializers import RemoteSerializer\n\n RemoteSerializer().validate_url(url)\n p = urlparse(url)\n self._path = os.path.abspath(os.path.join(p.netloc, p.path))\n super().__init__(url, *args, **kwargs)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Read, validate, and compute digests on the `url`. This is a coroutine.\n\n This method provides the same return object type and documented in\n :meth:`~pulpcore.plugin.download.BaseDownloader._run`.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n \"\"\"\n async with aiofiles.open(self._path, \"rb\") as f_handle:\n while True:\n chunk = await f_handle.read(1048576) # 1 megabyte\n if not chunk:\n await self.finalize()\n break # the reading is done\n await self.handle_data(chunk)\n return DownloadResult(\n path=self.path,\n artifact_attributes=self.artifact_attributes,\n url=self.url,\n headers=None,\n )\n", "path": "pulpcore/download/file.py"}]}
num_tokens: 1,016
num_tokens_diff: 99

problem_id: gh_patches_debug_17299
source: rasdani/github-patches
task_type: git_diff
in_source_id: cal-itp__benefits-1056
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enrollment Success page: Final touches Make the page look like the Figma mocks. 1. Desktop: Design the last line to this. It's a 10-column wide large div that is left-aligned. <img width="837" alt="image" src="https://user-images.githubusercontent.com/3673236/195391617-3f48afbb-9b1a-4500-b727-087a43e5865a.png"> <img width="1512" alt="image" src="https://user-images.githubusercontent.com/3673236/195273016-ca031b6e-673f-4bf1-943e-2fdd8f085255.png"> 2. Mobile: Design this last line. This mobile alignment is trickier. Might have to write CSS to add a margin-left and then calculate exactly how much the amount would be, based on what is the diameter of the icon plus the margin. <img width="209" alt="image" src="https://user-images.githubusercontent.com/3673236/195392741-aba35117-a63f-457e-a8ec-3687b5ef7f30.png"> <img width="1512" alt="image" src="https://user-images.githubusercontent.com/3673236/195273060-2df3fad3-045e-48ad-af27-8c4f9abb75b7.png"> 3. Enrollment Success post-log out: <img width="193" alt="image" src="https://user-images.githubusercontent.com/3673236/195393285-3eb7e077-b56d-4d11-9e2c-9d2a8858ad8d.png"> <img width="636" alt="image" src="https://user-images.githubusercontent.com/3673236/195393331-387c207b-c988-471a-9eca-ae31356ab360.png"> <img width="1512" alt="image" src="https://user-images.githubusercontent.com/3673236/195273236-40fe1650-f74c-4126-8559-d86bdb7811da.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `benefits/core/views.py` Content: ``` 1 """ 2 The core application: view definition for the root of the webapp. 3 """ 4 from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError 5 from django.shortcuts import redirect 6 from django.template import loader 7 from django.template.response import TemplateResponse 8 from django.urls import reverse 9 from django.utils.translation import pgettext, gettext as _ 10 11 from . 
import models, session, viewmodels 12 from .middleware import pageview_decorator 13 14 ROUTE_INDEX = "core:index" 15 ROUTE_ELIGIBILITY = "eligibility:index" 16 ROUTE_HELP = "core:help" 17 ROUTE_LOGGED_OUT = "core:logged_out" 18 19 TEMPLATE_PAGE = "core/page.html" 20 TEMPLATE_AGENCY = "core/agency_index.html" 21 TEMPLATE_HELP = "core/help.html" 22 23 24 @pageview_decorator 25 def index(request): 26 """View handler for the main entry page.""" 27 session.reset(request) 28 29 agencies = models.TransitAgency.all_active() 30 31 if len(agencies) == 1: 32 agency = agencies[0] 33 return redirect(agency.index_url) 34 35 # generate a button to the landing page for each active agency 36 buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies] 37 buttons[0].classes.append("mt-3") 38 buttons[0].label = _("core.pages.index.chooseprovider") 39 40 page = viewmodels.Page( 41 title=_("core.pages.index.title"), 42 headline=_("core.pages.index.headline"), 43 buttons=buttons, 44 classes="home", 45 ) 46 47 return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict()) 48 49 50 @pageview_decorator 51 def agency_index(request, agency): 52 """View handler for an agency entry page.""" 53 session.reset(request) 54 session.update(request, agency=agency, origin=agency.index_url) 55 56 button = viewmodels.Button.primary(text=_("core.pages.index.continue"), url=reverse(ROUTE_ELIGIBILITY)) 57 58 page = viewmodels.Page( 59 title=_("core.pages.agency_index.title"), 60 headline=_("core.pages.agency_index.mst_cc.headline"), 61 button=button, 62 classes="home", 63 ) 64 65 return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict()) 66 67 68 @pageview_decorator 69 def agency_public_key(request, agency): 70 """View handler returns an agency's public key as plain text.""" 71 return HttpResponse(agency.public_key_data, content_type="text/plain") 72 73 74 @pageview_decorator 75 def help(request): 76 """View handler for the help page.""" 77 if session.active_agency(request): 78 agency = session.agency(request) 79 buttons = viewmodels.Button.agency_contact_links(agency) 80 else: 81 buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)] 82 83 buttons.append(viewmodels.Button.home(request, _("core.buttons.back"))) 84 85 page = viewmodels.Page( 86 title=_("core.buttons.help"), 87 headline=_("core.buttons.help"), 88 buttons=buttons, 89 ) 90 91 return TemplateResponse(request, TEMPLATE_HELP, page.context_dict()) 92 93 94 @pageview_decorator 95 def bad_request(request, exception, template_name="400.html"): 96 """View handler for HTTP 400 Bad Request responses.""" 97 if session.active_agency(request): 98 session.update(request, origin=session.agency(request).index_url) 99 else: 100 session.update(request, origin=reverse(ROUTE_INDEX)) 101 102 home = viewmodels.Button.home(request) 103 page = viewmodels.ErrorPage.server_error(button=home) 104 t = loader.get_template(template_name) 105 106 return HttpResponseBadRequest(t.render(page.context_dict())) 107 108 109 @pageview_decorator 110 def csrf_failure(request, reason): 111 """ 112 View handler for CSRF_FAILURE_VIEW with custom data. 
113 """ 114 if session.active_agency(request): 115 session.update(request, origin=session.agency(request).index_url) 116 else: 117 session.update(request, origin=reverse(ROUTE_INDEX)) 118 119 home = viewmodels.Button.home(request) 120 page = viewmodels.ErrorPage.not_found(button=home, path=request.path) 121 t = loader.get_template("400.html") 122 123 return HttpResponseNotFound(t.render(page.context_dict())) 124 125 126 @pageview_decorator 127 def page_not_found(request, exception, template_name="404.html"): 128 """View handler for HTTP 404 Not Found responses.""" 129 if session.active_agency(request): 130 session.update(request, origin=session.agency(request).index_url) 131 else: 132 session.update(request, origin=reverse(ROUTE_INDEX)) 133 134 home = viewmodels.Button.home(request) 135 # show a more user-friendly message instead of not_found 136 page = viewmodels.ErrorPage.user_error(button=home, path=request.path) 137 t = loader.get_template(template_name) 138 139 return HttpResponseNotFound(t.render(page.context_dict())) 140 141 142 @pageview_decorator 143 def server_error(request, template_name="500.html"): 144 """View handler for HTTP 500 Server Error responses.""" 145 if session.active_agency(request): 146 session.update(request, origin=session.agency(request).index_url) 147 else: 148 session.update(request, origin=reverse(ROUTE_INDEX)) 149 150 home = viewmodels.Button.home(request) 151 page = viewmodels.ErrorPage.server_error(button=home) 152 t = loader.get_template(template_name) 153 154 return HttpResponseServerError(t.render(page.context_dict())) 155 156 157 def logged_out(request): 158 """View handler for the final log out confirmation message.""" 159 page = viewmodels.Page( 160 title=_("core.pages.logged_out.title"), 161 headline=_("core.pages.logged_out.headline"), 162 icon=viewmodels.Icon("happybus", pgettext("image alt text", "core.icons.happybus")), 163 ) 164 165 return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict()) 166 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/benefits/core/views.py b/benefits/core/views.py --- a/benefits/core/views.py +++ b/benefits/core/views.py @@ -19,6 +19,7 @@ TEMPLATE_PAGE = "core/page.html" TEMPLATE_AGENCY = "core/agency_index.html" TEMPLATE_HELP = "core/help.html" +TEMPLATE_LOGGED_OUT = "core/logged_out.html" @pageview_decorator @@ -158,8 +159,7 @@ """View handler for the final log out confirmation message.""" page = viewmodels.Page( title=_("core.pages.logged_out.title"), - headline=_("core.pages.logged_out.headline"), icon=viewmodels.Icon("happybus", pgettext("image alt text", "core.icons.happybus")), ) - return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict()) + return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())
{"golden_diff": "diff --git a/benefits/core/views.py b/benefits/core/views.py\n--- a/benefits/core/views.py\n+++ b/benefits/core/views.py\n@@ -19,6 +19,7 @@\n TEMPLATE_PAGE = \"core/page.html\"\n TEMPLATE_AGENCY = \"core/agency_index.html\"\n TEMPLATE_HELP = \"core/help.html\"\n+TEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n \n \n @pageview_decorator\n@@ -158,8 +159,7 @@\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n- headline=_(\"core.pages.logged_out.headline\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n \n- return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n+ return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n", "issue": "Enrollment Success page: Final touches\nMake the page look like the Figma mocks. \r\n\r\n1. Desktop: Design the last line to this. It's a 10-column wide large div that is left-aligned.\r\n\r\n<img width=\"837\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/195391617-3f48afbb-9b1a-4500-b727-087a43e5865a.png\">\r\n\r\n\r\n\r\n<img width=\"1512\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/195273016-ca031b6e-673f-4bf1-943e-2fdd8f085255.png\">\r\n\r\n2. Mobile: Design this last line. This mobile alignment is trickier. Might have to write CSS to add a margin-left and then calculate exactly how much the amount would be, based on what is the diameter of the icon plus the margin.\r\n\r\n<img width=\"209\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/195392741-aba35117-a63f-457e-a8ec-3687b5ef7f30.png\">\r\n<img width=\"1512\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/195273060-2df3fad3-045e-48ad-af27-8c4f9abb75b7.png\">\r\n\r\n\r\n3. Enrollment Success post-log out:\r\n<img width=\"193\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/195393285-3eb7e077-b56d-4d11-9e2c-9d2a8858ad8d.png\">\r\n<img width=\"636\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/195393331-387c207b-c988-471a-9eca-ae31356ab360.png\">\r\n\r\n\r\n<img width=\"1512\" alt=\"image\" src=\"https://user-images.githubusercontent.com/3673236/195273236-40fe1650-f74c-4126-8559-d86bdb7811da.png\">\r\n\n", "before_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom . 
import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\nROUTE_LOGGED_OUT = \"core:logged_out\"\n\nTEMPLATE_PAGE = \"core/page.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n headline=_(\"core.pages.index.headline\"),\n buttons=buttons,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n button=button,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n headline=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return 
HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n\n\ndef logged_out(request):\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n headline=_(\"core.pages.logged_out.headline\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n\n return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n", "path": "benefits/core/views.py"}], "after_files": [{"content": "\"\"\"\nThe core application: view definition for the root of the webapp.\n\"\"\"\nfrom django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError\nfrom django.shortcuts import redirect\nfrom django.template import loader\nfrom django.template.response import TemplateResponse\nfrom django.urls import reverse\nfrom django.utils.translation import pgettext, gettext as _\n\nfrom . 
import models, session, viewmodels\nfrom .middleware import pageview_decorator\n\nROUTE_INDEX = \"core:index\"\nROUTE_ELIGIBILITY = \"eligibility:index\"\nROUTE_HELP = \"core:help\"\nROUTE_LOGGED_OUT = \"core:logged_out\"\n\nTEMPLATE_PAGE = \"core/page.html\"\nTEMPLATE_AGENCY = \"core/agency_index.html\"\nTEMPLATE_HELP = \"core/help.html\"\nTEMPLATE_LOGGED_OUT = \"core/logged_out.html\"\n\n\n@pageview_decorator\ndef index(request):\n \"\"\"View handler for the main entry page.\"\"\"\n session.reset(request)\n\n agencies = models.TransitAgency.all_active()\n\n if len(agencies) == 1:\n agency = agencies[0]\n return redirect(agency.index_url)\n\n # generate a button to the landing page for each active agency\n buttons = [viewmodels.Button.outline_primary(text=a.short_name, url=a.index_url) for a in agencies]\n buttons[0].classes.append(\"mt-3\")\n buttons[0].label = _(\"core.pages.index.chooseprovider\")\n\n page = viewmodels.Page(\n title=_(\"core.pages.index.title\"),\n headline=_(\"core.pages.index.headline\"),\n buttons=buttons,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_PAGE, page.context_dict())\n\n\n@pageview_decorator\ndef agency_index(request, agency):\n \"\"\"View handler for an agency entry page.\"\"\"\n session.reset(request)\n session.update(request, agency=agency, origin=agency.index_url)\n\n button = viewmodels.Button.primary(text=_(\"core.pages.index.continue\"), url=reverse(ROUTE_ELIGIBILITY))\n\n page = viewmodels.Page(\n title=_(\"core.pages.agency_index.title\"),\n headline=_(\"core.pages.agency_index.mst_cc.headline\"),\n button=button,\n classes=\"home\",\n )\n\n return TemplateResponse(request, TEMPLATE_AGENCY, page.context_dict())\n\n\n@pageview_decorator\ndef agency_public_key(request, agency):\n \"\"\"View handler returns an agency's public key as plain text.\"\"\"\n return HttpResponse(agency.public_key_data, content_type=\"text/plain\")\n\n\n@pageview_decorator\ndef help(request):\n \"\"\"View handler for the help page.\"\"\"\n if session.active_agency(request):\n agency = session.agency(request)\n buttons = viewmodels.Button.agency_contact_links(agency)\n else:\n buttons = [btn for a in models.TransitAgency.all_active() for btn in viewmodels.Button.agency_contact_links(a)]\n\n buttons.append(viewmodels.Button.home(request, _(\"core.buttons.back\")))\n\n page = viewmodels.Page(\n title=_(\"core.buttons.help\"),\n headline=_(\"core.buttons.help\"),\n buttons=buttons,\n )\n\n return TemplateResponse(request, TEMPLATE_HELP, page.context_dict())\n\n\n@pageview_decorator\ndef bad_request(request, exception, template_name=\"400.html\"):\n \"\"\"View handler for HTTP 400 Bad Request responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseBadRequest(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef csrf_failure(request, reason):\n \"\"\"\n View handler for CSRF_FAILURE_VIEW with custom data.\n \"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.not_found(button=home, path=request.path)\n t = loader.get_template(\"400.html\")\n\n return 
HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef page_not_found(request, exception, template_name=\"404.html\"):\n \"\"\"View handler for HTTP 404 Not Found responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n # show a more user-friendly message instead of not_found\n page = viewmodels.ErrorPage.user_error(button=home, path=request.path)\n t = loader.get_template(template_name)\n\n return HttpResponseNotFound(t.render(page.context_dict()))\n\n\n@pageview_decorator\ndef server_error(request, template_name=\"500.html\"):\n \"\"\"View handler for HTTP 500 Server Error responses.\"\"\"\n if session.active_agency(request):\n session.update(request, origin=session.agency(request).index_url)\n else:\n session.update(request, origin=reverse(ROUTE_INDEX))\n\n home = viewmodels.Button.home(request)\n page = viewmodels.ErrorPage.server_error(button=home)\n t = loader.get_template(template_name)\n\n return HttpResponseServerError(t.render(page.context_dict()))\n\n\ndef logged_out(request):\n \"\"\"View handler for the final log out confirmation message.\"\"\"\n page = viewmodels.Page(\n title=_(\"core.pages.logged_out.title\"),\n icon=viewmodels.Icon(\"happybus\", pgettext(\"image alt text\", \"core.icons.happybus\")),\n )\n\n return TemplateResponse(request, TEMPLATE_LOGGED_OUT, page.context_dict())\n", "path": "benefits/core/views.py"}]}
2,467
203
gh_patches_debug_2426
rasdani/github-patches
git_diff
kserve__kserve-864
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- explanations no longer working with 0.3.0 Am following the steps in with 0.3.0 of kfserving: https://github.com/kubeflow/kfserving/tree/master/docs/samples/explanation/alibi/income When I execute the curl for the explain I get a 500 error and the container logs show the below. I'm guessing the [update to master](https://github.com/kubeflow/kfserving/pull/803) means that the explainer models have also been updated and so they no longer work with 0.3.0 (the latest release version) ``` [E 200605 17:15:14 web:1792] Uncaught exception POST /v1/models/income:explain (127.0.0.1) HTTPServerRequest(protocol='http', host='income-explainer-default.default.svc.cluster.local', method='POST', uri='/v1/models/income:explain', version='HTTP/1.1', remote_ip='127.0.0.1') Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/tornado/web.py", line 1701, in _execute result = method(*self.path_args, **self.path_kwargs) File "/kfserving/kfserving/handlers/http.py", line 61, in post response = model.explain(request) File "/alibiexplainer/alibiexplainer/explainer.py", line 74, in explain explanation = self.wrapper.explain(request["instances"]) File "/alibiexplainer/alibiexplainer/anchor_tabular.py", line 89, in explain anchor_exp = self.anchors_tabular.explain(arr[0], **self.kwargs) File "/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_tabular.py", line 803, in explain for sampler in self.samplers: AttributeError: 'AnchorTabular' object has no attribute 'samplers' [E 200605 17:15:14 web:2250] 500 POST /v1/models/income:explain (127.0.0.1) 58.80ms [I 200605 17:18:22 anchor_tabular:83] Arr shape ((1, 12),) [E 200605 17:18:22 web:1792] Uncaught exception POST /v1/models/income:explain (127.0.0.1) HTTPServerRequest(protocol='http', host='income-explainer-default.default.svc.cluster.local', method='POST', uri='/v1/models/income:explain', version='HTTP/1.1', remote_ip='127.0.0.1') Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/tornado/web.py", line 1701, in _execute result = method(*self.path_args, **self.path_kwargs) File "/kfserving/kfserving/handlers/http.py", line 61, in post response = model.explain(request) File "/alibiexplainer/alibiexplainer/explainer.py", line 74, in explain explanation = self.wrapper.explain(request["instances"]) File "/alibiexplainer/alibiexplainer/anchor_tabular.py", line 89, in explain anchor_exp = self.anchors_tabular.explain(arr[0], **self.kwargs) File "/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_tabular.py", line 803, in explain for sampler in self.samplers: AttributeError: 'AnchorTabular' object has no attribute 'samplers' [E 200605 17:18:22 web:2250] 500 POST /v1/models/income:explain (127.0.0.1) 31.17ms ``` Presumably it would work on master. Does that sound right @cliveseldon ? If so maybe we should just close this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/alibiexplainer/setup.py` Content: ``` 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 
5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from setuptools import setup, find_packages 16 17 tests_require = [ 18 'pytest', 19 'pytest-tornasync', 20 'mypy' 21 ] 22 23 setup( 24 name='alibiexplainer', 25 version='0.3.0', 26 author_email='[email protected]', 27 license='../../LICENSE.txt', 28 url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer', 29 description='Model Explaination Server. \ 30 Not intended for use outside KFServing Frameworks Images', 31 long_description=open('README.md').read(), 32 python_requires='>=3.6', 33 packages=find_packages("alibiexplainer"), 34 install_requires=[ 35 "kfserving>=0.3.0", 36 "alibi>=0.3", 37 "scikit-learn>=0.20.3", 38 "argparse>=1.4.0", 39 "requests>=2.22.0", 40 "joblib>=0.13.2", 41 "pandas>=0.24.2", 42 "numpy>=1.16.3", 43 "dill>=0.3.0", 44 "spacy>=2.1.4" 45 ], 46 tests_require=tests_require, 47 extras_require={'test': tests_require} 48 ) 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py --- a/python/alibiexplainer/setup.py +++ b/python/alibiexplainer/setup.py @@ -33,7 +33,7 @@ packages=find_packages("alibiexplainer"), install_requires=[ "kfserving>=0.3.0", - "alibi>=0.3", + "alibi==0.3.2", "scikit-learn>=0.20.3", "argparse>=1.4.0", "requests>=2.22.0",
{"golden_diff": "diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py\n--- a/python/alibiexplainer/setup.py\n+++ b/python/alibiexplainer/setup.py\n@@ -33,7 +33,7 @@\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"kfserving>=0.3.0\",\n- \"alibi>=0.3\",\n+ \"alibi==0.3.2\",\n \"scikit-learn>=0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n", "issue": "explanations no longer working with 0.3.0\nAm following the steps in with 0.3.0 of kfserving: https://github.com/kubeflow/kfserving/tree/master/docs/samples/explanation/alibi/income\r\n\r\nWhen I execute the curl for the explain I get a 500 error and the container logs show the below. I'm guessing the [update to master](https://github.com/kubeflow/kfserving/pull/803) means that the explainer models have also been updated and so they no longer work with 0.3.0 (the latest release version)\r\n\r\n```\r\n[E 200605 17:15:14 web:1792] Uncaught exception POST /v1/models/income:explain (127.0.0.1)\r\n HTTPServerRequest(protocol='http', host='income-explainer-default.default.svc.cluster.local', method='POST', uri='/v1/models/income:explain', version='HTTP/1.1', remote_ip='127.0.0.1')\r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/tornado/web.py\", line 1701, in _execute\r\n result = method(*self.path_args, **self.path_kwargs)\r\n File \"/kfserving/kfserving/handlers/http.py\", line 61, in post\r\n response = model.explain(request)\r\n File \"/alibiexplainer/alibiexplainer/explainer.py\", line 74, in explain\r\n explanation = self.wrapper.explain(request[\"instances\"])\r\n File \"/alibiexplainer/alibiexplainer/anchor_tabular.py\", line 89, in explain\r\n anchor_exp = self.anchors_tabular.explain(arr[0], **self.kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_tabular.py\", line 803, in explain\r\n for sampler in self.samplers:\r\n AttributeError: 'AnchorTabular' object has no attribute 'samplers'\r\n[E 200605 17:15:14 web:2250] 500 POST /v1/models/income:explain (127.0.0.1) 58.80ms\r\n[I 200605 17:18:22 anchor_tabular:83] Arr shape ((1, 12),) \r\n[E 200605 17:18:22 web:1792] Uncaught exception POST /v1/models/income:explain (127.0.0.1)\r\n HTTPServerRequest(protocol='http', host='income-explainer-default.default.svc.cluster.local', method='POST', uri='/v1/models/income:explain', version='HTTP/1.1', remote_ip='127.0.0.1')\r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/tornado/web.py\", line 1701, in _execute\r\n result = method(*self.path_args, **self.path_kwargs)\r\n File \"/kfserving/kfserving/handlers/http.py\", line 61, in post\r\n response = model.explain(request)\r\n File \"/alibiexplainer/alibiexplainer/explainer.py\", line 74, in explain\r\n explanation = self.wrapper.explain(request[\"instances\"])\r\n File \"/alibiexplainer/alibiexplainer/anchor_tabular.py\", line 89, in explain\r\n anchor_exp = self.anchors_tabular.explain(arr[0], **self.kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_tabular.py\", line 803, in explain\r\n for sampler in self.samplers:\r\n AttributeError: 'AnchorTabular' object has no attribute 'samplers'\r\n[E 200605 17:18:22 web:2250] 500 POST /v1/models/income:explain (127.0.0.1) 31.17ms\r\n\r\n```\r\n\r\nPresumably it would work on master. Does that sound right @cliveseldon ? 
If so maybe we should just close this.\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.3.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"kfserving>=0.3.0\",\n \"alibi>=0.3\",\n \"scikit-learn>=0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"dill>=0.3.0\",\n \"spacy>=2.1.4\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.3.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"kfserving>=0.3.0\",\n \"alibi==0.3.2\",\n \"scikit-learn>=0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"pandas>=0.24.2\",\n \"numpy>=1.16.3\",\n \"dill>=0.3.0\",\n \"spacy>=2.1.4\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py"}]}
num_tokens: 1,712
num_tokens_diff: 136

problem_id: gh_patches_debug_34547
source: rasdani/github-patches
task_type: git_diff
in_source_id: keras-team__keras-nlp-340
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update our glue evaluation script to use tensorflow datasets When it was originally written, glue for tfds was totally broken, so we used huggingface datasets. This appears to be fixed, so let's used tfds instead. We should update the line here as well to flip the dependency. https://github.com/keras-team/keras-nlp/blob/master/setup.py#L55 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright 2021 The KerasNLP Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Setup script.""" 16 17 import pathlib 18 19 from setuptools import find_packages 20 from setuptools import setup 21 22 HERE = pathlib.Path(__file__).parent 23 README = (HERE / "README.md").read_text() 24 25 setup( 26 name="keras-nlp", 27 description=( 28 "Industry-strength Natural Language Processing extensions for Keras." 29 ), 30 long_description=README, 31 long_description_content_type="text/markdown", 32 version="0.3.0", 33 url="https://github.com/keras-team/keras-nlp", 34 author="Keras team", 35 author_email="[email protected]", 36 license="Apache License 2.0", 37 install_requires=[ 38 "absl-py", 39 "numpy", 40 "packaging", 41 "tensorflow", 42 "tensorflow-text", 43 ], 44 extras_require={ 45 "tests": [ 46 "black", 47 "flake8", 48 "isort", 49 "pytest", 50 "pytest-cov", 51 "rouge-score", 52 "sentencepiece", 53 ], 54 "examples": [ 55 "datasets", # For GLUE in BERT example. 56 "nltk", 57 "wikiextractor", 58 "keras-tuner", 59 ], 60 }, 61 classifiers=[ 62 "Programming Language :: Python", 63 "Programming Language :: Python :: 3.7", 64 "Operating System :: Unix", 65 "Operating System :: Microsoft :: Windows", 66 "Operating System :: MacOS", 67 "Intended Audience :: Science/Research", 68 "Topic :: Scientific/Engineering", 69 "Topic :: Software Development", 70 ], 71 packages=find_packages(exclude=("*_test.py",)), 72 ) 73 ``` Path: `examples/bert/bert_finetune_glue.py` Content: ``` 1 # Copyright 2022 The KerasNLP Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 """Run finetuning on a GLUE task.""" 15 16 import tempfile 17 18 import datasets 19 import keras_tuner 20 import tensorflow as tf 21 from absl import app 22 from absl import flags 23 from tensorflow import keras 24 25 import keras_nlp 26 from examples.bert.bert_config import FINETUNING_CONFIG 27 from examples.bert.bert_config import PREPROCESSING_CONFIG 28 29 FLAGS = flags.FLAGS 30 31 flags.DEFINE_string( 32 "vocab_file", 33 None, 34 "The vocabulary file for tokenization.", 35 ) 36 37 flags.DEFINE_string( 38 "saved_model_input", 39 None, 40 "The directory to load the pretrained model.", 41 ) 42 43 flags.DEFINE_string( 44 "saved_model_output", 45 None, 46 "The directory to save the finetuned model.", 47 ) 48 49 flags.DEFINE_string( 50 "task_name", 51 "mrpc", 52 "The name of the GLUE task to finetune on.", 53 ) 54 55 flags.DEFINE_bool( 56 "do_lower_case", 57 True, 58 "Whether to lower case the input text.", 59 ) 60 61 flags.DEFINE_bool( 62 "do_evaluation", 63 True, 64 "Whether to run evaluation on test data.", 65 ) 66 67 68 def load_data(task_name): 69 if task_name in ("cola", "sst2"): 70 feature_names = ("sentence",) 71 elif task_name in ("mrpc", "stsb", "rte", "wnli"): 72 feature_names = ("sentence1", "sentence2") 73 elif task_name in ("mnli", "mnli_matched", "mnli_mismatched"): 74 feature_names = ("premise", "hypothesis") 75 elif task_name in "qnli": 76 feature_names = ("question", "sentence") 77 elif task_name in "qqp": 78 feature_names = ("question1", "question2") 79 else: 80 raise ValueError(f"Unkown task_name {task_name}.") 81 82 test_suffix = "" 83 if task_name in ("mnli", "mnli_matched"): 84 # For "mnli", just run default to "mnli_matched". 85 task_name = "mnli" 86 test_suffix = "_matched" 87 elif task_name in ("mnli_mismatched",): 88 task_name = "mnli" 89 test_suffix = "_mismatched" 90 91 def to_tf_dataset(split): 92 # Format each sample as a tuple of string features and an int label. 
93 features = tuple([split[f] for f in feature_names]) 94 label = tf.cast(split["label"], tf.int32) 95 return tf.data.Dataset.from_tensor_slices((features, label)) 96 97 data = datasets.load_dataset("glue", task_name) 98 data.set_format(type="tensorflow") 99 train_ds = to_tf_dataset(data["train"]) 100 test_ds = to_tf_dataset(data["test" + test_suffix]) 101 validation_ds = to_tf_dataset(data["validation" + test_suffix]) 102 return train_ds, test_ds, validation_ds 103 104 105 class BertHyperModel(keras_tuner.HyperModel): 106 """Creates a hypermodel to help with the search space for finetuning.""" 107 108 def build(self, hp): 109 model = keras.models.load_model(FLAGS.saved_model_input, compile=False) 110 finetuning_model = keras_nlp.models.BertClassifier( 111 base_model=model, 112 num_classes=3 if FLAGS.task_name in ("mnli", "ax") else 2, 113 ) 114 finetuning_model.compile( 115 optimizer=keras.optimizers.Adam( 116 learning_rate=hp.Choice( 117 "lr", FINETUNING_CONFIG["learning_rates"] 118 ), 119 ), 120 loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True), 121 metrics=[keras.metrics.SparseCategoricalAccuracy()], 122 ) 123 return finetuning_model 124 125 126 def main(_): 127 print(f"Reading input model from {FLAGS.saved_model_input}") 128 129 tokenizer = keras_nlp.tokenizers.WordPieceTokenizer( 130 vocabulary=FLAGS.vocab_file, 131 lowercase=FLAGS.do_lower_case, 132 ) 133 packer = keras_nlp.layers.MultiSegmentPacker( 134 sequence_length=PREPROCESSING_CONFIG["max_seq_length"], 135 start_value=tokenizer.token_to_id("[CLS]"), 136 end_value=tokenizer.token_to_id("[SEP]"), 137 ) 138 139 def preprocess_data(inputs, labels): 140 inputs = [tokenizer(x) for x in inputs] 141 token_ids, segment_ids = packer(inputs) 142 return { 143 "token_ids": token_ids, 144 "segment_ids": segment_ids, 145 "padding_mask": tf.cast(token_ids != 0, "int32"), 146 }, labels 147 148 # Read and preprocess GLUE task data. 149 train_ds, test_ds, validation_ds = load_data(FLAGS.task_name) 150 151 batch_size = FINETUNING_CONFIG["batch_size"] 152 train_ds = train_ds.batch(batch_size).map( 153 preprocess_data, num_parallel_calls=tf.data.AUTOTUNE 154 ) 155 validation_ds = validation_ds.batch(batch_size).map( 156 preprocess_data, num_parallel_calls=tf.data.AUTOTUNE 157 ) 158 test_ds = test_ds.batch(batch_size).map( 159 preprocess_data, num_parallel_calls=tf.data.AUTOTUNE 160 ) 161 162 # Create a hypermodel object for a RandomSearch. 163 hypermodel = BertHyperModel() 164 165 # Initialize the random search over the 4 learning rate parameters, for 4 166 # trials and 3 epochs for each trial. 167 tuner = keras_tuner.RandomSearch( 168 hypermodel=hypermodel, 169 objective=keras_tuner.Objective("val_loss", direction="min"), 170 max_trials=4, 171 overwrite=True, 172 project_name="hyperparameter_tuner_results", 173 directory=tempfile.mkdtemp(), 174 ) 175 176 tuner.search( 177 train_ds, 178 epochs=FINETUNING_CONFIG["epochs"], 179 validation_data=validation_ds, 180 ) 181 182 # Extract the best hyperparameters after the search. 
183 best_hp = tuner.get_best_hyperparameters()[0] 184 finetuning_model = tuner.get_best_models()[0] 185 186 print( 187 f"The best hyperparameters found are:\nLearning Rate: {best_hp['lr']}" 188 ) 189 190 if FLAGS.do_evaluation: 191 print("Evaluating on test set.") 192 finetuning_model.evaluate(test_ds) 193 194 if FLAGS.saved_model_output: 195 print(f"Saving to {FLAGS.saved_model_output}") 196 finetuning_model.save(FLAGS.saved_model_output) 197 198 199 if __name__ == "__main__": 200 flags.mark_flag_as_required("vocab_file") 201 flags.mark_flag_as_required("saved_model_input") 202 app.run(main) 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/bert/bert_finetune_glue.py b/examples/bert/bert_finetune_glue.py --- a/examples/bert/bert_finetune_glue.py +++ b/examples/bert/bert_finetune_glue.py @@ -15,9 +15,9 @@ import tempfile -import datasets import keras_tuner import tensorflow as tf +import tensorflow_datasets as tfds from absl import app from absl import flags from tensorflow import keras @@ -77,7 +77,7 @@ elif task_name in "qqp": feature_names = ("question1", "question2") else: - raise ValueError(f"Unkown task_name {task_name}.") + raise ValueError(f"Unknown task_name {task_name}.") test_suffix = "" if task_name in ("mnli", "mnli_matched"): @@ -88,17 +88,18 @@ task_name = "mnli" test_suffix = "_mismatched" - def to_tf_dataset(split): - # Format each sample as a tuple of string features and an int label. - features = tuple([split[f] for f in feature_names]) - label = tf.cast(split["label"], tf.int32) - return tf.data.Dataset.from_tensor_slices((features, label)) - - data = datasets.load_dataset("glue", task_name) - data.set_format(type="tensorflow") - train_ds = to_tf_dataset(data["train"]) - test_ds = to_tf_dataset(data["test" + test_suffix]) - validation_ds = to_tf_dataset(data["validation" + test_suffix]) + def split_features(x): + return {feature_name: x[feature_name] for feature_name in feature_names} + + train_ds, test_ds, validation_ds = tfds.load( + f"glue/{task_name}", + split=["train", "test" + test_suffix, "validation" + test_suffix], + ) + train_ds = train_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE) + test_ds = test_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE) + validation_ds = validation_ds.map( + split_features, num_parallel_calls=tf.data.AUTOTUNE + ) return train_ds, test_ds, validation_ds diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -52,7 +52,7 @@ "sentencepiece", ], "examples": [ - "datasets", # For GLUE in BERT example. + "tensorflow_datasets", # For GLUE in BERT example. "nltk", "wikiextractor", "keras-tuner",
{"golden_diff": "diff --git a/examples/bert/bert_finetune_glue.py b/examples/bert/bert_finetune_glue.py\n--- a/examples/bert/bert_finetune_glue.py\n+++ b/examples/bert/bert_finetune_glue.py\n@@ -15,9 +15,9 @@\n \n import tempfile\n \n-import datasets\n import keras_tuner\n import tensorflow as tf\n+import tensorflow_datasets as tfds\n from absl import app\n from absl import flags\n from tensorflow import keras\n@@ -77,7 +77,7 @@\n elif task_name in \"qqp\":\n feature_names = (\"question1\", \"question2\")\n else:\n- raise ValueError(f\"Unkown task_name {task_name}.\")\n+ raise ValueError(f\"Unknown task_name {task_name}.\")\n \n test_suffix = \"\"\n if task_name in (\"mnli\", \"mnli_matched\"):\n@@ -88,17 +88,18 @@\n task_name = \"mnli\"\n test_suffix = \"_mismatched\"\n \n- def to_tf_dataset(split):\n- # Format each sample as a tuple of string features and an int label.\n- features = tuple([split[f] for f in feature_names])\n- label = tf.cast(split[\"label\"], tf.int32)\n- return tf.data.Dataset.from_tensor_slices((features, label))\n-\n- data = datasets.load_dataset(\"glue\", task_name)\n- data.set_format(type=\"tensorflow\")\n- train_ds = to_tf_dataset(data[\"train\"])\n- test_ds = to_tf_dataset(data[\"test\" + test_suffix])\n- validation_ds = to_tf_dataset(data[\"validation\" + test_suffix])\n+ def split_features(x):\n+ return {feature_name: x[feature_name] for feature_name in feature_names}\n+\n+ train_ds, test_ds, validation_ds = tfds.load(\n+ f\"glue/{task_name}\",\n+ split=[\"train\", \"test\" + test_suffix, \"validation\" + test_suffix],\n+ )\n+ train_ds = train_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)\n+ test_ds = test_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)\n+ validation_ds = validation_ds.map(\n+ split_features, num_parallel_calls=tf.data.AUTOTUNE\n+ )\n return train_ds, test_ds, validation_ds\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -52,7 +52,7 @@\n \"sentencepiece\",\n ],\n \"examples\": [\n- \"datasets\", # For GLUE in BERT example.\n+ \"tensorflow_datasets\", # For GLUE in BERT example.\n \"nltk\",\n \"wikiextractor\",\n \"keras-tuner\",\n", "issue": "Update our glue evaluation script to use tensorflow datasets\nWhen it was originally written, glue for tfds was totally broken, so we used huggingface datasets.\r\n\r\nThis appears to be fixed, so let's used tfds instead.\r\n\r\nWe should update the line here as well to flip the dependency.\r\nhttps://github.com/keras-team/keras-nlp/blob/master/setup.py#L55\n", "before_files": [{"content": "# Copyright 2021 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script.\"\"\"\n\nimport pathlib\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name=\"keras-nlp\",\n description=(\n \"Industry-strength Natural Language Processing extensions for Keras.\"\n ),\n long_description=README,\n 
long_description_content_type=\"text/markdown\",\n version=\"0.3.0\",\n url=\"https://github.com/keras-team/keras-nlp\",\n author=\"Keras team\",\n author_email=\"[email protected]\",\n license=\"Apache License 2.0\",\n install_requires=[\n \"absl-py\",\n \"numpy\",\n \"packaging\",\n \"tensorflow\",\n \"tensorflow-text\",\n ],\n extras_require={\n \"tests\": [\n \"black\",\n \"flake8\",\n \"isort\",\n \"pytest\",\n \"pytest-cov\",\n \"rouge-score\",\n \"sentencepiece\",\n ],\n \"examples\": [\n \"datasets\", # For GLUE in BERT example.\n \"nltk\",\n \"wikiextractor\",\n \"keras-tuner\",\n ],\n },\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: Unix\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development\",\n ],\n packages=find_packages(exclude=(\"*_test.py\",)),\n)\n", "path": "setup.py"}, {"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run finetuning on a GLUE task.\"\"\"\n\nimport tempfile\n\nimport datasets\nimport keras_tuner\nimport tensorflow as tf\nfrom absl import app\nfrom absl import flags\nfrom tensorflow import keras\n\nimport keras_nlp\nfrom examples.bert.bert_config import FINETUNING_CONFIG\nfrom examples.bert.bert_config import PREPROCESSING_CONFIG\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n \"vocab_file\",\n None,\n \"The vocabulary file for tokenization.\",\n)\n\nflags.DEFINE_string(\n \"saved_model_input\",\n None,\n \"The directory to load the pretrained model.\",\n)\n\nflags.DEFINE_string(\n \"saved_model_output\",\n None,\n \"The directory to save the finetuned model.\",\n)\n\nflags.DEFINE_string(\n \"task_name\",\n \"mrpc\",\n \"The name of the GLUE task to finetune on.\",\n)\n\nflags.DEFINE_bool(\n \"do_lower_case\",\n True,\n \"Whether to lower case the input text.\",\n)\n\nflags.DEFINE_bool(\n \"do_evaluation\",\n True,\n \"Whether to run evaluation on test data.\",\n)\n\n\ndef load_data(task_name):\n if task_name in (\"cola\", \"sst2\"):\n feature_names = (\"sentence\",)\n elif task_name in (\"mrpc\", \"stsb\", \"rte\", \"wnli\"):\n feature_names = (\"sentence1\", \"sentence2\")\n elif task_name in (\"mnli\", \"mnli_matched\", \"mnli_mismatched\"):\n feature_names = (\"premise\", \"hypothesis\")\n elif task_name in \"qnli\":\n feature_names = (\"question\", \"sentence\")\n elif task_name in \"qqp\":\n feature_names = (\"question1\", \"question2\")\n else:\n raise ValueError(f\"Unkown task_name {task_name}.\")\n\n test_suffix = \"\"\n if task_name in (\"mnli\", \"mnli_matched\"):\n # For \"mnli\", just run default to \"mnli_matched\".\n task_name = \"mnli\"\n test_suffix = \"_matched\"\n elif task_name in (\"mnli_mismatched\",):\n task_name = \"mnli\"\n test_suffix = \"_mismatched\"\n\n def to_tf_dataset(split):\n # Format each sample as a tuple of string features and an int label.\n 
features = tuple([split[f] for f in feature_names])\n label = tf.cast(split[\"label\"], tf.int32)\n return tf.data.Dataset.from_tensor_slices((features, label))\n\n data = datasets.load_dataset(\"glue\", task_name)\n data.set_format(type=\"tensorflow\")\n train_ds = to_tf_dataset(data[\"train\"])\n test_ds = to_tf_dataset(data[\"test\" + test_suffix])\n validation_ds = to_tf_dataset(data[\"validation\" + test_suffix])\n return train_ds, test_ds, validation_ds\n\n\nclass BertHyperModel(keras_tuner.HyperModel):\n \"\"\"Creates a hypermodel to help with the search space for finetuning.\"\"\"\n\n def build(self, hp):\n model = keras.models.load_model(FLAGS.saved_model_input, compile=False)\n finetuning_model = keras_nlp.models.BertClassifier(\n base_model=model,\n num_classes=3 if FLAGS.task_name in (\"mnli\", \"ax\") else 2,\n )\n finetuning_model.compile(\n optimizer=keras.optimizers.Adam(\n learning_rate=hp.Choice(\n \"lr\", FINETUNING_CONFIG[\"learning_rates\"]\n ),\n ),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[keras.metrics.SparseCategoricalAccuracy()],\n )\n return finetuning_model\n\n\ndef main(_):\n print(f\"Reading input model from {FLAGS.saved_model_input}\")\n\n tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(\n vocabulary=FLAGS.vocab_file,\n lowercase=FLAGS.do_lower_case,\n )\n packer = keras_nlp.layers.MultiSegmentPacker(\n sequence_length=PREPROCESSING_CONFIG[\"max_seq_length\"],\n start_value=tokenizer.token_to_id(\"[CLS]\"),\n end_value=tokenizer.token_to_id(\"[SEP]\"),\n )\n\n def preprocess_data(inputs, labels):\n inputs = [tokenizer(x) for x in inputs]\n token_ids, segment_ids = packer(inputs)\n return {\n \"token_ids\": token_ids,\n \"segment_ids\": segment_ids,\n \"padding_mask\": tf.cast(token_ids != 0, \"int32\"),\n }, labels\n\n # Read and preprocess GLUE task data.\n train_ds, test_ds, validation_ds = load_data(FLAGS.task_name)\n\n batch_size = FINETUNING_CONFIG[\"batch_size\"]\n train_ds = train_ds.batch(batch_size).map(\n preprocess_data, num_parallel_calls=tf.data.AUTOTUNE\n )\n validation_ds = validation_ds.batch(batch_size).map(\n preprocess_data, num_parallel_calls=tf.data.AUTOTUNE\n )\n test_ds = test_ds.batch(batch_size).map(\n preprocess_data, num_parallel_calls=tf.data.AUTOTUNE\n )\n\n # Create a hypermodel object for a RandomSearch.\n hypermodel = BertHyperModel()\n\n # Initialize the random search over the 4 learning rate parameters, for 4\n # trials and 3 epochs for each trial.\n tuner = keras_tuner.RandomSearch(\n hypermodel=hypermodel,\n objective=keras_tuner.Objective(\"val_loss\", direction=\"min\"),\n max_trials=4,\n overwrite=True,\n project_name=\"hyperparameter_tuner_results\",\n directory=tempfile.mkdtemp(),\n )\n\n tuner.search(\n train_ds,\n epochs=FINETUNING_CONFIG[\"epochs\"],\n validation_data=validation_ds,\n )\n\n # Extract the best hyperparameters after the search.\n best_hp = tuner.get_best_hyperparameters()[0]\n finetuning_model = tuner.get_best_models()[0]\n\n print(\n f\"The best hyperparameters found are:\\nLearning Rate: {best_hp['lr']}\"\n )\n\n if FLAGS.do_evaluation:\n print(\"Evaluating on test set.\")\n finetuning_model.evaluate(test_ds)\n\n if FLAGS.saved_model_output:\n print(f\"Saving to {FLAGS.saved_model_output}\")\n finetuning_model.save(FLAGS.saved_model_output)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"saved_model_input\")\n app.run(main)\n", "path": "examples/bert/bert_finetune_glue.py"}], "after_files": 
[{"content": "# Copyright 2021 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Setup script.\"\"\"\n\nimport pathlib\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nHERE = pathlib.Path(__file__).parent\nREADME = (HERE / \"README.md\").read_text()\n\nsetup(\n name=\"keras-nlp\",\n description=(\n \"Industry-strength Natural Language Processing extensions for Keras.\"\n ),\n long_description=README,\n long_description_content_type=\"text/markdown\",\n version=\"0.3.0\",\n url=\"https://github.com/keras-team/keras-nlp\",\n author=\"Keras team\",\n author_email=\"[email protected]\",\n license=\"Apache License 2.0\",\n install_requires=[\n \"absl-py\",\n \"numpy\",\n \"packaging\",\n \"tensorflow\",\n \"tensorflow-text\",\n ],\n extras_require={\n \"tests\": [\n \"black\",\n \"flake8\",\n \"isort\",\n \"pytest\",\n \"pytest-cov\",\n \"rouge-score\",\n \"sentencepiece\",\n ],\n \"examples\": [\n \"tensorflow_datasets\", # For GLUE in BERT example.\n \"nltk\",\n \"wikiextractor\",\n \"keras-tuner\",\n ],\n },\n classifiers=[\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3.7\",\n \"Operating System :: Unix\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Intended Audience :: Science/Research\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Software Development\",\n ],\n packages=find_packages(exclude=(\"*_test.py\",)),\n)\n", "path": "setup.py"}, {"content": "# Copyright 2022 The KerasNLP Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run finetuning on a GLUE task.\"\"\"\n\nimport tempfile\n\nimport keras_tuner\nimport tensorflow as tf\nimport tensorflow_datasets as tfds\nfrom absl import app\nfrom absl import flags\nfrom tensorflow import keras\n\nimport keras_nlp\nfrom examples.bert.bert_config import FINETUNING_CONFIG\nfrom examples.bert.bert_config import PREPROCESSING_CONFIG\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n \"vocab_file\",\n None,\n \"The vocabulary file for tokenization.\",\n)\n\nflags.DEFINE_string(\n \"saved_model_input\",\n None,\n \"The directory to load the pretrained model.\",\n)\n\nflags.DEFINE_string(\n \"saved_model_output\",\n None,\n \"The directory to save the finetuned model.\",\n)\n\nflags.DEFINE_string(\n \"task_name\",\n \"mrpc\",\n \"The name of the GLUE task to finetune on.\",\n)\n\nflags.DEFINE_bool(\n \"do_lower_case\",\n True,\n \"Whether to lower case the input text.\",\n)\n\nflags.DEFINE_bool(\n 
\"do_evaluation\",\n True,\n \"Whether to run evaluation on test data.\",\n)\n\n\ndef load_data(task_name):\n if task_name in (\"cola\", \"sst2\"):\n feature_names = (\"sentence\",)\n elif task_name in (\"mrpc\", \"stsb\", \"rte\", \"wnli\"):\n feature_names = (\"sentence1\", \"sentence2\")\n elif task_name in (\"mnli\", \"mnli_matched\", \"mnli_mismatched\"):\n feature_names = (\"premise\", \"hypothesis\")\n elif task_name in \"qnli\":\n feature_names = (\"question\", \"sentence\")\n elif task_name in \"qqp\":\n feature_names = (\"question1\", \"question2\")\n else:\n raise ValueError(f\"Unknown task_name {task_name}.\")\n\n test_suffix = \"\"\n if task_name in (\"mnli\", \"mnli_matched\"):\n # For \"mnli\", just run default to \"mnli_matched\".\n task_name = \"mnli\"\n test_suffix = \"_matched\"\n elif task_name in (\"mnli_mismatched\",):\n task_name = \"mnli\"\n test_suffix = \"_mismatched\"\n\n def split_features(x):\n return {feature_name: x[feature_name] for feature_name in feature_names}\n\n train_ds, test_ds, validation_ds = tfds.load(\n f\"glue/{task_name}\",\n split=[\"train\", \"test\" + test_suffix, \"validation\" + test_suffix],\n )\n train_ds = train_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)\n test_ds = test_ds.map(split_features, num_parallel_calls=tf.data.AUTOTUNE)\n validation_ds = validation_ds.map(\n split_features, num_parallel_calls=tf.data.AUTOTUNE\n )\n return train_ds, test_ds, validation_ds\n\n\nclass BertHyperModel(keras_tuner.HyperModel):\n \"\"\"Creates a hypermodel to help with the search space for finetuning.\"\"\"\n\n def build(self, hp):\n model = keras.models.load_model(FLAGS.saved_model_input, compile=False)\n finetuning_model = keras_nlp.models.BertClassifier(\n base_model=model,\n num_classes=3 if FLAGS.task_name in (\"mnli\", \"ax\") else 2,\n )\n finetuning_model.compile(\n optimizer=keras.optimizers.Adam(\n learning_rate=hp.Choice(\n \"lr\", FINETUNING_CONFIG[\"learning_rates\"]\n ),\n ),\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=[keras.metrics.SparseCategoricalAccuracy()],\n )\n return finetuning_model\n\n\ndef main(_):\n print(f\"Reading input model from {FLAGS.saved_model_input}\")\n\n tokenizer = keras_nlp.tokenizers.WordPieceTokenizer(\n vocabulary=FLAGS.vocab_file,\n lowercase=FLAGS.do_lower_case,\n )\n packer = keras_nlp.layers.MultiSegmentPacker(\n sequence_length=PREPROCESSING_CONFIG[\"max_seq_length\"],\n start_value=tokenizer.token_to_id(\"[CLS]\"),\n end_value=tokenizer.token_to_id(\"[SEP]\"),\n )\n\n def preprocess_data(inputs, labels):\n inputs = [tokenizer(x) for x in inputs]\n token_ids, segment_ids = packer(inputs)\n return {\n \"token_ids\": token_ids,\n \"segment_ids\": segment_ids,\n \"padding_mask\": tf.cast(token_ids != 0, \"int32\"),\n }, labels\n\n # Read and preprocess GLUE task data.\n train_ds, test_ds, validation_ds = load_data(FLAGS.task_name)\n\n batch_size = FINETUNING_CONFIG[\"batch_size\"]\n train_ds = train_ds.batch(batch_size).map(\n preprocess_data, num_parallel_calls=tf.data.AUTOTUNE\n )\n validation_ds = validation_ds.batch(batch_size).map(\n preprocess_data, num_parallel_calls=tf.data.AUTOTUNE\n )\n test_ds = test_ds.batch(batch_size).map(\n preprocess_data, num_parallel_calls=tf.data.AUTOTUNE\n )\n\n # Create a hypermodel object for a RandomSearch.\n hypermodel = BertHyperModel()\n\n # Initialize the random search over the 4 learning rate parameters, for 4\n # trials and 3 epochs for each trial.\n tuner = keras_tuner.RandomSearch(\n hypermodel=hypermodel,\n 
objective=keras_tuner.Objective(\"val_loss\", direction=\"min\"),\n max_trials=4,\n overwrite=True,\n project_name=\"hyperparameter_tuner_results\",\n directory=tempfile.mkdtemp(),\n )\n\n tuner.search(\n train_ds,\n epochs=FINETUNING_CONFIG[\"epochs\"],\n validation_data=validation_ds,\n )\n\n # Extract the best hyperparameters after the search.\n best_hp = tuner.get_best_hyperparameters()[0]\n finetuning_model = tuner.get_best_models()[0]\n\n print(\n f\"The best hyperparameters found are:\\nLearning Rate: {best_hp['lr']}\"\n )\n\n if FLAGS.do_evaluation:\n print(\"Evaluating on test set.\")\n finetuning_model.evaluate(test_ds)\n\n if FLAGS.saved_model_output:\n print(f\"Saving to {FLAGS.saved_model_output}\")\n finetuning_model.save(FLAGS.saved_model_output)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"saved_model_input\")\n app.run(main)\n", "path": "examples/bert/bert_finetune_glue.py"}]}
2,998
612
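The record above migrates the BERT GLUE example from Hugging Face `datasets` to `tensorflow_datasets`. As a rough illustration of that loading style (not the example's exact code), here is a minimal sketch assuming the `tensorflow-datasets` package is installed and using `glue/mrpc` purely as an example task:

```python
import tensorflow as tf
import tensorflow_datasets as tfds


def to_features_and_label(example):
    # MRPC examples carry "sentence1", "sentence2", "label" and "idx" fields.
    features = (example["sentence1"], example["sentence2"])
    label = tf.cast(example["label"], tf.int32)
    return features, label


train_ds, validation_ds = tfds.load("glue/mrpc", split=["train", "validation"])
train_ds = train_ds.map(to_features_and_label, num_parallel_calls=tf.data.AUTOTUNE)
validation_ds = validation_ds.map(
    to_features_and_label, num_parallel_calls=tf.data.AUTOTUNE
)
```

The same pattern extends to the other GLUE tasks by swapping the feature names, which is what the record's `load_data` helper does.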
gh_patches_debug_23599
rasdani/github-patches
git_diff
svthalia__concrexit-1793
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Food order can be changed after paying ### Describe the bug If you order a pizza and pay it, you can still change the product. If you change the product through the api, the payment is not removed. ### How to reproduce Steps to reproduce the behaviour: 1. Order a pizza 2. Pay with Thalia Pay 3. Change the order through the api 4. Get an expensive pizza for little money ### Expected behaviour Either changing the order after paying is impossible, or it removes the payment. I think removing the payment (as the website currently seems to do) would be strange, and for event registration we've decided not to enable this. ### Screenshots <img width="569" alt="image" src="https://user-images.githubusercontent.com/41264528/123456318-01d59880-d5e3-11eb-86c8-9217e4720988.png"> There are probably no food events any time soon, so a hotfix may not be needed, though it might be good to double-check that similar stuff is not possible with registrations. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/pizzas/api/v2/views.py` Content: ``` 1 from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope 2 from rest_framework.generics import ( 3 ListAPIView, 4 RetrieveAPIView, 5 get_object_or_404, 6 CreateAPIView, 7 DestroyAPIView, 8 UpdateAPIView, 9 ) 10 11 from rest_framework import filters as framework_filters, status 12 from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly 13 from rest_framework.response import Response 14 15 from pizzas.api.v2 import filters 16 from pizzas.api.v2.serializers import ( 17 ProductSerializer, 18 FoodOrderSerializer, 19 FoodOrderUpdateSerializer, 20 FoodOrderCreateSerializer, 21 ) 22 from pizzas.api.v2.serializers.food_event import FoodEventSerializer 23 from pizzas.models import FoodEvent, Product, FoodOrder 24 from thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod 25 26 27 class FoodEventListView(ListAPIView): 28 """Returns an overview of all food events.""" 29 30 serializer_class = FoodEventSerializer 31 queryset = FoodEvent.objects.all() 32 filter_backends = ( 33 framework_filters.OrderingFilter, 34 filters.FoodEventDateFilterBackend, 35 ) 36 ordering_fields = ("start", "end") 37 permission_classes = [ 38 IsAuthenticatedOrTokenHasScope, 39 DjangoModelPermissionsOrAnonReadOnly, 40 ] 41 required_scopes = ["food:read"] 42 43 44 class FoodEventDetailView(RetrieveAPIView): 45 """Returns one single food event.""" 46 47 serializer_class = FoodEventSerializer 48 queryset = FoodEvent.objects.all() 49 permission_classes = [ 50 IsAuthenticatedOrTokenHasScope, 51 DjangoModelPermissionsOrAnonReadOnly, 52 ] 53 required_scopes = ["food:read"] 54 55 56 class FoodEventProductsListView(ListAPIView): 57 """Returns an overview of all products.""" 58 59 serializer_class = ProductSerializer 60 queryset = Product.available_products.all() 61 filter_backends = (framework_filters.SearchFilter,) 62 search_fields = ("name",) 63 permission_classes = [ 64 IsAuthenticatedOrTokenHasScope, 65 DjangoModelPermissionsOrAnonReadOnly, 66 ] 67 required_scopes = ["food:read"] 68 69 70 class FoodEventOrderDetailView( 71 RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView 72 ): 73 """Returns details of a food order.""" 74 75 permission_classes = [ 76 IsAuthenticatedOrTokenHasScopeForMethod, 77 
DjangoModelPermissionsOrAnonReadOnly, 78 ] 79 required_scopes_per_method = { 80 "GET": ["food:read"], 81 "POST": ["food:order"], 82 "PUT": ["food:order"], 83 "PATCH": ["food:order"], 84 "DELETE": ["food:order"], 85 } 86 87 def get_serializer_class(self): 88 if self.request.method.lower() == "get": 89 return FoodOrderSerializer 90 if self.request.method.lower() == "post": 91 return FoodOrderCreateSerializer 92 return FoodOrderUpdateSerializer 93 94 def get_queryset(self): 95 return FoodOrder.objects.filter(food_event=self.food_event) 96 97 def get_object(self): 98 queryset = self.filter_queryset(self.get_queryset()) 99 obj = get_object_or_404(queryset, member=self.request.member) 100 101 # May raise a permission denied 102 self.check_object_permissions(self.request, obj) 103 104 return obj 105 106 def dispatch(self, request, *args, **kwargs): 107 self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get("pk")) 108 return super().dispatch(request, *args, **kwargs) 109 110 def update(self, request, *args, **kwargs): 111 super().update(request, *args, **kwargs) 112 instance = self.get_object() 113 return Response( 114 FoodOrderSerializer(instance, context=self.get_serializer_context()).data 115 ) 116 117 def create(self, request, *args, **kwargs): 118 serializer = self.get_serializer(data=request.data) 119 serializer.is_valid(raise_exception=True) 120 self.perform_create(serializer) 121 return Response( 122 FoodOrderSerializer( 123 serializer.instance, context=self.get_serializer_context() 124 ).data, 125 status=status.HTTP_201_CREATED, 126 ) 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py --- a/website/pizzas/api/v2/views.py +++ b/website/pizzas/api/v2/views.py @@ -12,6 +12,8 @@ from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly from rest_framework.response import Response +from payments.exceptions import PaymentError +from payments.services import delete_payment from pizzas.api.v2 import filters from pizzas.api.v2.serializers import ( ProductSerializer, @@ -110,6 +112,18 @@ def update(self, request, *args, **kwargs): super().update(request, *args, **kwargs) instance = self.get_object() + + if instance.payment: + try: + delete_payment( + instance, member=request.member, ignore_change_window=True + ) + except PaymentError: + return Response( + "Your order could not be updated because it was already paid.", + status=status.HTTP_403_FORBIDDEN, + ) + return Response( FoodOrderSerializer(instance, context=self.get_serializer_context()).data )
{"golden_diff": "diff --git a/website/pizzas/api/v2/views.py b/website/pizzas/api/v2/views.py\n--- a/website/pizzas/api/v2/views.py\n+++ b/website/pizzas/api/v2/views.py\n@@ -12,6 +12,8 @@\n from rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\n from rest_framework.response import Response\n \n+from payments.exceptions import PaymentError\n+from payments.services import delete_payment\n from pizzas.api.v2 import filters\n from pizzas.api.v2.serializers import (\n ProductSerializer,\n@@ -110,6 +112,18 @@\n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n+\n+ if instance.payment:\n+ try:\n+ delete_payment(\n+ instance, member=request.member, ignore_change_window=True\n+ )\n+ except PaymentError:\n+ return Response(\n+ \"Your order could not be updated because it was already paid.\",\n+ status=status.HTTP_403_FORBIDDEN,\n+ )\n+\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n", "issue": "Food order can be changed after paying\n### Describe the bug\r\nIf you order a pizza and pay it, you can still change the product. If you change the product through the api, the payment is not removed.\r\n\r\n### How to reproduce\r\nSteps to reproduce the behaviour:\r\n1. Order a pizza\r\n2. Pay with Thalia Pay\r\n3. Change the order through the api\r\n4. Get an expensive pizza for little money\r\n\r\n### Expected behaviour\r\nEither changing the order after paying is impossible, or it removes the payment. I think removing the payment (as the website currently seems to do) would be strange, and for event registration we've decided not to enable this.\r\n\r\n### Screenshots\r\n<img width=\"569\" alt=\"image\" src=\"https://user-images.githubusercontent.com/41264528/123456318-01d59880-d5e3-11eb-86c8-9217e4720988.png\">\r\n\r\nThere are probably no food events any time soon, so a hotfix may not be needed, though it might be good to double-check that similar stuff is not possible with registrations.\r\n\n", "before_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n get_object_or_404,\n CreateAPIView,\n DestroyAPIView,\n UpdateAPIView,\n)\n\nfrom rest_framework import filters as framework_filters, status\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\nfrom rest_framework.response import Response\n\nfrom pizzas.api.v2 import filters\nfrom pizzas.api.v2.serializers import (\n ProductSerializer,\n FoodOrderSerializer,\n FoodOrderUpdateSerializer,\n FoodOrderCreateSerializer,\n)\nfrom pizzas.api.v2.serializers.food_event import FoodEventSerializer\nfrom pizzas.models import FoodEvent, Product, FoodOrder\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass FoodEventListView(ListAPIView):\n \"\"\"Returns an overview of all food events.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n filters.FoodEventDateFilterBackend,\n )\n ordering_fields = (\"start\", \"end\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventDetailView(RetrieveAPIView):\n \"\"\"Returns one single food event.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n 
permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n\n serializer_class = ProductSerializer\n queryset = Product.available_products.all()\n filter_backends = (framework_filters.SearchFilter,)\n search_fields = (\"name\",)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventOrderDetailView(\n RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView\n):\n \"\"\"Returns details of a food order.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes_per_method = {\n \"GET\": [\"food:read\"],\n \"POST\": [\"food:order\"],\n \"PUT\": [\"food:order\"],\n \"PATCH\": [\"food:order\"],\n \"DELETE\": [\"food:order\"],\n }\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"get\":\n return FoodOrderSerializer\n if self.request.method.lower() == \"post\":\n return FoodOrderCreateSerializer\n return FoodOrderUpdateSerializer\n\n def get_queryset(self):\n return FoodOrder.objects.filter(food_event=self.food_event)\n\n def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, member=self.request.member)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n return super().dispatch(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(\n FoodOrderSerializer(\n serializer.instance, context=self.get_serializer_context()\n ).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/pizzas/api/v2/views.py"}], "after_files": [{"content": "from oauth2_provider.contrib.rest_framework import IsAuthenticatedOrTokenHasScope\nfrom rest_framework.generics import (\n ListAPIView,\n RetrieveAPIView,\n get_object_or_404,\n CreateAPIView,\n DestroyAPIView,\n UpdateAPIView,\n)\n\nfrom rest_framework import filters as framework_filters, status\nfrom rest_framework.permissions import DjangoModelPermissionsOrAnonReadOnly\nfrom rest_framework.response import Response\n\nfrom payments.exceptions import PaymentError\nfrom payments.services import delete_payment\nfrom pizzas.api.v2 import filters\nfrom pizzas.api.v2.serializers import (\n ProductSerializer,\n FoodOrderSerializer,\n FoodOrderUpdateSerializer,\n FoodOrderCreateSerializer,\n)\nfrom pizzas.api.v2.serializers.food_event import FoodEventSerializer\nfrom pizzas.models import FoodEvent, Product, FoodOrder\nfrom thaliawebsite.api.v2.permissions import IsAuthenticatedOrTokenHasScopeForMethod\n\n\nclass FoodEventListView(ListAPIView):\n \"\"\"Returns an overview of all food events.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n filter_backends = (\n framework_filters.OrderingFilter,\n 
filters.FoodEventDateFilterBackend,\n )\n ordering_fields = (\"start\", \"end\")\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventDetailView(RetrieveAPIView):\n \"\"\"Returns one single food event.\"\"\"\n\n serializer_class = FoodEventSerializer\n queryset = FoodEvent.objects.all()\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventProductsListView(ListAPIView):\n \"\"\"Returns an overview of all products.\"\"\"\n\n serializer_class = ProductSerializer\n queryset = Product.available_products.all()\n filter_backends = (framework_filters.SearchFilter,)\n search_fields = (\"name\",)\n permission_classes = [\n IsAuthenticatedOrTokenHasScope,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes = [\"food:read\"]\n\n\nclass FoodEventOrderDetailView(\n RetrieveAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView\n):\n \"\"\"Returns details of a food order.\"\"\"\n\n permission_classes = [\n IsAuthenticatedOrTokenHasScopeForMethod,\n DjangoModelPermissionsOrAnonReadOnly,\n ]\n required_scopes_per_method = {\n \"GET\": [\"food:read\"],\n \"POST\": [\"food:order\"],\n \"PUT\": [\"food:order\"],\n \"PATCH\": [\"food:order\"],\n \"DELETE\": [\"food:order\"],\n }\n\n def get_serializer_class(self):\n if self.request.method.lower() == \"get\":\n return FoodOrderSerializer\n if self.request.method.lower() == \"post\":\n return FoodOrderCreateSerializer\n return FoodOrderUpdateSerializer\n\n def get_queryset(self):\n return FoodOrder.objects.filter(food_event=self.food_event)\n\n def get_object(self):\n queryset = self.filter_queryset(self.get_queryset())\n obj = get_object_or_404(queryset, member=self.request.member)\n\n # May raise a permission denied\n self.check_object_permissions(self.request, obj)\n\n return obj\n\n def dispatch(self, request, *args, **kwargs):\n self.food_event = get_object_or_404(FoodEvent, pk=self.kwargs.get(\"pk\"))\n return super().dispatch(request, *args, **kwargs)\n\n def update(self, request, *args, **kwargs):\n super().update(request, *args, **kwargs)\n instance = self.get_object()\n\n if instance.payment:\n try:\n delete_payment(\n instance, member=request.member, ignore_change_window=True\n )\n except PaymentError:\n return Response(\n \"Your order could not be updated because it was already paid.\",\n status=status.HTTP_403_FORBIDDEN,\n )\n\n return Response(\n FoodOrderSerializer(instance, context=self.get_serializer_context()).data\n )\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n self.perform_create(serializer)\n return Response(\n FoodOrderSerializer(\n serializer.instance, context=self.get_serializer_context()\n ).data,\n status=status.HTTP_201_CREATED,\n )\n", "path": "website/pizzas/api/v2/views.py"}]}
1,638
259
gh_patches_debug_496
rasdani/github-patches
git_diff
deepchecks__deepchecks-1494
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Add copy button to code snippets We used to have this: ![image](https://user-images.githubusercontent.com/33841818/168883725-41ce6768-925f-4d62-8de7-b1e77502d685.png) For all code snippets (currently both in sphinx-gallery files. Doesn't appear anymore. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Data Integrity Suite on Avocado Sales Data - Quickstart 4 ******************************************************* 5 6 The deepchecks integrity suite is relevant any time you have data that you wish to validate: 7 whether it's on a fresh batch of data, or right before splitting it or using it for training. 8 Here we'll use the avocado prices dataset, to demonstrate how you can run 9 the suite with only a few simple lines of code, and see which kind of insights it can find. 10 11 .. code-block:: bash 12 13 # Before we start, if you don't have deepchecks installed yet, 14 # make sure to run: 15 pip install deepchecks -U --quiet #--user 16 """ 17 18 #%% 19 # Load and Prepare Data 20 # ==================================================== 21 22 from deepchecks.tabular import datasets 23 24 # load data 25 data = datasets.regression.avocado.load_data(data_format='DataFrame', as_train_test=False) 26 #%% 27 28 # drop unused columns (remove after fix...) 29 data = data.drop(columns=['Unnamed: 0']) 30 31 #%% 32 # Insert a few typcial problems to dataset for demonstration. 33 34 import pandas as pd 35 36 def add_dirty_data(df): 37 # change strings 38 df.loc[df[df['type'] == 'organic'].sample(frac=0.18).index,'type'] = 'Organic' 39 df.loc[df[df['type'] == 'organic'].sample(frac=0.01).index,'type'] = 'ORGANIC' 40 # add duplicates 41 df = pd.concat([df, df.sample(frac=0.156)], axis=0, ignore_index=True) 42 # add column with single value 43 df['Is Ripe'] = True 44 return df 45 46 47 dirty_df = add_dirty_data(data) 48 49 #%% 50 # Run Deepchecks for Data Integrity 51 # ==================================== 52 # 53 # Define a Dataset Object 54 # ------------------------ 55 # 56 # Create a deepchecks Dataset, including the relevant metadata (label, date, index, etc.). 57 # Check out :class:`deepchecks.tabular.Dataset` to see all of the columns that can be declared. 58 59 from deepchecks.tabular import Dataset 60 61 # We explicitly state the categorical features, 62 # otherwise they will be automatically inferred, which may not work perfectly and is not recommended. 63 # The label can be passed as a column name or a separate pd.Series / pd.DataFrame 64 ds = Dataset(dirty_df, cat_features = ['type'], datetime_name='Date', label = 'AveragePrice') 65 66 #%% 67 # Run the Deepchecks Suite 68 # -------------------------- 69 # 70 # Validate your data with the :class:`deepchecks.tabular.suites.single_dataset_integrity` suite. 71 # It runs on a single dataset, so you can run it on any batch of data (e.g. train data, test data, a new batch of data 72 # that recently arrived) 73 # 74 # Check out the :doc:`when should you use </getting-started/when_should_you_use>` 75 # deepchecks guide for some more info about the existing suites and when to use them. 
76 77 from deepchecks.tabular.suites import data_integrity 78 79 # Run Suite: 80 integ_suite = data_integrity() 81 integ_suite.run(ds) 82 83 #%% 84 # We can inspect the suite outputs and see that there are a few problems we'd like to fix. 85 # We'll now fix them and check that they're resolved by re-running those specific checks. 86 87 88 #%% 89 # Run a Single Check 90 # ------------------- 91 # We can run a single check on a dataset, and see the results. 92 93 from deepchecks.tabular.checks import IsSingleValue, DataDuplicates 94 95 # first let's see how the check runs: 96 IsSingleValue().run(ds) 97 98 #%% 99 100 # we can also add a condition: 101 single_value_with_condition = IsSingleValue().add_condition_not_single_value() 102 result = single_value_with_condition.run(ds) 103 result 104 105 #%% 106 107 # We can also inspect and use the result's value: 108 result.value 109 110 #%% 111 # Now let's remove the single value column and rerun (notice that we're using directly 112 # the ``data`` attribute that stores the dataframe inside the Dataset) 113 114 ds.data.drop('Is Ripe', axis=1, inplace=True) 115 result = single_value_with_condition.run(ds) 116 result 117 118 #%% 119 120 # Alternatively we can fix the dataframe directly, and create a new dataset. 121 # Let's fix also the duplicate values: 122 dirty_df.drop_duplicates(inplace=True) 123 dirty_df.drop('Is Ripe', axis=1, inplace=True) 124 ds = Dataset(dirty_df, cat_features=['type'], datetime_name='Date', label='AveragePrice') 125 result = DataDuplicates().add_condition_ratio_not_greater_than(0).run(ds) 126 result 127 128 #%% 129 # Rerun Suite on the Fixed Dataset 130 # --------------------------------- 131 # Finally, we'll choose to keep the "organic" multiple spellings as they represent different sources. 132 # So we'll customaize the suite by removing the condition from it (or delete check completely). 133 # Alternatively - we can customize it by creating a new Suite with the desired checks and conditions. 134 # See :doc:`/user-guide/general/customizations/examples/customizing-suites` for more info. 135 136 # let's inspect the suite's structure 137 integ_suite 138 139 #%% 140 141 # and remove the condition: 142 integ_suite[3].clean_conditions() 143 144 #%% 145 # Now we can re-run the suite using: 146 integ_suite.run(ds) 147 148 #%% 149 # and all of the conditions will pass. 150 # 151 # *Note: the check we manipulated will still run as part of the Suite, however 152 # it won't appear in the Conditions Summary since it no longer has any 153 # conditions defined on it. You can still see its display results in the 154 # Additional Outputs section* 155 # 156 # For more info about working with conditions, see the detailed 157 # :doc:`/user-guide/general/customizations/examples/plot_configure_checks_conditions' guide. 158 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py b/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py --- a/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py +++ b/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py @@ -143,7 +143,7 @@ #%% # Now we can re-run the suite using: -integ_suite.run(ds) +res = integ_suite.run(ds) #%% # and all of the conditions will pass.
{"golden_diff": "diff --git a/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py b/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py\n--- a/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py\n+++ b/docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py\n@@ -143,7 +143,7 @@\n \n #%%\n # Now we can re-run the suite using:\n-integ_suite.run(ds)\n+res = integ_suite.run(ds)\n \n #%%\n # and all of the conditions will pass.\n", "issue": "[BUG] Add copy button to code snippets\nWe used to have this:\r\n![image](https://user-images.githubusercontent.com/33841818/168883725-41ce6768-925f-4d62-8de7-b1e77502d685.png)\r\n\r\nFor all code snippets (currently both in sphinx-gallery files. Doesn't appear anymore.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nData Integrity Suite on Avocado Sales Data - Quickstart\n*******************************************************\n\nThe deepchecks integrity suite is relevant any time you have data that you wish to validate:\nwhether it's on a fresh batch of data, or right before splitting it or using it for training. \nHere we'll use the avocado prices dataset, to demonstrate how you can run\nthe suite with only a few simple lines of code, and see which kind of insights it can find.\n\n.. code-block:: bash\n\n # Before we start, if you don't have deepchecks installed yet,\n # make sure to run:\n pip install deepchecks -U --quiet #--user\n\"\"\"\n\n#%%\n# Load and Prepare Data\n# ====================================================\n\nfrom deepchecks.tabular import datasets\n\n# load data\ndata = datasets.regression.avocado.load_data(data_format='DataFrame', as_train_test=False)\n#%%\n\n# drop unused columns (remove after fix...)\ndata = data.drop(columns=['Unnamed: 0'])\n\n#%%\n# Insert a few typcial problems to dataset for demonstration.\n\nimport pandas as pd\n\ndef add_dirty_data(df):\n # change strings\n df.loc[df[df['type'] == 'organic'].sample(frac=0.18).index,'type'] = 'Organic'\n df.loc[df[df['type'] == 'organic'].sample(frac=0.01).index,'type'] = 'ORGANIC'\n # add duplicates\n df = pd.concat([df, df.sample(frac=0.156)], axis=0, ignore_index=True)\n # add column with single value\n df['Is Ripe'] = True\n return df\n\n\ndirty_df = add_dirty_data(data)\n\n#%%\n# Run Deepchecks for Data Integrity\n# ====================================\n#\n# Define a Dataset Object\n# ------------------------\n#\n# Create a deepchecks Dataset, including the relevant metadata (label, date, index, etc.).\n# Check out :class:`deepchecks.tabular.Dataset` to see all of the columns that can be declared.\n\nfrom deepchecks.tabular import Dataset\n\n# We explicitly state the categorical features,\n# otherwise they will be automatically inferred, which may not work perfectly and is not recommended.\n# The label can be passed as a column name or a separate pd.Series / pd.DataFrame\nds = Dataset(dirty_df, cat_features = ['type'], datetime_name='Date', label = 'AveragePrice')\n\n#%%\n# Run the Deepchecks Suite\n# --------------------------\n#\n# Validate your data with the :class:`deepchecks.tabular.suites.single_dataset_integrity` suite.\n# It runs on a single dataset, so you can run it on any batch of data (e.g. 
train data, test data, a new batch of data\n# that recently arrived)\n#\n# Check out the :doc:`when should you use </getting-started/when_should_you_use>`\n# deepchecks guide for some more info about the existing suites and when to use them.\n\nfrom deepchecks.tabular.suites import data_integrity\n\n# Run Suite:\ninteg_suite = data_integrity()\ninteg_suite.run(ds)\n\n#%%\n# We can inspect the suite outputs and see that there are a few problems we'd like to fix.\n# We'll now fix them and check that they're resolved by re-running those specific checks.\n\n\n#%%\n# Run a Single Check\n# -------------------\n# We can run a single check on a dataset, and see the results.\n\nfrom deepchecks.tabular.checks import IsSingleValue, DataDuplicates\n\n# first let's see how the check runs:\nIsSingleValue().run(ds)\n\n#%%\n\n# we can also add a condition:\nsingle_value_with_condition = IsSingleValue().add_condition_not_single_value()\nresult = single_value_with_condition.run(ds)\nresult\n\n#%%\n\n# We can also inspect and use the result's value:\nresult.value\n\n#%%\n# Now let's remove the single value column and rerun (notice that we're using directly \n# the ``data`` attribute that stores the dataframe inside the Dataset)\n\nds.data.drop('Is Ripe', axis=1, inplace=True)\nresult = single_value_with_condition.run(ds)\nresult\n\n#%%\n\n# Alternatively we can fix the dataframe directly, and create a new dataset.\n# Let's fix also the duplicate values:\ndirty_df.drop_duplicates(inplace=True)\ndirty_df.drop('Is Ripe', axis=1, inplace=True)\nds = Dataset(dirty_df, cat_features=['type'], datetime_name='Date', label='AveragePrice')\nresult = DataDuplicates().add_condition_ratio_not_greater_than(0).run(ds)\nresult\n\n#%%\n# Rerun Suite on the Fixed Dataset\n# ---------------------------------\n# Finally, we'll choose to keep the \"organic\" multiple spellings as they represent different sources.\n# So we'll customaize the suite by removing the condition from it (or delete check completely).\n# Alternatively - we can customize it by creating a new Suite with the desired checks and conditions.\n# See :doc:`/user-guide/general/customizations/examples/customizing-suites` for more info.\n\n# let's inspect the suite's structure\ninteg_suite\n\n#%%\n\n# and remove the condition:\ninteg_suite[3].clean_conditions()\n\n#%%\n# Now we can re-run the suite using:\ninteg_suite.run(ds)\n\n#%%\n# and all of the conditions will pass.\n#\n# *Note: the check we manipulated will still run as part of the Suite, however\n# it won't appear in the Conditions Summary since it no longer has any\n# conditions defined on it. You can still see its display results in the \n# Additional Outputs section*\n#\n# For more info about working with conditions, see the detailed\n# :doc:`/user-guide/general/customizations/examples/plot_configure_checks_conditions' guide.\n", "path": "docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nData Integrity Suite on Avocado Sales Data - Quickstart\n*******************************************************\n\nThe deepchecks integrity suite is relevant any time you have data that you wish to validate:\nwhether it's on a fresh batch of data, or right before splitting it or using it for training. \nHere we'll use the avocado prices dataset, to demonstrate how you can run\nthe suite with only a few simple lines of code, and see which kind of insights it can find.\n\n.. 
code-block:: bash\n\n # Before we start, if you don't have deepchecks installed yet,\n # make sure to run:\n pip install deepchecks -U --quiet #--user\n\"\"\"\n\n#%%\n# Load and Prepare Data\n# ====================================================\n\nfrom deepchecks.tabular import datasets\n\n# load data\ndata = datasets.regression.avocado.load_data(data_format='DataFrame', as_train_test=False)\n#%%\n\n# drop unused columns (remove after fix...)\ndata = data.drop(columns=['Unnamed: 0'])\n\n#%%\n# Insert a few typcial problems to dataset for demonstration.\n\nimport pandas as pd\n\ndef add_dirty_data(df):\n # change strings\n df.loc[df[df['type'] == 'organic'].sample(frac=0.18).index,'type'] = 'Organic'\n df.loc[df[df['type'] == 'organic'].sample(frac=0.01).index,'type'] = 'ORGANIC'\n # add duplicates\n df = pd.concat([df, df.sample(frac=0.156)], axis=0, ignore_index=True)\n # add column with single value\n df['Is Ripe'] = True\n return df\n\n\ndirty_df = add_dirty_data(data)\n\n#%%\n# Run Deepchecks for Data Integrity\n# ====================================\n#\n# Define a Dataset Object\n# ------------------------\n#\n# Create a deepchecks Dataset, including the relevant metadata (label, date, index, etc.).\n# Check out :class:`deepchecks.tabular.Dataset` to see all of the columns that can be declared.\n\nfrom deepchecks.tabular import Dataset\n\n# We explicitly state the categorical features,\n# otherwise they will be automatically inferred, which may not work perfectly and is not recommended.\n# The label can be passed as a column name or a separate pd.Series / pd.DataFrame\nds = Dataset(dirty_df, cat_features = ['type'], datetime_name='Date', label = 'AveragePrice')\n\n#%%\n# Run the Deepchecks Suite\n# --------------------------\n#\n# Validate your data with the :class:`deepchecks.tabular.suites.single_dataset_integrity` suite.\n# It runs on a single dataset, so you can run it on any batch of data (e.g. 
train data, test data, a new batch of data\n# that recently arrived)\n#\n# Check out the :doc:`when should you use </getting-started/when_should_you_use>`\n# deepchecks guide for some more info about the existing suites and when to use them.\n\nfrom deepchecks.tabular.suites import data_integrity\n\n# Run Suite:\ninteg_suite = data_integrity()\ninteg_suite.run(ds)\n\n#%%\n# We can inspect the suite outputs and see that there are a few problems we'd like to fix.\n# We'll now fix them and check that they're resolved by re-running those specific checks.\n\n\n#%%\n# Run a Single Check\n# -------------------\n# We can run a single check on a dataset, and see the results.\n\nfrom deepchecks.tabular.checks import IsSingleValue, DataDuplicates\n\n# first let's see how the check runs:\nIsSingleValue().run(ds)\n\n#%%\n\n# we can also add a condition:\nsingle_value_with_condition = IsSingleValue().add_condition_not_single_value()\nresult = single_value_with_condition.run(ds)\nresult\n\n#%%\n\n# We can also inspect and use the result's value:\nresult.value\n\n#%%\n# Now let's remove the single value column and rerun (notice that we're using directly \n# the ``data`` attribute that stores the dataframe inside the Dataset)\n\nds.data.drop('Is Ripe', axis=1, inplace=True)\nresult = single_value_with_condition.run(ds)\nresult\n\n#%%\n\n# Alternatively we can fix the dataframe directly, and create a new dataset.\n# Let's fix also the duplicate values:\ndirty_df.drop_duplicates(inplace=True)\ndirty_df.drop('Is Ripe', axis=1, inplace=True)\nds = Dataset(dirty_df, cat_features=['type'], datetime_name='Date', label='AveragePrice')\nresult = DataDuplicates().add_condition_ratio_not_greater_than(0).run(ds)\nresult\n\n#%%\n# Rerun Suite on the Fixed Dataset\n# ---------------------------------\n# Finally, we'll choose to keep the \"organic\" multiple spellings as they represent different sources.\n# So we'll customaize the suite by removing the condition from it (or delete check completely).\n# Alternatively - we can customize it by creating a new Suite with the desired checks and conditions.\n# See :doc:`/user-guide/general/customizations/examples/customizing-suites` for more info.\n\n# let's inspect the suite's structure\ninteg_suite\n\n#%%\n\n# and remove the condition:\ninteg_suite[3].clean_conditions()\n\n#%%\n# Now we can re-run the suite using:\nres = integ_suite.run(ds)\n\n#%%\n# and all of the conditions will pass.\n#\n# *Note: the check we manipulated will still run as part of the Suite, however\n# it won't appear in the Conditions Summary since it no longer has any\n# conditions defined on it. You can still see its display results in the \n# Additional Outputs section*\n#\n# For more info about working with conditions, see the detailed\n# :doc:`/user-guide/general/customizations/examples/plot_configure_checks_conditions' guide.\n", "path": "docs/source/user-guide/tabular/tutorials/plot_quick_data_integrity.py"}]}
1,968
125
gh_patches_debug_6893
rasdani/github-patches
git_diff
pypa__setuptools-830
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Discrepancy between setuptools and distutils in sdist without MANIFEST.in Originally reported by: **ikelos (Bitbucket: [ikelos](http://bitbucket.org/ikelos), GitHub: [ikelos](http://github.com/ikelos))** --- [This upstream bug](http://bugs.python.org/issue2279) (fixed in to 2010) resolved the need to create a MANIFEST.in file as well as list data_files in the setup.py. This does not appear to have been fixed in setuptools, and as such sdist and similar functions fail to include the appropriate files on machines that have setuptools, but not those that don't, when a fallback import system is used... Would it be possible to align the two again so that setuptools is again a drop-in replacement for distutils? --- - Bitbucket: https://bitbucket.org/pypa/setuptools/issue/274 Setuptools' sdist doesn't use data_files while distutils does Originally reported by: **ionelmc (Bitbucket: [ionelmc](http://bitbucket.org/ionelmc), GitHub: [ionelmc](http://github.com/ionelmc))** --- Distutils has this: https://github.com/python/cpython/blob/master/Lib/distutils/command/sdist.py#L270-L282 However that code is not copies in the sdist provided by setuptools. This can be problematic if you don't inlcude a file in MANIFEST.in but you have it in data_files - python's docs will most certainly mislead you as they say that sdist uses data_files: https://docs.python.org/3/distutils/sourcedist.html?highlight=data_files#specifying-the-files-to-distribute (last bullet) --- - Bitbucket: https://bitbucket.org/pypa/setuptools/issue/521 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/command/sdist.py` Content: ``` 1 from distutils import log 2 import distutils.command.sdist as orig 3 import os 4 import sys 5 import io 6 import contextlib 7 8 from setuptools.extern import six 9 10 from .py36compat import sdist_add_defaults 11 12 import pkg_resources 13 14 _default_revctrl = list 15 16 17 def walk_revctrl(dirname=''): 18 """Find all files under revision control""" 19 for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): 20 for item in ep.load()(dirname): 21 yield item 22 23 24 class sdist(sdist_add_defaults, orig.sdist): 25 """Smart sdist that finds anything supported by revision control""" 26 27 user_options = [ 28 ('formats=', None, 29 "formats for source distribution (comma-separated list)"), 30 ('keep-temp', 'k', 31 "keep the distribution tree around after creating " + 32 "archive file(s)"), 33 ('dist-dir=', 'd', 34 "directory to put the source distribution archive(s) in " 35 "[default: dist]"), 36 ] 37 38 negative_opt = {} 39 40 READMES = 'README', 'README.rst', 'README.txt' 41 42 def run(self): 43 self.run_command('egg_info') 44 ei_cmd = self.get_finalized_command('egg_info') 45 self.filelist = ei_cmd.filelist 46 self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt')) 47 self.check_readme() 48 49 # Run sub commands 50 for cmd_name in self.get_sub_commands(): 51 self.run_command(cmd_name) 52 53 # Call check_metadata only if no 'check' command 54 # (distutils <= 2.6) 55 import distutils.command 56 57 if 'check' not in distutils.command.__all__: 58 self.check_metadata() 59 60 self.make_distribution() 61 62 dist_files = getattr(self.distribution, 'dist_files', []) 63 for file in self.archive_files: 64 data = ('sdist', '', file) 65 if data not in dist_files: 66 
dist_files.append(data) 67 68 def initialize_options(self): 69 orig.sdist.initialize_options(self) 70 71 self._default_to_gztar() 72 73 def _default_to_gztar(self): 74 # only needed on Python prior to 3.6. 75 if sys.version_info >= (3, 6, 0, 'beta', 1): 76 return 77 self.formats = ['gztar'] 78 79 def make_distribution(self): 80 """ 81 Workaround for #516 82 """ 83 with self._remove_os_link(): 84 orig.sdist.make_distribution(self) 85 86 @staticmethod 87 @contextlib.contextmanager 88 def _remove_os_link(): 89 """ 90 In a context, remove and restore os.link if it exists 91 """ 92 93 class NoValue: 94 pass 95 96 orig_val = getattr(os, 'link', NoValue) 97 try: 98 del os.link 99 except Exception: 100 pass 101 try: 102 yield 103 finally: 104 if orig_val is not NoValue: 105 setattr(os, 'link', orig_val) 106 107 def __read_template_hack(self): 108 # This grody hack closes the template file (MANIFEST.in) if an 109 # exception occurs during read_template. 110 # Doing so prevents an error when easy_install attempts to delete the 111 # file. 112 try: 113 orig.sdist.read_template(self) 114 except Exception: 115 _, _, tb = sys.exc_info() 116 tb.tb_next.tb_frame.f_locals['template'].close() 117 raise 118 119 # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle 120 # has been fixed, so only override the method if we're using an earlier 121 # Python. 122 has_leaky_handle = ( 123 sys.version_info < (2, 7, 2) 124 or (3, 0) <= sys.version_info < (3, 1, 4) 125 or (3, 2) <= sys.version_info < (3, 2, 1) 126 ) 127 if has_leaky_handle: 128 read_template = __read_template_hack 129 130 def _add_defaults_python(self): 131 """getting python files""" 132 if self.distribution.has_pure_modules(): 133 build_py = self.get_finalized_command('build_py') 134 self.filelist.extend(build_py.get_source_files()) 135 # This functionality is incompatible with include_package_data, and 136 # will in fact create an infinite recursion if include_package_data 137 # is True. Use of include_package_data will imply that 138 # distutils-style automatic handling of package_data is disabled 139 if not self.distribution.include_package_data: 140 for _, src_dir, _, filenames in build_py.data_files: 141 self.filelist.extend([os.path.join(src_dir, filename) 142 for filename in filenames]) 143 144 def _add_defaults_data_files(self): 145 """ 146 Don't add any data files, but why? 
147 """ 148 149 def check_readme(self): 150 for f in self.READMES: 151 if os.path.exists(f): 152 return 153 else: 154 self.warn( 155 "standard file not found: should have one of " + 156 ', '.join(self.READMES) 157 ) 158 159 def make_release_tree(self, base_dir, files): 160 orig.sdist.make_release_tree(self, base_dir, files) 161 162 # Save any egg_info command line options used to create this sdist 163 dest = os.path.join(base_dir, 'setup.cfg') 164 if hasattr(os, 'link') and os.path.exists(dest): 165 # unlink and re-copy, since it might be hard-linked, and 166 # we don't want to change the source version 167 os.unlink(dest) 168 self.copy_file('setup.cfg', dest) 169 170 self.get_finalized_command('egg_info').save_version_info(dest) 171 172 def _manifest_is_not_generated(self): 173 # check for special comment used in 2.7.1 and higher 174 if not os.path.isfile(self.manifest): 175 return False 176 177 with io.open(self.manifest, 'rb') as fp: 178 first_line = fp.readline() 179 return (first_line != 180 '# file GENERATED by distutils, do NOT edit\n'.encode()) 181 182 def read_manifest(self): 183 """Read the manifest file (named by 'self.manifest') and use it to 184 fill in 'self.filelist', the list of files to include in the source 185 distribution. 186 """ 187 log.info("reading manifest file '%s'", self.manifest) 188 manifest = open(self.manifest, 'rb') 189 for line in manifest: 190 # The manifest must contain UTF-8. See #303. 191 if six.PY3: 192 try: 193 line = line.decode('UTF-8') 194 except UnicodeDecodeError: 195 log.warn("%r not UTF-8 decodable -- skipping" % line) 196 continue 197 # ignore comments and blank lines 198 line = line.strip() 199 if line.startswith('#') or not line: 200 continue 201 self.filelist.append(line) 202 manifest.close() 203 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/command/sdist.py b/setuptools/command/sdist.py --- a/setuptools/command/sdist.py +++ b/setuptools/command/sdist.py @@ -141,11 +141,6 @@ self.filelist.extend([os.path.join(src_dir, filename) for filename in filenames]) - def _add_defaults_data_files(self): - """ - Don't add any data files, but why? - """ - def check_readme(self): for f in self.READMES: if os.path.exists(f):
{"golden_diff": "diff --git a/setuptools/command/sdist.py b/setuptools/command/sdist.py\n--- a/setuptools/command/sdist.py\n+++ b/setuptools/command/sdist.py\n@@ -141,11 +141,6 @@\n self.filelist.extend([os.path.join(src_dir, filename)\n for filename in filenames])\n \n- def _add_defaults_data_files(self):\n- \"\"\"\n- Don't add any data files, but why?\n- \"\"\"\n-\n def check_readme(self):\n for f in self.READMES:\n if os.path.exists(f):\n", "issue": "Discrepancy between setuptools and distutils in sdist without MANIFEST.in\nOriginally reported by: **ikelos (Bitbucket: [ikelos](http://bitbucket.org/ikelos), GitHub: [ikelos](http://github.com/ikelos))**\n\n---\n\n[This upstream bug](http://bugs.python.org/issue2279) (fixed in to 2010) resolved the need to create a MANIFEST.in file as well as list data_files in the setup.py. This does not appear to have been fixed in setuptools, and as such sdist and similar functions fail to include the appropriate files on machines that have setuptools, but not those that don't, when a fallback import system is used...\n\nWould it be possible to align the two again so that setuptools is again a drop-in replacement for distutils?\n\n---\n- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/274\n\nSetuptools' sdist doesn't use data_files while distutils does\nOriginally reported by: **ionelmc (Bitbucket: [ionelmc](http://bitbucket.org/ionelmc), GitHub: [ionelmc](http://github.com/ionelmc))**\n\n---\n\nDistutils has this: https://github.com/python/cpython/blob/master/Lib/distutils/command/sdist.py#L270-L282\n\nHowever that code is not copies in the sdist provided by setuptools. \n\nThis can be problematic if you don't inlcude a file in MANIFEST.in but you have it in data_files - python's docs will most certainly mislead you as they say that sdist uses data_files: https://docs.python.org/3/distutils/sourcedist.html?highlight=data_files#specifying-the-files-to-distribute (last bullet)\n\n---\n- Bitbucket: https://bitbucket.org/pypa/setuptools/issue/521\n\n", "before_files": [{"content": "from distutils import log\nimport distutils.command.sdist as orig\nimport os\nimport sys\nimport io\nimport contextlib\n\nfrom setuptools.extern import six\n\nfrom .py36compat import sdist_add_defaults\n\nimport pkg_resources\n\n_default_revctrl = list\n\n\ndef walk_revctrl(dirname=''):\n \"\"\"Find all files under revision control\"\"\"\n for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):\n for item in ep.load()(dirname):\n yield item\n\n\nclass sdist(sdist_add_defaults, orig.sdist):\n \"\"\"Smart sdist that finds anything supported by revision control\"\"\"\n\n user_options = [\n ('formats=', None,\n \"formats for source distribution (comma-separated list)\"),\n ('keep-temp', 'k',\n \"keep the distribution tree around after creating \" +\n \"archive file(s)\"),\n ('dist-dir=', 'd',\n \"directory to put the source distribution archive(s) in \"\n \"[default: dist]\"),\n ]\n\n negative_opt = {}\n\n READMES = 'README', 'README.rst', 'README.txt'\n\n def run(self):\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n self.filelist = ei_cmd.filelist\n self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))\n self.check_readme()\n\n # Run sub commands\n for cmd_name in self.get_sub_commands():\n self.run_command(cmd_name)\n\n # Call check_metadata only if no 'check' command\n # (distutils <= 2.6)\n import distutils.command\n\n if 'check' not in distutils.command.__all__:\n self.check_metadata()\n\n 
self.make_distribution()\n\n dist_files = getattr(self.distribution, 'dist_files', [])\n for file in self.archive_files:\n data = ('sdist', '', file)\n if data not in dist_files:\n dist_files.append(data)\n\n def initialize_options(self):\n orig.sdist.initialize_options(self)\n\n self._default_to_gztar()\n\n def _default_to_gztar(self):\n # only needed on Python prior to 3.6.\n if sys.version_info >= (3, 6, 0, 'beta', 1):\n return\n self.formats = ['gztar']\n\n def make_distribution(self):\n \"\"\"\n Workaround for #516\n \"\"\"\n with self._remove_os_link():\n orig.sdist.make_distribution(self)\n\n @staticmethod\n @contextlib.contextmanager\n def _remove_os_link():\n \"\"\"\n In a context, remove and restore os.link if it exists\n \"\"\"\n\n class NoValue:\n pass\n\n orig_val = getattr(os, 'link', NoValue)\n try:\n del os.link\n except Exception:\n pass\n try:\n yield\n finally:\n if orig_val is not NoValue:\n setattr(os, 'link', orig_val)\n\n def __read_template_hack(self):\n # This grody hack closes the template file (MANIFEST.in) if an\n # exception occurs during read_template.\n # Doing so prevents an error when easy_install attempts to delete the\n # file.\n try:\n orig.sdist.read_template(self)\n except Exception:\n _, _, tb = sys.exc_info()\n tb.tb_next.tb_frame.f_locals['template'].close()\n raise\n\n # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle\n # has been fixed, so only override the method if we're using an earlier\n # Python.\n has_leaky_handle = (\n sys.version_info < (2, 7, 2)\n or (3, 0) <= sys.version_info < (3, 1, 4)\n or (3, 2) <= sys.version_info < (3, 2, 1)\n )\n if has_leaky_handle:\n read_template = __read_template_hack\n\n def _add_defaults_python(self):\n \"\"\"getting python files\"\"\"\n if self.distribution.has_pure_modules():\n build_py = self.get_finalized_command('build_py')\n self.filelist.extend(build_py.get_source_files())\n # This functionality is incompatible with include_package_data, and\n # will in fact create an infinite recursion if include_package_data\n # is True. 
Use of include_package_data will imply that\n # distutils-style automatic handling of package_data is disabled\n if not self.distribution.include_package_data:\n for _, src_dir, _, filenames in build_py.data_files:\n self.filelist.extend([os.path.join(src_dir, filename)\n for filename in filenames])\n\n def _add_defaults_data_files(self):\n \"\"\"\n Don't add any data files, but why?\n \"\"\"\n\n def check_readme(self):\n for f in self.READMES:\n if os.path.exists(f):\n return\n else:\n self.warn(\n \"standard file not found: should have one of \" +\n ', '.join(self.READMES)\n )\n\n def make_release_tree(self, base_dir, files):\n orig.sdist.make_release_tree(self, base_dir, files)\n\n # Save any egg_info command line options used to create this sdist\n dest = os.path.join(base_dir, 'setup.cfg')\n if hasattr(os, 'link') and os.path.exists(dest):\n # unlink and re-copy, since it might be hard-linked, and\n # we don't want to change the source version\n os.unlink(dest)\n self.copy_file('setup.cfg', dest)\n\n self.get_finalized_command('egg_info').save_version_info(dest)\n\n def _manifest_is_not_generated(self):\n # check for special comment used in 2.7.1 and higher\n if not os.path.isfile(self.manifest):\n return False\n\n with io.open(self.manifest, 'rb') as fp:\n first_line = fp.readline()\n return (first_line !=\n '# file GENERATED by distutils, do NOT edit\\n'.encode())\n\n def read_manifest(self):\n \"\"\"Read the manifest file (named by 'self.manifest') and use it to\n fill in 'self.filelist', the list of files to include in the source\n distribution.\n \"\"\"\n log.info(\"reading manifest file '%s'\", self.manifest)\n manifest = open(self.manifest, 'rb')\n for line in manifest:\n # The manifest must contain UTF-8. See #303.\n if six.PY3:\n try:\n line = line.decode('UTF-8')\n except UnicodeDecodeError:\n log.warn(\"%r not UTF-8 decodable -- skipping\" % line)\n continue\n # ignore comments and blank lines\n line = line.strip()\n if line.startswith('#') or not line:\n continue\n self.filelist.append(line)\n manifest.close()\n", "path": "setuptools/command/sdist.py"}], "after_files": [{"content": "from distutils import log\nimport distutils.command.sdist as orig\nimport os\nimport sys\nimport io\nimport contextlib\n\nfrom setuptools.extern import six\n\nfrom .py36compat import sdist_add_defaults\n\nimport pkg_resources\n\n_default_revctrl = list\n\n\ndef walk_revctrl(dirname=''):\n \"\"\"Find all files under revision control\"\"\"\n for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):\n for item in ep.load()(dirname):\n yield item\n\n\nclass sdist(sdist_add_defaults, orig.sdist):\n \"\"\"Smart sdist that finds anything supported by revision control\"\"\"\n\n user_options = [\n ('formats=', None,\n \"formats for source distribution (comma-separated list)\"),\n ('keep-temp', 'k',\n \"keep the distribution tree around after creating \" +\n \"archive file(s)\"),\n ('dist-dir=', 'd',\n \"directory to put the source distribution archive(s) in \"\n \"[default: dist]\"),\n ]\n\n negative_opt = {}\n\n READMES = 'README', 'README.rst', 'README.txt'\n\n def run(self):\n self.run_command('egg_info')\n ei_cmd = self.get_finalized_command('egg_info')\n self.filelist = ei_cmd.filelist\n self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))\n self.check_readme()\n\n # Run sub commands\n for cmd_name in self.get_sub_commands():\n self.run_command(cmd_name)\n\n # Call check_metadata only if no 'check' command\n # (distutils <= 2.6)\n import distutils.command\n\n if 'check' 
not in distutils.command.__all__:\n self.check_metadata()\n\n self.make_distribution()\n\n dist_files = getattr(self.distribution, 'dist_files', [])\n for file in self.archive_files:\n data = ('sdist', '', file)\n if data not in dist_files:\n dist_files.append(data)\n\n def initialize_options(self):\n orig.sdist.initialize_options(self)\n\n self._default_to_gztar()\n\n def _default_to_gztar(self):\n # only needed on Python prior to 3.6.\n if sys.version_info >= (3, 6, 0, 'beta', 1):\n return\n self.formats = ['gztar']\n\n def make_distribution(self):\n \"\"\"\n Workaround for #516\n \"\"\"\n with self._remove_os_link():\n orig.sdist.make_distribution(self)\n\n @staticmethod\n @contextlib.contextmanager\n def _remove_os_link():\n \"\"\"\n In a context, remove and restore os.link if it exists\n \"\"\"\n\n class NoValue:\n pass\n\n orig_val = getattr(os, 'link', NoValue)\n try:\n del os.link\n except Exception:\n pass\n try:\n yield\n finally:\n if orig_val is not NoValue:\n setattr(os, 'link', orig_val)\n\n def __read_template_hack(self):\n # This grody hack closes the template file (MANIFEST.in) if an\n # exception occurs during read_template.\n # Doing so prevents an error when easy_install attempts to delete the\n # file.\n try:\n orig.sdist.read_template(self)\n except Exception:\n _, _, tb = sys.exc_info()\n tb.tb_next.tb_frame.f_locals['template'].close()\n raise\n\n # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle\n # has been fixed, so only override the method if we're using an earlier\n # Python.\n has_leaky_handle = (\n sys.version_info < (2, 7, 2)\n or (3, 0) <= sys.version_info < (3, 1, 4)\n or (3, 2) <= sys.version_info < (3, 2, 1)\n )\n if has_leaky_handle:\n read_template = __read_template_hack\n\n def _add_defaults_python(self):\n \"\"\"getting python files\"\"\"\n if self.distribution.has_pure_modules():\n build_py = self.get_finalized_command('build_py')\n self.filelist.extend(build_py.get_source_files())\n # This functionality is incompatible with include_package_data, and\n # will in fact create an infinite recursion if include_package_data\n # is True. 
Use of include_package_data will imply that\n # distutils-style automatic handling of package_data is disabled\n if not self.distribution.include_package_data:\n for _, src_dir, _, filenames in build_py.data_files:\n self.filelist.extend([os.path.join(src_dir, filename)\n for filename in filenames])\n\n def check_readme(self):\n for f in self.READMES:\n if os.path.exists(f):\n return\n else:\n self.warn(\n \"standard file not found: should have one of \" +\n ', '.join(self.READMES)\n )\n\n def make_release_tree(self, base_dir, files):\n orig.sdist.make_release_tree(self, base_dir, files)\n\n # Save any egg_info command line options used to create this sdist\n dest = os.path.join(base_dir, 'setup.cfg')\n if hasattr(os, 'link') and os.path.exists(dest):\n # unlink and re-copy, since it might be hard-linked, and\n # we don't want to change the source version\n os.unlink(dest)\n self.copy_file('setup.cfg', dest)\n\n self.get_finalized_command('egg_info').save_version_info(dest)\n\n def _manifest_is_not_generated(self):\n # check for special comment used in 2.7.1 and higher\n if not os.path.isfile(self.manifest):\n return False\n\n with io.open(self.manifest, 'rb') as fp:\n first_line = fp.readline()\n return (first_line !=\n '# file GENERATED by distutils, do NOT edit\\n'.encode())\n\n def read_manifest(self):\n \"\"\"Read the manifest file (named by 'self.manifest') and use it to\n fill in 'self.filelist', the list of files to include in the source\n distribution.\n \"\"\"\n log.info(\"reading manifest file '%s'\", self.manifest)\n manifest = open(self.manifest, 'rb')\n for line in manifest:\n # The manifest must contain UTF-8. See #303.\n if six.PY3:\n try:\n line = line.decode('UTF-8')\n except UnicodeDecodeError:\n log.warn(\"%r not UTF-8 decodable -- skipping\" % line)\n continue\n # ignore comments and blank lines\n line = line.strip()\n if line.startswith('#') or not line:\n continue\n self.filelist.append(line)\n manifest.close()\n", "path": "setuptools/command/sdist.py"}]}
2669
123
gh_patches_debug_4097
rasdani/github-patches
git_diff
mozilla__bugbug-130
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add bug alias as a feature Not the alias by itself, but something like True if `CVE` is in bug['alias'], False otherwise. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/bug_features.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import re 7 from datetime import datetime 8 from datetime import timezone 9 10 import pandas as pd 11 from libmozdata import versions 12 from sklearn.base import BaseEstimator 13 from sklearn.base import TransformerMixin 14 15 from bugbug import bug_snapshot 16 from bugbug import repository 17 18 19 def field(bug, field): 20 if field in bug and bug[field] != '---': 21 return bug[field] 22 23 return None 24 25 26 class has_str(object): 27 def __call__(self, bug): 28 return field(bug, 'cf_has_str') 29 30 31 class has_regression_range(object): 32 def __call__(self, bug): 33 return field(bug, 'cf_has_regression_range') 34 35 36 class has_crash_signature(object): 37 def __call__(self, bug): 38 return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != '' 39 40 41 class keywords(object): 42 def __init__(self, to_ignore=set()): 43 self.to_ignore = to_ignore 44 45 def __call__(self, bug): 46 keywords = [] 47 subkeywords = [] 48 for keyword in bug['keywords']: 49 if keyword in self.to_ignore: 50 continue 51 52 keywords.append(keyword) 53 54 if keyword.startswith('sec-'): 55 subkeywords.append('sec-') 56 elif keyword.startswith('csectype-'): 57 subkeywords.append('csectype-') 58 return keywords + subkeywords 59 60 61 class severity(object): 62 def __call__(self, bug): 63 return field(bug, 'severity') 64 65 66 class is_coverity_issue(object): 67 def __call__(self, bug): 68 return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None 69 70 71 class has_url(object): 72 def __call__(self, bug): 73 return bug['url'] != '' 74 75 76 class has_w3c_url(object): 77 def __call__(self, bug): 78 return 'w3c' in bug['url'] 79 80 81 class has_github_url(object): 82 def __call__(self, bug): 83 return 'github' in bug['url'] 84 85 86 class whiteboard(object): 87 def __call__(self, bug): 88 89 # Split by '[' 90 paren_splits = bug['whiteboard'].lower().split('[') 91 92 # Split splits by space if they weren't in [ and ]. 93 splits = [] 94 for paren_split in paren_splits: 95 if ']' in paren_split: 96 paren_split = paren_split.split(']') 97 splits += paren_split 98 else: 99 splits += paren_split.split(' ') 100 101 # Remove empty splits and strip 102 splits = [split.strip() for split in splits if split.strip() != ''] 103 104 # For splits which contain ':', return both the whole string and the string before ':'. 
105 splits += [split.split(':', 1)[0] for split in splits if ':' in split] 106 107 return splits 108 109 110 class patches(object): 111 def __call__(self, bug): 112 return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request']) 113 114 115 class landings(object): 116 def __call__(self, bug): 117 return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text']) 118 119 120 class title(object): 121 def __call__(self, bug): 122 ret = [] 123 124 keywords = [ 125 'fail', 126 ] 127 for keyword in keywords: 128 if keyword in bug['summary'].lower(): 129 ret.append(keyword) 130 131 return ret 132 133 134 class product(object): 135 def __call__(self, bug): 136 return bug['product'] 137 138 139 class component(object): 140 def __call__(self, bug): 141 return bug['component'] 142 143 144 class is_mozillian(object): 145 def __call__(self, bug): 146 return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org']) 147 148 149 class delta_request_merge(object): 150 def __call__(self, bug): 151 for history in bug['history']: 152 for change in history['changes']: 153 if change['added'].startswith('approval-mozilla'): 154 uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc) 155 timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime 156 return timedelta.days + timedelta.seconds / (24 * 60 * 60) 157 158 return None 159 160 161 class commit_added(object): 162 def __call__(self, bug): 163 return sum(commit['added'] for commit in bug['commits']) 164 165 166 class commit_deleted(object): 167 def __call__(self, bug): 168 return sum(commit['deleted'] for commit in bug['commits']) 169 170 171 class commit_types(object): 172 def __call__(self, bug): 173 return sum((commit['types'] for commit in bug['commits']), []) 174 175 176 class blocked_bugs_number(object): 177 def __call__(self, bug): 178 return len(bug['blocks']) 179 180 181 class priority(object): 182 def __call__(self, bug): 183 return bug['priority'] 184 185 186 class commit_files_modified_num(object): 187 def __call__(self, bug): 188 return sum(commit['files_modified_num'] for commit in bug['commits']) 189 190 191 class comment_count(object): 192 def __call__(self, bug): 193 return field(bug, 'comment_count') 194 195 196 class comment_length(object): 197 def __call__(self, bug): 198 return sum(len(x['text']) for x in bug['comments']) 199 200 201 def cleanup_url(text): 202 text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text) 203 return re.sub(r'http\S+', '__URL__', text) 204 205 206 def cleanup_fileref(text): 207 return re.sub(r'\w+\.py\b|\w+\.json\b|\w+\.js\b|\w+\.jsm\b|\w+\.html\b|\w+\.css\b|\w+\.c\b|\w+\.cpp\b|\w+\.h\b', '__FILE_REFERENCE__', text) 208 209 210 def cleanup_responses(text): 211 return re.sub('>[^\n]+', ' ', text) 212 213 214 def cleanup_hex(text): 215 return re.sub(r'\b0[xX][0-9a-fA-F]+\b', '__HEX_NUMBER__', text) 216 217 218 def cleanup_dll(text): 219 return re.sub(r'\w+(\.dll|\.so|\.dylib)\b', '__DLL_NAME__', text) 220 221 222 def cleanup_synonyms(text): 223 synonyms = [ 224 ('safemode', ['safemode', 'safe mode']), 225 ('str', ['str', 'steps to reproduce', 'repro steps']), 226 ('uaf', ['uaf', 'use after free', 'use-after-free']), 227 ('asan', ['asan', 'address sanitizer', 'addresssanitizer']), 228 ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma 
failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']), 229 ('spec', ['spec', 'specification']), 230 ] 231 232 for synonym_group, synonym_list in synonyms: 233 text = re.sub('|'.join(fr'\b{synonym}\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE) 234 235 return text 236 237 238 def cleanup_crash(text): 239 return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\b', '__CRASH_STATS_LINK__', text) 240 241 242 class BugExtractor(BaseEstimator, TransformerMixin): 243 def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False): 244 self.feature_extractors = feature_extractors 245 self.cleanup_functions = cleanup_functions 246 self.rollback = rollback 247 self.rollback_when = rollback_when 248 self.commit_map = repository.get_commit_map() if commit_data else None 249 250 def fit(self, x, y=None): 251 return self 252 253 def transform(self, bugs): 254 results = [] 255 256 for bug in bugs: 257 bug_id = bug['id'] 258 259 if self.rollback: 260 bug = bug_snapshot.rollback(bug, self.rollback_when) 261 262 data = {} 263 264 if self.commit_map is not None: 265 if bug_id in self.commit_map: 266 bug['commits'] = self.commit_map[bug_id] 267 else: 268 bug['commits'] = [] 269 270 for f in self.feature_extractors: 271 res = f(bug) 272 273 if res is None: 274 continue 275 276 if isinstance(res, list): 277 for item in res: 278 data[f.__class__.__name__ + '-' + item] = 'True' 279 continue 280 281 if isinstance(res, bool): 282 res = str(res) 283 284 data[f.__class__.__name__] = res 285 286 # TODO: Try simply using all possible fields instead of extracting features manually. 287 288 for cleanup_function in self.cleanup_functions: 289 bug['summary'] = cleanup_function(bug['summary']) 290 for c in bug['comments']: 291 c['text'] = cleanup_function(c['text']) 292 293 result = { 294 'data': data, 295 'title': bug['summary'], 296 'first_comment': bug['comments'][0]['text'], 297 'comments': ' '.join([c['text'] for c in bug['comments']]), 298 } 299 300 results.append(result) 301 302 return pd.DataFrame(results) 303 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py --- a/bugbug/bug_features.py +++ b/bugbug/bug_features.py @@ -183,6 +183,11 @@ return bug['priority'] +class bug_has_cve_in_alias(object): + def __call__(self, bug): + return bug['alias'] is not None and 'CVE' in bug['alias'] + + class commit_files_modified_num(object): def __call__(self, bug): return sum(commit['files_modified_num'] for commit in bug['commits'])
{"golden_diff": "diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py\n--- a/bugbug/bug_features.py\n+++ b/bugbug/bug_features.py\n@@ -183,6 +183,11 @@\n return bug['priority']\n \n \n+class bug_has_cve_in_alias(object):\n+ def __call__(self, bug):\n+ return bug['alias'] is not None and 'CVE' in bug['alias']\n+\n+\n class commit_files_modified_num(object):\n def __call__(self, bug):\n return sum(commit['files_modified_num'] for commit in bug['commits'])\n", "issue": "Add bug alias as a feature\nNot the alias by itself, but something like True if `CVE` is in bug['alias'], False otherwise.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\nfrom datetime import datetime\nfrom datetime import timezone\n\nimport pandas as pd\nfrom libmozdata import versions\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\nfrom bugbug import repository\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n\n # Split by '['\n paren_splits = bug['whiteboard'].lower().split('[')\n\n # Split splits by space if they weren't in [ and ].\n splits = []\n for paren_split in paren_splits:\n if ']' in paren_split:\n paren_split = paren_split.split(']')\n splits += paren_split\n else:\n splits += paren_split.split(' ')\n\n # Remove empty splits and strip\n splits = [split.strip() for split in splits if split.strip() != '']\n\n # For splits which contain ':', return both the whole string and the string before ':'.\n splits += [split.split(':', 1)[0] for split in splits if ':' in split]\n\n return splits\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in 
c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\nclass is_mozillian(object):\n def __call__(self, bug):\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n\n\nclass delta_request_merge(object):\n def __call__(self, bug):\n for history in bug['history']:\n for change in history['changes']:\n if change['added'].startswith('approval-mozilla'):\n uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)\n timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime\n return timedelta.days + timedelta.seconds / (24 * 60 * 60)\n\n return None\n\n\nclass commit_added(object):\n def __call__(self, bug):\n return sum(commit['added'] for commit in bug['commits'])\n\n\nclass commit_deleted(object):\n def __call__(self, bug):\n return sum(commit['deleted'] for commit in bug['commits'])\n\n\nclass commit_types(object):\n def __call__(self, bug):\n return sum((commit['types'] for commit in bug['commits']), [])\n\n\nclass blocked_bugs_number(object):\n def __call__(self, bug):\n return len(bug['blocks'])\n\n\nclass priority(object):\n def __call__(self, bug):\n return bug['priority']\n\n\nclass commit_files_modified_num(object):\n def __call__(self, bug):\n return sum(commit['files_modified_num'] for commit in bug['commits'])\n\n\nclass comment_count(object):\n def __call__(self, bug):\n return field(bug, 'comment_count')\n\n\nclass comment_length(object):\n def __call__(self, bug):\n return sum(len(x['text']) for x in bug['comments'])\n\n\ndef cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+(\\.dll|\\.so|\\.dylib)\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ('spec', ['spec', 'specification']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(fr'\\b{synonym}\\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):\n 
self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_map = repository.get_commit_map() if commit_data else None\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n if self.commit_map is not None:\n if bug_id in self.commit_map:\n bug['commits'] = self.commit_map[bug_id]\n else:\n bug['commits'] = []\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\nfrom datetime import datetime\nfrom datetime import timezone\n\nimport pandas as pd\nfrom libmozdata import versions\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\nfrom bugbug import repository\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n\n # Split by '['\n paren_splits = bug['whiteboard'].lower().split('[')\n\n # Split splits by space if they weren't in [ and 
].\n splits = []\n for paren_split in paren_splits:\n if ']' in paren_split:\n paren_split = paren_split.split(']')\n splits += paren_split\n else:\n splits += paren_split.split(' ')\n\n # Remove empty splits and strip\n splits = [split.strip() for split in splits if split.strip() != '']\n\n # For splits which contain ':', return both the whole string and the string before ':'.\n splits += [split.split(':', 1)[0] for split in splits if ':' in split]\n\n return splits\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\nclass is_mozillian(object):\n def __call__(self, bug):\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n\n\nclass delta_request_merge(object):\n def __call__(self, bug):\n for history in bug['history']:\n for change in history['changes']:\n if change['added'].startswith('approval-mozilla'):\n uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)\n timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime\n return timedelta.days + timedelta.seconds / (24 * 60 * 60)\n\n return None\n\n\nclass commit_added(object):\n def __call__(self, bug):\n return sum(commit['added'] for commit in bug['commits'])\n\n\nclass commit_deleted(object):\n def __call__(self, bug):\n return sum(commit['deleted'] for commit in bug['commits'])\n\n\nclass commit_types(object):\n def __call__(self, bug):\n return sum((commit['types'] for commit in bug['commits']), [])\n\n\nclass blocked_bugs_number(object):\n def __call__(self, bug):\n return len(bug['blocks'])\n\n\nclass priority(object):\n def __call__(self, bug):\n return bug['priority']\n\n\nclass bug_has_cve_in_alias(object):\n def __call__(self, bug):\n return bug['alias'] is not None and 'CVE' in bug['alias']\n\n\nclass commit_files_modified_num(object):\n def __call__(self, bug):\n return sum(commit['files_modified_num'] for commit in bug['commits'])\n\n\nclass comment_count(object):\n def __call__(self, bug):\n return field(bug, 'comment_count')\n\n\nclass comment_length(object):\n def __call__(self, bug):\n return sum(len(x['text']) for x in bug['comments'])\n\n\ndef cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+(\\.dll|\\.so|\\.dylib)\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', 
['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ('spec', ['spec', 'specification']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(fr'\\b{synonym}\\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_map = repository.get_commit_map() if commit_data else None\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n if self.commit_map is not None:\n if bug_id in self.commit_map:\n bug['commits'] = self.commit_map[bug_id]\n else:\n bug['commits'] = []\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}]}
3274
136
gh_patches_debug_9032
rasdani/github-patches
git_diff
scikit-hep__pyhf-101
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- speed up CI tests (do we need all conda packages?) By using Conda, unfortunately the setup phase of the CI jobs have become a bit slower than without conda, maybe we can look into speeding them up again by checking whether we need all the packages that we install during CI --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup, find_packages 2 setup( 3 name = 'pyhf', 4 version = '0.0.8', 5 description = '(partial) pure python histfactory implementation', 6 url = '', 7 author = 'Lukas Heinrich', 8 author_email = '[email protected]', 9 packages = find_packages(), 10 include_package_data = True, 11 install_requires = [ 12 'numpy', 13 'scipy' 14 ], 15 extras_require = { 16 'xmlimport': [ 17 'uproot', 18 ], 19 'torch': [ 20 'torch' 21 ], 22 'mxnet':[ 23 'mxnet', 24 ], 25 'develop': [ 26 'pyflakes', 27 'pytest>=3.2.0', 28 'pytest-cov>=2.5.1', 29 'pytest-benchmark[histogram]', 30 'python-coveralls', 31 'matplotlib', 32 'jupyter', 33 'uproot', 34 'papermill', 35 'torch', 36 'tensorflow', 37 'mxnet>=1.0.0', 38 'graphviz', 39 'sphinx', 40 'sphinxcontrib-napoleon', 41 'sphinx_rtd_theme', 42 'nbsphinx', 43 'jsonschema>=2.6.0' 44 ] 45 }, 46 entry_points = { 47 }, 48 dependency_links = [ 49 ] 50 ) 51 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -9,7 +9,7 @@ packages = find_packages(), include_package_data = True, install_requires = [ - 'numpy', + 'numpy>=1.14.3', 'scipy' ], extras_require = { @@ -24,7 +24,7 @@ ], 'develop': [ 'pyflakes', - 'pytest>=3.2.0', + 'pytest>=3.5.1', 'pytest-cov>=2.5.1', 'pytest-benchmark[histogram]', 'python-coveralls',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -9,7 +9,7 @@\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\n- 'numpy',\n+ 'numpy>=1.14.3',\n 'scipy'\n ],\n extras_require = {\n@@ -24,7 +24,7 @@\n ],\n 'develop': [\n 'pyflakes',\n- 'pytest>=3.2.0',\n+ 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'python-coveralls',\n", "issue": "speed up CI tests (do we need all conda packages?)\nBy using Conda, unfortunately the setup phase of the CI jobs have become a bit slower than without conda, maybe we can look into speeding them up again by checking whether we need all the packages that we install during CI\n", "before_files": [{"content": "from setuptools import setup, find_packages\nsetup(\n name = 'pyhf',\n version = '0.0.8',\n description = '(partial) pure python histfactory implementation',\n url = '',\n author = 'Lukas Heinrich',\n author_email = '[email protected]',\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\n 'numpy',\n 'scipy'\n ],\n extras_require = {\n 'xmlimport': [\n 'uproot',\n ],\n 'torch': [\n 'torch'\n ],\n 'mxnet':[\n 'mxnet',\n ],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.2.0',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'python-coveralls',\n 'matplotlib',\n 'jupyter',\n 'uproot',\n 'papermill',\n 'torch',\n 'tensorflow',\n 'mxnet>=1.0.0',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'jsonschema>=2.6.0'\n ]\n },\n entry_points = {\n },\n dependency_links = [\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nsetup(\n name = 'pyhf',\n version = '0.0.8',\n description = '(partial) pure python histfactory implementation',\n url = '',\n author = 'Lukas Heinrich',\n author_email = '[email protected]',\n packages = find_packages(),\n include_package_data = True,\n install_requires = [\n 'numpy>=1.14.3',\n 'scipy'\n ],\n extras_require = {\n 'xmlimport': [\n 'uproot',\n ],\n 'torch': [\n 'torch'\n ],\n 'mxnet':[\n 'mxnet',\n ],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'python-coveralls',\n 'matplotlib',\n 'jupyter',\n 'uproot',\n 'papermill',\n 'torch',\n 'tensorflow',\n 'mxnet>=1.0.0',\n 'graphviz',\n 'sphinx',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'jsonschema>=2.6.0'\n ]\n },\n entry_points = {\n },\n dependency_links = [\n ]\n)\n", "path": "setup.py"}]}
696
151
gh_patches_debug_29080
rasdani/github-patches
git_diff
matrix-org__synapse-7506
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Implement room version 6 Will contain additional features: * #6898 * https://github.com/matrix-org/synapse/pull/7381 * #7501 Do not enable by default just yet. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `synapse/api/room_versions.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright 2019 New Vector Ltd 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from typing import Dict 17 18 import attr 19 20 21 class EventFormatVersions(object): 22 """This is an internal enum for tracking the version of the event format, 23 independently from the room version. 24 """ 25 26 V1 = 1 # $id:server event id format 27 V2 = 2 # MSC1659-style $hash event id format: introduced for room v3 28 V3 = 3 # MSC1884-style $hash format: introduced for room v4 29 30 31 KNOWN_EVENT_FORMAT_VERSIONS = { 32 EventFormatVersions.V1, 33 EventFormatVersions.V2, 34 EventFormatVersions.V3, 35 } 36 37 38 class StateResolutionVersions(object): 39 """Enum to identify the state resolution algorithms""" 40 41 V1 = 1 # room v1 state res 42 V2 = 2 # MSC1442 state res: room v2 and later 43 44 45 class RoomDisposition(object): 46 STABLE = "stable" 47 UNSTABLE = "unstable" 48 49 50 @attr.s(slots=True, frozen=True) 51 class RoomVersion(object): 52 """An object which describes the unique attributes of a room version.""" 53 54 identifier = attr.ib() # str; the identifier for this version 55 disposition = attr.ib() # str; one of the RoomDispositions 56 event_format = attr.ib() # int; one of the EventFormatVersions 57 state_res = attr.ib() # int; one of the StateResolutionVersions 58 enforce_key_validity = attr.ib() # bool 59 60 # bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules 61 special_case_aliases_auth = attr.ib(type=bool) 62 # Strictly enforce canonicaljson, do not allow: 63 # * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1] 64 # * Floats 65 # * NaN, Infinity, -Infinity 66 strict_canonicaljson = attr.ib(type=bool) 67 # bool: MSC2209: Check 'notifications' key while verifying 68 # m.room.power_levels auth rules. 
69 limit_notifications_power_levels = attr.ib(type=bool) 70 71 72 class RoomVersions(object): 73 V1 = RoomVersion( 74 "1", 75 RoomDisposition.STABLE, 76 EventFormatVersions.V1, 77 StateResolutionVersions.V1, 78 enforce_key_validity=False, 79 special_case_aliases_auth=True, 80 strict_canonicaljson=False, 81 limit_notifications_power_levels=False, 82 ) 83 V2 = RoomVersion( 84 "2", 85 RoomDisposition.STABLE, 86 EventFormatVersions.V1, 87 StateResolutionVersions.V2, 88 enforce_key_validity=False, 89 special_case_aliases_auth=True, 90 strict_canonicaljson=False, 91 limit_notifications_power_levels=False, 92 ) 93 V3 = RoomVersion( 94 "3", 95 RoomDisposition.STABLE, 96 EventFormatVersions.V2, 97 StateResolutionVersions.V2, 98 enforce_key_validity=False, 99 special_case_aliases_auth=True, 100 strict_canonicaljson=False, 101 limit_notifications_power_levels=False, 102 ) 103 V4 = RoomVersion( 104 "4", 105 RoomDisposition.STABLE, 106 EventFormatVersions.V3, 107 StateResolutionVersions.V2, 108 enforce_key_validity=False, 109 special_case_aliases_auth=True, 110 strict_canonicaljson=False, 111 limit_notifications_power_levels=False, 112 ) 113 V5 = RoomVersion( 114 "5", 115 RoomDisposition.STABLE, 116 EventFormatVersions.V3, 117 StateResolutionVersions.V2, 118 enforce_key_validity=True, 119 special_case_aliases_auth=True, 120 strict_canonicaljson=False, 121 limit_notifications_power_levels=False, 122 ) 123 MSC2432_DEV = RoomVersion( 124 "org.matrix.msc2432", 125 RoomDisposition.UNSTABLE, 126 EventFormatVersions.V3, 127 StateResolutionVersions.V2, 128 enforce_key_validity=True, 129 special_case_aliases_auth=False, 130 strict_canonicaljson=False, 131 limit_notifications_power_levels=False, 132 ) 133 STRICT_CANONICALJSON = RoomVersion( 134 "org.matrix.strict_canonicaljson", 135 RoomDisposition.UNSTABLE, 136 EventFormatVersions.V3, 137 StateResolutionVersions.V2, 138 enforce_key_validity=True, 139 special_case_aliases_auth=True, 140 strict_canonicaljson=True, 141 limit_notifications_power_levels=False, 142 ) 143 MSC2209_DEV = RoomVersion( 144 "org.matrix.msc2209", 145 RoomDisposition.UNSTABLE, 146 EventFormatVersions.V3, 147 StateResolutionVersions.V2, 148 enforce_key_validity=True, 149 special_case_aliases_auth=True, 150 strict_canonicaljson=False, 151 limit_notifications_power_levels=True, 152 ) 153 154 155 KNOWN_ROOM_VERSIONS = { 156 v.identifier: v 157 for v in ( 158 RoomVersions.V1, 159 RoomVersions.V2, 160 RoomVersions.V3, 161 RoomVersions.V4, 162 RoomVersions.V5, 163 RoomVersions.MSC2432_DEV, 164 RoomVersions.STRICT_CANONICALJSON, 165 RoomVersions.MSC2209_DEV, 166 ) 167 } # type: Dict[str, RoomVersion] 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py --- a/synapse/api/room_versions.py +++ b/synapse/api/room_versions.py @@ -120,34 +120,14 @@ strict_canonicaljson=False, limit_notifications_power_levels=False, ) - MSC2432_DEV = RoomVersion( - "org.matrix.msc2432", - RoomDisposition.UNSTABLE, + V6 = RoomVersion( + "6", + RoomDisposition.STABLE, EventFormatVersions.V3, StateResolutionVersions.V2, enforce_key_validity=True, special_case_aliases_auth=False, - strict_canonicaljson=False, - limit_notifications_power_levels=False, - ) - STRICT_CANONICALJSON = RoomVersion( - "org.matrix.strict_canonicaljson", - RoomDisposition.UNSTABLE, - EventFormatVersions.V3, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=True, strict_canonicaljson=True, - limit_notifications_power_levels=False, - ) - MSC2209_DEV = RoomVersion( - "org.matrix.msc2209", - RoomDisposition.UNSTABLE, - EventFormatVersions.V3, - StateResolutionVersions.V2, - enforce_key_validity=True, - special_case_aliases_auth=True, - strict_canonicaljson=False, limit_notifications_power_levels=True, ) @@ -160,8 +140,6 @@ RoomVersions.V3, RoomVersions.V4, RoomVersions.V5, - RoomVersions.MSC2432_DEV, - RoomVersions.STRICT_CANONICALJSON, - RoomVersions.MSC2209_DEV, + RoomVersions.V6, ) } # type: Dict[str, RoomVersion]
{"golden_diff": "diff --git a/synapse/api/room_versions.py b/synapse/api/room_versions.py\n--- a/synapse/api/room_versions.py\n+++ b/synapse/api/room_versions.py\n@@ -120,34 +120,14 @@\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n- MSC2432_DEV = RoomVersion(\n- \"org.matrix.msc2432\",\n- RoomDisposition.UNSTABLE,\n+ V6 = RoomVersion(\n+ \"6\",\n+ RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n special_case_aliases_auth=False,\n- strict_canonicaljson=False,\n- limit_notifications_power_levels=False,\n- )\n- STRICT_CANONICALJSON = RoomVersion(\n- \"org.matrix.strict_canonicaljson\",\n- RoomDisposition.UNSTABLE,\n- EventFormatVersions.V3,\n- StateResolutionVersions.V2,\n- enforce_key_validity=True,\n- special_case_aliases_auth=True,\n strict_canonicaljson=True,\n- limit_notifications_power_levels=False,\n- )\n- MSC2209_DEV = RoomVersion(\n- \"org.matrix.msc2209\",\n- RoomDisposition.UNSTABLE,\n- EventFormatVersions.V3,\n- StateResolutionVersions.V2,\n- enforce_key_validity=True,\n- special_case_aliases_auth=True,\n- strict_canonicaljson=False,\n limit_notifications_power_levels=True,\n )\n \n@@ -160,8 +140,6 @@\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.V5,\n- RoomVersions.MSC2432_DEV,\n- RoomVersions.STRICT_CANONICALJSON,\n- RoomVersions.MSC2209_DEV,\n+ RoomVersions.V6,\n )\n } # type: Dict[str, RoomVersion]\n", "issue": "Implement room version 6\nWill contain additional features:\r\n* #6898\r\n* https://github.com/matrix-org/synapse/pull/7381\r\n* #7501\r\n\r\nDo not enable by default just yet.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict\n\nimport attr\n\n\nclass EventFormatVersions(object):\n \"\"\"This is an internal enum for tracking the version of the event format,\n independently from the room version.\n \"\"\"\n\n V1 = 1 # $id:server event id format\n V2 = 2 # MSC1659-style $hash event id format: introduced for room v3\n V3 = 3 # MSC1884-style $hash format: introduced for room v4\n\n\nKNOWN_EVENT_FORMAT_VERSIONS = {\n EventFormatVersions.V1,\n EventFormatVersions.V2,\n EventFormatVersions.V3,\n}\n\n\nclass StateResolutionVersions(object):\n \"\"\"Enum to identify the state resolution algorithms\"\"\"\n\n V1 = 1 # room v1 state res\n V2 = 2 # MSC1442 state res: room v2 and later\n\n\nclass RoomDisposition(object):\n STABLE = \"stable\"\n UNSTABLE = \"unstable\"\n\n\[email protected](slots=True, frozen=True)\nclass RoomVersion(object):\n \"\"\"An object which describes the unique attributes of a room version.\"\"\"\n\n identifier = attr.ib() # str; the identifier for this version\n disposition = attr.ib() # str; one of the RoomDispositions\n event_format = attr.ib() # int; one of the EventFormatVersions\n state_res = attr.ib() # int; one of the StateResolutionVersions\n enforce_key_validity = attr.ib() # bool\n\n # bool: before MSC2261/MSC2432, m.room.aliases had 
special auth rules and redaction rules\n special_case_aliases_auth = attr.ib(type=bool)\n # Strictly enforce canonicaljson, do not allow:\n # * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1]\n # * Floats\n # * NaN, Infinity, -Infinity\n strict_canonicaljson = attr.ib(type=bool)\n # bool: MSC2209: Check 'notifications' key while verifying\n # m.room.power_levels auth rules.\n limit_notifications_power_levels = attr.ib(type=bool)\n\n\nclass RoomVersions(object):\n V1 = RoomVersion(\n \"1\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V1,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V2 = RoomVersion(\n \"2\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V3 = RoomVersion(\n \"3\",\n RoomDisposition.STABLE,\n EventFormatVersions.V2,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V4 = RoomVersion(\n \"4\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V5 = RoomVersion(\n \"5\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n MSC2432_DEV = RoomVersion(\n \"org.matrix.msc2432\",\n RoomDisposition.UNSTABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n special_case_aliases_auth=False,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n STRICT_CANONICALJSON = RoomVersion(\n \"org.matrix.strict_canonicaljson\",\n RoomDisposition.UNSTABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n special_case_aliases_auth=True,\n strict_canonicaljson=True,\n limit_notifications_power_levels=False,\n )\n MSC2209_DEV = RoomVersion(\n \"org.matrix.msc2209\",\n RoomDisposition.UNSTABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=True,\n )\n\n\nKNOWN_ROOM_VERSIONS = {\n v.identifier: v\n for v in (\n RoomVersions.V1,\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.V5,\n RoomVersions.MSC2432_DEV,\n RoomVersions.STRICT_CANONICALJSON,\n RoomVersions.MSC2209_DEV,\n )\n} # type: Dict[str, RoomVersion]\n", "path": "synapse/api/room_versions.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2019 New Vector Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\nfrom typing import Dict\n\nimport attr\n\n\nclass EventFormatVersions(object):\n \"\"\"This is an internal enum for tracking the version of the event format,\n independently from the room version.\n \"\"\"\n\n V1 = 1 # $id:server event id format\n V2 = 2 # MSC1659-style $hash event id format: introduced for room v3\n V3 = 3 # MSC1884-style $hash format: introduced for room v4\n\n\nKNOWN_EVENT_FORMAT_VERSIONS = {\n EventFormatVersions.V1,\n EventFormatVersions.V2,\n EventFormatVersions.V3,\n}\n\n\nclass StateResolutionVersions(object):\n \"\"\"Enum to identify the state resolution algorithms\"\"\"\n\n V1 = 1 # room v1 state res\n V2 = 2 # MSC1442 state res: room v2 and later\n\n\nclass RoomDisposition(object):\n STABLE = \"stable\"\n UNSTABLE = \"unstable\"\n\n\[email protected](slots=True, frozen=True)\nclass RoomVersion(object):\n \"\"\"An object which describes the unique attributes of a room version.\"\"\"\n\n identifier = attr.ib() # str; the identifier for this version\n disposition = attr.ib() # str; one of the RoomDispositions\n event_format = attr.ib() # int; one of the EventFormatVersions\n state_res = attr.ib() # int; one of the StateResolutionVersions\n enforce_key_validity = attr.ib() # bool\n\n # bool: before MSC2261/MSC2432, m.room.aliases had special auth rules and redaction rules\n special_case_aliases_auth = attr.ib(type=bool)\n # Strictly enforce canonicaljson, do not allow:\n # * Integers outside the range of [-2 ^ 53 + 1, 2 ^ 53 - 1]\n # * Floats\n # * NaN, Infinity, -Infinity\n strict_canonicaljson = attr.ib(type=bool)\n # bool: MSC2209: Check 'notifications' key while verifying\n # m.room.power_levels auth rules.\n limit_notifications_power_levels = attr.ib(type=bool)\n\n\nclass RoomVersions(object):\n V1 = RoomVersion(\n \"1\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V1,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V2 = RoomVersion(\n \"2\",\n RoomDisposition.STABLE,\n EventFormatVersions.V1,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V3 = RoomVersion(\n \"3\",\n RoomDisposition.STABLE,\n EventFormatVersions.V2,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V4 = RoomVersion(\n \"4\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=False,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V5 = RoomVersion(\n \"5\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n special_case_aliases_auth=True,\n strict_canonicaljson=False,\n limit_notifications_power_levels=False,\n )\n V6 = RoomVersion(\n \"6\",\n RoomDisposition.STABLE,\n EventFormatVersions.V3,\n StateResolutionVersions.V2,\n enforce_key_validity=True,\n special_case_aliases_auth=False,\n strict_canonicaljson=True,\n limit_notifications_power_levels=True,\n )\n\n\nKNOWN_ROOM_VERSIONS = {\n v.identifier: v\n for v in (\n RoomVersions.V1,\n RoomVersions.V2,\n RoomVersions.V3,\n RoomVersions.V4,\n RoomVersions.V5,\n RoomVersions.V6,\n )\n} # type: Dict[str, RoomVersion]\n", "path": "synapse/api/room_versions.py"}]}
1,953
423
gh_patches_debug_15497
rasdani/github-patches
git_diff
ipython__ipython-4363
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `?` may generate hundreds of cell By mistake I have executed a cell like ``` for i in range(3): x= range? ``` but with ~70 instead of 3 which generated 70 code cell with just `x= range` in it... it was _really_ painfull to clean, it would be nice to prevent something like that --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/core/payload.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """Payload system for IPython. 3 4 Authors: 5 6 * Fernando Perez 7 * Brian Granger 8 """ 9 10 #----------------------------------------------------------------------------- 11 # Copyright (C) 2008-2011 The IPython Development Team 12 # 13 # Distributed under the terms of the BSD License. The full license is in 14 # the file COPYING, distributed as part of this software. 15 #----------------------------------------------------------------------------- 16 17 #----------------------------------------------------------------------------- 18 # Imports 19 #----------------------------------------------------------------------------- 20 21 from IPython.config.configurable import Configurable 22 from IPython.utils.traitlets import List 23 24 #----------------------------------------------------------------------------- 25 # Main payload class 26 #----------------------------------------------------------------------------- 27 28 class PayloadManager(Configurable): 29 30 _payload = List([]) 31 32 def write_payload(self, data): 33 if not isinstance(data, dict): 34 raise TypeError('Each payload write must be a dict, got: %r' % data) 35 self._payload.append(data) 36 37 def read_payload(self): 38 return self._payload 39 40 def clear_payload(self): 41 self._payload = [] 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/core/payload.py b/IPython/core/payload.py --- a/IPython/core/payload.py +++ b/IPython/core/payload.py @@ -29,9 +29,23 @@ _payload = List([]) - def write_payload(self, data): + def write_payload(self, data, single=True): + """Include or update the specified `data` payload in the PayloadManager. + + If a previous payload with the same source exists and `single` is True, + it will be overwritten with the new one. + """ + if not isinstance(data, dict): raise TypeError('Each payload write must be a dict, got: %r' % data) + + if single and 'source' in data: + source = data['source'] + for i, pl in enumerate(self._payload): + if 'source' in pl and pl['source'] == source: + self._payload[i] = data + return + self._payload.append(data) def read_payload(self):
{"golden_diff": "diff --git a/IPython/core/payload.py b/IPython/core/payload.py\n--- a/IPython/core/payload.py\n+++ b/IPython/core/payload.py\n@@ -29,9 +29,23 @@\n \n _payload = List([])\n \n- def write_payload(self, data):\n+ def write_payload(self, data, single=True):\n+ \"\"\"Include or update the specified `data` payload in the PayloadManager.\n+\n+ If a previous payload with the same source exists and `single` is True,\n+ it will be overwritten with the new one.\n+ \"\"\"\n+\n if not isinstance(data, dict):\n raise TypeError('Each payload write must be a dict, got: %r' % data)\n+\n+ if single and 'source' in data:\n+ source = data['source']\n+ for i, pl in enumerate(self._payload):\n+ if 'source' in pl and pl['source'] == source:\n+ self._payload[i] = data\n+ return\n+\n self._payload.append(data)\n \n def read_payload(self):\n", "issue": "`?` may generate hundreds of cell \nBy mistake I have executed a cell like \r\n\r\n```\r\nfor i in range(3):\r\n x= range?\r\n```\r\n\r\nbut with ~70 instead of 3\r\nwhich generated 70 code cell with just `x= range` in it...\r\nit was _really_ painfull to clean, it would be nice to prevent something like that\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Payload system for IPython.\n\nAuthors:\n\n* Fernando Perez\n* Brian Granger\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom IPython.config.configurable import Configurable\nfrom IPython.utils.traitlets import List\n\n#-----------------------------------------------------------------------------\n# Main payload class\n#-----------------------------------------------------------------------------\n\nclass PayloadManager(Configurable):\n\n _payload = List([])\n\n def write_payload(self, data):\n if not isinstance(data, dict):\n raise TypeError('Each payload write must be a dict, got: %r' % data)\n self._payload.append(data)\n\n def read_payload(self):\n return self._payload\n\n def clear_payload(self):\n self._payload = []\n", "path": "IPython/core/payload.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Payload system for IPython.\n\nAuthors:\n\n* Fernando Perez\n* Brian Granger\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2008-2011 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. 
The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\nfrom IPython.config.configurable import Configurable\nfrom IPython.utils.traitlets import List\n\n#-----------------------------------------------------------------------------\n# Main payload class\n#-----------------------------------------------------------------------------\n\nclass PayloadManager(Configurable):\n\n _payload = List([])\n\n def write_payload(self, data, single=True):\n \"\"\"Include or update the specified `data` payload in the PayloadManager.\n\n If a previous payload with the same source exists and `single` is True,\n it will be overwritten with the new one.\n \"\"\"\n\n if not isinstance(data, dict):\n raise TypeError('Each payload write must be a dict, got: %r' % data)\n\n if single and 'source' in data:\n source = data['source']\n for i, pl in enumerate(self._payload):\n if 'source' in pl and pl['source'] == source:\n self._payload[i] = data\n return\n\n self._payload.append(data)\n\n def read_payload(self):\n return self._payload\n\n def clear_payload(self):\n self._payload = []\n", "path": "IPython/core/payload.py"}]}
635
234
gh_patches_debug_32362
rasdani/github-patches
git_diff
ansible__ansible-23872
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ec2_facts throws str error in Python 3.5+ <!--- Verify first that your issue/request is not already reported on GitHub. Also test if the latest release, and master branch are affected too. --> ##### ISSUE TYPE <!--- Pick one below and delete the rest: --> - Bug Report ##### COMPONENT NAME ec2_facts ##### ANSIBLE VERSION <!--- Paste verbatim output from “ansible --version” between quotes below --> ``` ansible 2.3.0.0 config file = /etc/ansible/ansible.cfg configured module search path = Default w/o overrides python version = 3.5.2 (default, Nov 17 2016, 17:05:23) [GCC 5.4.0 20160609] ``` ##### CONFIGURATION ``` # inventory file that I use via ansible-playbook -i [all] localhost [all:vars] ansible_python_interpreter=/usr/bin/python3 ``` ##### OS / ENVIRONMENT Ubuntu 16.04.2 LTS Python 3.5.2 ##### SUMMARY When trying to run `ec2_facts`, I get this error: `TypeError: a bytes-like object is required, not 'str'` <!--- Explain the problem briefly --> ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> <!--- Paste example playbooks or commands between quotes below --> ```yaml # in any playbook - name: Gather facts action: ec2_facts ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> That ec2_facts would work. ##### ACTUAL RESULTS <!--- What actually happened? If possible run with extra verbosity (-vvvv) --> It errors out with `a bytes-like object is required, not 'str'` Just as a note, I thought this was connected to #17038, but perhaps not. ec2_facts throws str error in Python 3.5+ <!--- Verify first that your issue/request is not already reported on GitHub. Also test if the latest release, and master branch are affected too. --> ##### ISSUE TYPE <!--- Pick one below and delete the rest: --> - Bug Report ##### COMPONENT NAME ec2_facts ##### ANSIBLE VERSION <!--- Paste verbatim output from “ansible --version” between quotes below --> ``` ansible 2.3.0.0 config file = /etc/ansible/ansible.cfg configured module search path = Default w/o overrides python version = 3.5.2 (default, Nov 17 2016, 17:05:23) [GCC 5.4.0 20160609] ``` ##### CONFIGURATION ``` # inventory file that I use via ansible-playbook -i [all] localhost [all:vars] ansible_python_interpreter=/usr/bin/python3 ``` ##### OS / ENVIRONMENT Ubuntu 16.04.2 LTS Python 3.5.2 ##### SUMMARY When trying to run `ec2_facts`, I get this error: `TypeError: a bytes-like object is required, not 'str'` <!--- Explain the problem briefly --> ##### STEPS TO REPRODUCE <!--- For bugs, show exactly how to reproduce the problem, using a minimal test-case. For new features, show how the feature would be used. --> <!--- Paste example playbooks or commands between quotes below --> ```yaml # in any playbook - name: Gather facts action: ec2_facts ``` <!--- You can also paste gist.github.com links for larger files --> ##### EXPECTED RESULTS <!--- What did you expect to happen when running the steps above? --> That ec2_facts would work. ##### ACTUAL RESULTS <!--- What actually happened? If possible run with extra verbosity (-vvvv) --> It errors out with `a bytes-like object is required, not 'str'` Just as a note, I thought this was connected to #17038, but perhaps not. 
--- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansible/modules/cloud/amazon/ec2_facts.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # This file is part of Ansible 5 # 6 # Ansible is free software: you can redistribute it and/or modify 7 # it under the terms of the GNU General Public License as published by 8 # the Free Software Foundation, either version 3 of the License, or 9 # (at your option) any later version. 10 # 11 # Ansible is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU General Public License for more details. 15 # 16 # You should have received a copy of the GNU General Public License 17 # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 18 19 ANSIBLE_METADATA = {'metadata_version': '1.0', 20 'status': ['stableinterface'], 21 'supported_by': 'curated'} 22 23 24 DOCUMENTATION = ''' 25 --- 26 module: ec2_facts 27 short_description: Gathers facts about remote hosts within ec2 (aws) 28 version_added: "1.0" 29 options: 30 validate_certs: 31 description: 32 - If C(no), SSL certificates will not be validated. This should only be used 33 on personally controlled sites using self-signed certificates. 34 required: false 35 default: 'yes' 36 choices: ['yes', 'no'] 37 version_added: '1.5.1' 38 description: 39 - This module fetches data from the metadata servers in ec2 (aws) as per 40 http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html. 41 The module must be called from within the EC2 instance itself. 42 notes: 43 - Parameters to filter on ec2_facts may be added later. 
44 author: "Silviu Dicu (@silviud) <[email protected]>" 45 ''' 46 47 EXAMPLES = ''' 48 # Conditional example 49 - name: Gather facts 50 ec2_facts: 51 52 - name: Conditional 53 debug: 54 msg: "This instance is a t1.micro" 55 when: ansible_ec2_instance_type == "t1.micro" 56 ''' 57 58 import socket 59 import re 60 61 socket.setdefaulttimeout(5) 62 63 64 class Ec2Metadata(object): 65 ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/' 66 ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key' 67 ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/' 68 69 AWS_REGIONS = ('ap-northeast-1', 70 'ap-northeast-2', 71 'ap-south-1', 72 'ap-southeast-1', 73 'ap-southeast-2', 74 'ca-central-1', 75 'eu-central-1', 76 'eu-west-1', 77 'eu-west-2', 78 'sa-east-1', 79 'us-east-1', 80 'us-east-2', 81 'us-west-1', 82 'us-west-2', 83 'us-gov-west-1', 84 ) 85 86 def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None): 87 self.module = module 88 self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri 89 self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri 90 self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri 91 self._data = {} 92 self._prefix = 'ansible_ec2_%s' 93 94 def _fetch(self, url): 95 (response, info) = fetch_url(self.module, url, force=True) 96 if response: 97 data = response.read() 98 else: 99 data = None 100 return data 101 102 def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): 103 new_fields = {} 104 for key, value in fields.items(): 105 split_fields = key[len(uri):].split('/') 106 if len(split_fields) > 1 and split_fields[1]: 107 new_key = "-".join(split_fields) 108 new_fields[self._prefix % new_key] = value 109 else: 110 new_key = "".join(split_fields) 111 new_fields[self._prefix % new_key] = value 112 for pattern in filter_patterns: 113 for key in new_fields.keys(): 114 match = re.search(pattern, key) 115 if match: 116 new_fields.pop(key) 117 return new_fields 118 119 def fetch(self, uri, recurse=True): 120 raw_subfields = self._fetch(uri) 121 if not raw_subfields: 122 return 123 subfields = raw_subfields.split('\n') 124 for field in subfields: 125 if field.endswith('/') and recurse: 126 self.fetch(uri + field) 127 if uri.endswith('/'): 128 new_uri = uri + field 129 else: 130 new_uri = uri + '/' + field 131 if new_uri not in self._data and not new_uri.endswith('/'): 132 content = self._fetch(new_uri) 133 if field == 'security-groups': 134 sg_fields = ",".join(content.split('\n')) 135 self._data['%s' % (new_uri)] = sg_fields 136 else: 137 self._data['%s' % (new_uri)] = content 138 139 def fix_invalid_varnames(self, data): 140 """Change ':'' and '-' to '_' to ensure valid template variable names""" 141 for (key, value) in data.items(): 142 if ':' in key or '-' in key: 143 newkey = key.replace(':', '_').replace('-', '_') 144 del data[key] 145 data[newkey] = value 146 147 def add_ec2_region(self, data): 148 """Use the 'ansible_ec2_placement_availability_zone' key/value 149 pair to add 'ansible_ec2_placement_region' key/value pair with 150 the EC2 region name. 151 """ 152 153 # Only add a 'ansible_ec2_placement_region' key if the 154 # 'ansible_ec2_placement_availability_zone' exists. 155 zone = data.get('ansible_ec2_placement_availability_zone') 156 if zone is not None: 157 # Use the zone name as the region name unless the zone 158 # name starts with a known AWS region name. 
159 region = zone 160 for r in self.AWS_REGIONS: 161 if zone.startswith(r): 162 region = r 163 break 164 data['ansible_ec2_placement_region'] = region 165 166 def run(self): 167 self.fetch(self.uri_meta) # populate _data 168 data = self._mangle_fields(self._data, self.uri_meta) 169 data[self._prefix % 'user-data'] = self._fetch(self.uri_user) 170 data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh) 171 self.fix_invalid_varnames(data) 172 self.add_ec2_region(data) 173 return data 174 175 176 def main(): 177 argument_spec = url_argument_spec() 178 179 module = AnsibleModule( 180 argument_spec=argument_spec, 181 supports_check_mode=True, 182 ) 183 184 ec2_facts = Ec2Metadata(module).run() 185 ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts) 186 187 module.exit_json(**ec2_facts_result) 188 189 190 # import module snippets 191 from ansible.module_utils.basic import * 192 from ansible.module_utils.urls import * 193 194 if __name__ == '__main__': 195 main() 196 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansible/modules/cloud/amazon/ec2_facts.py b/lib/ansible/modules/cloud/amazon/ec2_facts.py --- a/lib/ansible/modules/cloud/amazon/ec2_facts.py +++ b/lib/ansible/modules/cloud/amazon/ec2_facts.py @@ -57,7 +57,7 @@ import socket import re - +from ansible.module_utils._text import to_text socket.setdefaulttimeout(5) @@ -97,7 +97,7 @@ data = response.read() else: data = None - return data + return to_text(data) def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']): new_fields = {} @@ -110,7 +110,7 @@ new_key = "".join(split_fields) new_fields[self._prefix % new_key] = value for pattern in filter_patterns: - for key in new_fields.keys(): + for key in dict(new_fields): match = re.search(pattern, key) if match: new_fields.pop(key) @@ -138,11 +138,10 @@ def fix_invalid_varnames(self, data): """Change ':'' and '-' to '_' to ensure valid template variable names""" - for (key, value) in data.items(): + for key in data: if ':' in key or '-' in key: newkey = key.replace(':', '_').replace('-', '_') - del data[key] - data[newkey] = value + data[newkey] = data.pop(key) def add_ec2_region(self, data): """Use the 'ansible_ec2_placement_availability_zone' key/value
{"golden_diff": "diff --git a/lib/ansible/modules/cloud/amazon/ec2_facts.py b/lib/ansible/modules/cloud/amazon/ec2_facts.py\n--- a/lib/ansible/modules/cloud/amazon/ec2_facts.py\n+++ b/lib/ansible/modules/cloud/amazon/ec2_facts.py\n@@ -57,7 +57,7 @@\n \n import socket\n import re\n-\n+from ansible.module_utils._text import to_text\n socket.setdefaulttimeout(5)\n \n \n@@ -97,7 +97,7 @@\n data = response.read()\n else:\n data = None\n- return data\n+ return to_text(data)\n \n def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):\n new_fields = {}\n@@ -110,7 +110,7 @@\n new_key = \"\".join(split_fields)\n new_fields[self._prefix % new_key] = value\n for pattern in filter_patterns:\n- for key in new_fields.keys():\n+ for key in dict(new_fields):\n match = re.search(pattern, key)\n if match:\n new_fields.pop(key)\n@@ -138,11 +138,10 @@\n \n def fix_invalid_varnames(self, data):\n \"\"\"Change ':'' and '-' to '_' to ensure valid template variable names\"\"\"\n- for (key, value) in data.items():\n+ for key in data:\n if ':' in key or '-' in key:\n newkey = key.replace(':', '_').replace('-', '_')\n- del data[key]\n- data[newkey] = value\n+ data[newkey] = data.pop(key)\n \n def add_ec2_region(self, data):\n \"\"\"Use the 'ansible_ec2_placement_availability_zone' key/value\n", "issue": "ec2_facts throws str error in Python 3.5+\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\nec2_facts\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\r\n```\r\nansible 2.3.0.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = Default w/o overrides\r\n python version = 3.5.2 (default, Nov 17 2016, 17:05:23) [GCC 5.4.0 20160609]\r\n```\r\n\r\n##### CONFIGURATION\r\n```\r\n# inventory file that I use via ansible-playbook -i\r\n[all]\r\nlocalhost\r\n\r\n[all:vars]\r\nansible_python_interpreter=/usr/bin/python3\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\nUbuntu 16.04.2 LTS\r\nPython 3.5.2\r\n\r\n##### SUMMARY\r\nWhen trying to run `ec2_facts`, I get this error: `TypeError: a bytes-like object is required, not 'str'`\r\n<!--- Explain the problem briefly -->\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n# in any playbook\r\n - name: Gather facts\r\n action: ec2_facts\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nThat ec2_facts would work.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? 
If possible run with extra verbosity (-vvvv) -->\r\nIt errors out with `a bytes-like object is required, not 'str'`\r\n\r\nJust as a note, I thought this was connected to #17038, but perhaps not.\nec2_facts throws str error in Python 3.5+\n<!---\r\nVerify first that your issue/request is not already reported on GitHub.\r\nAlso test if the latest release, and master branch are affected too.\r\n-->\r\n\r\n##### ISSUE TYPE\r\n<!--- Pick one below and delete the rest: -->\r\n - Bug Report\r\n\r\n##### COMPONENT NAME\r\nec2_facts\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \u201cansible --version\u201d between quotes below -->\r\n```\r\nansible 2.3.0.0\r\n config file = /etc/ansible/ansible.cfg\r\n configured module search path = Default w/o overrides\r\n python version = 3.5.2 (default, Nov 17 2016, 17:05:23) [GCC 5.4.0 20160609]\r\n```\r\n\r\n##### CONFIGURATION\r\n```\r\n# inventory file that I use via ansible-playbook -i\r\n[all]\r\nlocalhost\r\n\r\n[all:vars]\r\nansible_python_interpreter=/usr/bin/python3\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\nUbuntu 16.04.2 LTS\r\nPython 3.5.2\r\n\r\n##### SUMMARY\r\nWhen trying to run `ec2_facts`, I get this error: `TypeError: a bytes-like object is required, not 'str'`\r\n<!--- Explain the problem briefly -->\r\n\r\n##### STEPS TO REPRODUCE\r\n<!---\r\nFor bugs, show exactly how to reproduce the problem, using a minimal test-case.\r\nFor new features, show how the feature would be used.\r\n-->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n# in any playbook\r\n - name: Gather facts\r\n action: ec2_facts\r\n```\r\n\r\n<!--- You can also paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- What did you expect to happen when running the steps above? -->\r\nThat ec2_facts would work.\r\n\r\n##### ACTUAL RESULTS\r\n<!--- What actually happened? If possible run with extra verbosity (-vvvv) -->\r\nIt errors out with `a bytes-like object is required, not 'str'`\r\n\r\nJust as a note, I thought this was connected to #17038, but perhaps not.\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['stableinterface'],\n 'supported_by': 'curated'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_facts\nshort_description: Gathers facts about remote hosts within ec2 (aws)\nversion_added: \"1.0\"\noptions:\n validate_certs:\n description:\n - If C(no), SSL certificates will not be validated. 
This should only be used\n on personally controlled sites using self-signed certificates.\n required: false\n default: 'yes'\n choices: ['yes', 'no']\n version_added: '1.5.1'\ndescription:\n - This module fetches data from the metadata servers in ec2 (aws) as per\n http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.\n The module must be called from within the EC2 instance itself.\nnotes:\n - Parameters to filter on ec2_facts may be added later.\nauthor: \"Silviu Dicu (@silviud) <[email protected]>\"\n'''\n\nEXAMPLES = '''\n# Conditional example\n- name: Gather facts\n ec2_facts:\n\n- name: Conditional\n debug:\n msg: \"This instance is a t1.micro\"\n when: ansible_ec2_instance_type == \"t1.micro\"\n'''\n\nimport socket\nimport re\n\nsocket.setdefaulttimeout(5)\n\n\nclass Ec2Metadata(object):\n ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'\n ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'\n ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'\n\n AWS_REGIONS = ('ap-northeast-1',\n 'ap-northeast-2',\n 'ap-south-1',\n 'ap-southeast-1',\n 'ap-southeast-2',\n 'ca-central-1',\n 'eu-central-1',\n 'eu-west-1',\n 'eu-west-2',\n 'sa-east-1',\n 'us-east-1',\n 'us-east-2',\n 'us-west-1',\n 'us-west-2',\n 'us-gov-west-1',\n )\n\n def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):\n self.module = module\n self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri\n self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri\n self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri\n self._data = {}\n self._prefix = 'ansible_ec2_%s'\n\n def _fetch(self, url):\n (response, info) = fetch_url(self.module, url, force=True)\n if response:\n data = response.read()\n else:\n data = None\n return data\n\n def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):\n new_fields = {}\n for key, value in fields.items():\n split_fields = key[len(uri):].split('/')\n if len(split_fields) > 1 and split_fields[1]:\n new_key = \"-\".join(split_fields)\n new_fields[self._prefix % new_key] = value\n else:\n new_key = \"\".join(split_fields)\n new_fields[self._prefix % new_key] = value\n for pattern in filter_patterns:\n for key in new_fields.keys():\n match = re.search(pattern, key)\n if match:\n new_fields.pop(key)\n return new_fields\n\n def fetch(self, uri, recurse=True):\n raw_subfields = self._fetch(uri)\n if not raw_subfields:\n return\n subfields = raw_subfields.split('\\n')\n for field in subfields:\n if field.endswith('/') and recurse:\n self.fetch(uri + field)\n if uri.endswith('/'):\n new_uri = uri + field\n else:\n new_uri = uri + '/' + field\n if new_uri not in self._data and not new_uri.endswith('/'):\n content = self._fetch(new_uri)\n if field == 'security-groups':\n sg_fields = \",\".join(content.split('\\n'))\n self._data['%s' % (new_uri)] = sg_fields\n else:\n self._data['%s' % (new_uri)] = content\n\n def fix_invalid_varnames(self, data):\n \"\"\"Change ':'' and '-' to '_' to ensure valid template variable names\"\"\"\n for (key, value) in data.items():\n if ':' in key or '-' in key:\n newkey = key.replace(':', '_').replace('-', '_')\n del data[key]\n data[newkey] = value\n\n def add_ec2_region(self, data):\n \"\"\"Use the 'ansible_ec2_placement_availability_zone' key/value\n pair to add 'ansible_ec2_placement_region' key/value pair with\n the EC2 region name.\n \"\"\"\n\n # Only add a 'ansible_ec2_placement_region' key if the\n # 
'ansible_ec2_placement_availability_zone' exists.\n zone = data.get('ansible_ec2_placement_availability_zone')\n if zone is not None:\n # Use the zone name as the region name unless the zone\n # name starts with a known AWS region name.\n region = zone\n for r in self.AWS_REGIONS:\n if zone.startswith(r):\n region = r\n break\n data['ansible_ec2_placement_region'] = region\n\n def run(self):\n self.fetch(self.uri_meta) # populate _data\n data = self._mangle_fields(self._data, self.uri_meta)\n data[self._prefix % 'user-data'] = self._fetch(self.uri_user)\n data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)\n self.fix_invalid_varnames(data)\n self.add_ec2_region(data)\n return data\n\n\ndef main():\n argument_spec = url_argument_spec()\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n )\n\n ec2_facts = Ec2Metadata(module).run()\n ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)\n\n module.exit_json(**ec2_facts_result)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/cloud/amazon/ec2_facts.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nANSIBLE_METADATA = {'metadata_version': '1.0',\n 'status': ['stableinterface'],\n 'supported_by': 'curated'}\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_facts\nshort_description: Gathers facts about remote hosts within ec2 (aws)\nversion_added: \"1.0\"\noptions:\n validate_certs:\n description:\n - If C(no), SSL certificates will not be validated. 
This should only be used\n on personally controlled sites using self-signed certificates.\n required: false\n default: 'yes'\n choices: ['yes', 'no']\n version_added: '1.5.1'\ndescription:\n - This module fetches data from the metadata servers in ec2 (aws) as per\n http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html.\n The module must be called from within the EC2 instance itself.\nnotes:\n - Parameters to filter on ec2_facts may be added later.\nauthor: \"Silviu Dicu (@silviud) <[email protected]>\"\n'''\n\nEXAMPLES = '''\n# Conditional example\n- name: Gather facts\n ec2_facts:\n\n- name: Conditional\n debug:\n msg: \"This instance is a t1.micro\"\n when: ansible_ec2_instance_type == \"t1.micro\"\n'''\n\nimport socket\nimport re\nfrom ansible.module_utils._text import to_text\nsocket.setdefaulttimeout(5)\n\n\nclass Ec2Metadata(object):\n ec2_metadata_uri = 'http://169.254.169.254/latest/meta-data/'\n ec2_sshdata_uri = 'http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key'\n ec2_userdata_uri = 'http://169.254.169.254/latest/user-data/'\n\n AWS_REGIONS = ('ap-northeast-1',\n 'ap-northeast-2',\n 'ap-south-1',\n 'ap-southeast-1',\n 'ap-southeast-2',\n 'ca-central-1',\n 'eu-central-1',\n 'eu-west-1',\n 'eu-west-2',\n 'sa-east-1',\n 'us-east-1',\n 'us-east-2',\n 'us-west-1',\n 'us-west-2',\n 'us-gov-west-1',\n )\n\n def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):\n self.module = module\n self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri\n self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri\n self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri\n self._data = {}\n self._prefix = 'ansible_ec2_%s'\n\n def _fetch(self, url):\n (response, info) = fetch_url(self.module, url, force=True)\n if response:\n data = response.read()\n else:\n data = None\n return to_text(data)\n\n def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):\n new_fields = {}\n for key, value in fields.items():\n split_fields = key[len(uri):].split('/')\n if len(split_fields) > 1 and split_fields[1]:\n new_key = \"-\".join(split_fields)\n new_fields[self._prefix % new_key] = value\n else:\n new_key = \"\".join(split_fields)\n new_fields[self._prefix % new_key] = value\n for pattern in filter_patterns:\n for key in dict(new_fields):\n match = re.search(pattern, key)\n if match:\n new_fields.pop(key)\n return new_fields\n\n def fetch(self, uri, recurse=True):\n raw_subfields = self._fetch(uri)\n if not raw_subfields:\n return\n subfields = raw_subfields.split('\\n')\n for field in subfields:\n if field.endswith('/') and recurse:\n self.fetch(uri + field)\n if uri.endswith('/'):\n new_uri = uri + field\n else:\n new_uri = uri + '/' + field\n if new_uri not in self._data and not new_uri.endswith('/'):\n content = self._fetch(new_uri)\n if field == 'security-groups':\n sg_fields = \",\".join(content.split('\\n'))\n self._data['%s' % (new_uri)] = sg_fields\n else:\n self._data['%s' % (new_uri)] = content\n\n def fix_invalid_varnames(self, data):\n \"\"\"Change ':'' and '-' to '_' to ensure valid template variable names\"\"\"\n for key in data:\n if ':' in key or '-' in key:\n newkey = key.replace(':', '_').replace('-', '_')\n data[newkey] = data.pop(key)\n\n def add_ec2_region(self, data):\n \"\"\"Use the 'ansible_ec2_placement_availability_zone' key/value\n pair to add 'ansible_ec2_placement_region' key/value pair with\n the EC2 region name.\n \"\"\"\n\n # Only add a 'ansible_ec2_placement_region' key if the\n # 
'ansible_ec2_placement_availability_zone' exists.\n zone = data.get('ansible_ec2_placement_availability_zone')\n if zone is not None:\n # Use the zone name as the region name unless the zone\n # name starts with a known AWS region name.\n region = zone\n for r in self.AWS_REGIONS:\n if zone.startswith(r):\n region = r\n break\n data['ansible_ec2_placement_region'] = region\n\n def run(self):\n self.fetch(self.uri_meta) # populate _data\n data = self._mangle_fields(self._data, self.uri_meta)\n data[self._prefix % 'user-data'] = self._fetch(self.uri_user)\n data[self._prefix % 'public-key'] = self._fetch(self.uri_ssh)\n self.fix_invalid_varnames(data)\n self.add_ec2_region(data)\n return data\n\n\ndef main():\n argument_spec = url_argument_spec()\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n )\n\n ec2_facts = Ec2Metadata(module).run()\n ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)\n\n module.exit_json(**ec2_facts_result)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\n\nif __name__ == '__main__':\n main()\n", "path": "lib/ansible/modules/cloud/amazon/ec2_facts.py"}]}
3,306
380
gh_patches_debug_66590
rasdani/github-patches
git_diff
StackStorm__st2-3843
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Action 'linux.service' fails on Centos7 When I tried to execute restart some service on the Centos7 server got the following error: ``` Traceback (most recent call last): File "/tmp/5a0459bc07ac686fb813a920/service.py", line 24, in <module> subprocess.call(cmd, shell=True) NameError: name 'cmd' is not defined ``` After investigation the resolution has been found: in file /opt/stackstorm/packs/linux/actions/service.py the entry `elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):` fixed to `elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or re.search(distro, 'CentOS Linux'):` The issue has gone --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `contrib/linux/actions/service.py` Content: ``` 1 #!/usr/bin/env python 2 3 import re 4 import sys 5 import os 6 import platform 7 import subprocess 8 9 distro = platform.linux_distribution()[0] 10 11 args = {'act': sys.argv[1], 'service': sys.argv[2]} 12 13 if re.search(distro, 'Ubuntu'): 14 if os.path.isfile("/etc/init/%s.conf" % args['service']): 15 cmd = args['act'] + " " + args['service'] 16 elif os.path.isfile("/etc/init.d/%s" % args['service']): 17 cmd = "/etc/init.d/%s %s" % (args['service'], args['act']) 18 else: 19 print("Unknown service") 20 sys.exit(2) 21 elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'): 22 cmd = "systemctl %s %s" % (args['act'], args['service']) 23 24 subprocess.call(cmd, shell=True) 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/contrib/linux/actions/service.py b/contrib/linux/actions/service.py --- a/contrib/linux/actions/service.py +++ b/contrib/linux/actions/service.py @@ -18,7 +18,8 @@ else: print("Unknown service") sys.exit(2) -elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'): +elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or \ + re.search(distro, 'CentOS Linux'): cmd = "systemctl %s %s" % (args['act'], args['service']) subprocess.call(cmd, shell=True)
{"golden_diff": "diff --git a/contrib/linux/actions/service.py b/contrib/linux/actions/service.py\n--- a/contrib/linux/actions/service.py\n+++ b/contrib/linux/actions/service.py\n@@ -18,7 +18,8 @@\n else:\n print(\"Unknown service\")\n sys.exit(2)\n-elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):\n+elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or \\\n+ re.search(distro, 'CentOS Linux'):\n cmd = \"systemctl %s %s\" % (args['act'], args['service'])\n \n subprocess.call(cmd, shell=True)\n", "issue": "Action 'linux.service' fails on Centos7\nWhen I tried to execute restart some service on the Centos7 server got the following error:\r\n```\r\nTraceback (most recent call last):\r\n File \"/tmp/5a0459bc07ac686fb813a920/service.py\", line 24, in <module>\r\n subprocess.call(cmd, shell=True)\r\nNameError: name 'cmd' is not defined\r\n```\r\nAfter investigation the resolution has been found:\r\nin file /opt/stackstorm/packs/linux/actions/service.py the entry\r\n\r\n`elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):`\r\n\r\nfixed to \r\n\r\n`elif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or re.search(distro, 'CentOS Linux'):`\r\n\r\nThe issue has gone\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport re\nimport sys\nimport os\nimport platform\nimport subprocess\n\ndistro = platform.linux_distribution()[0]\n\nargs = {'act': sys.argv[1], 'service': sys.argv[2]}\n\nif re.search(distro, 'Ubuntu'):\n if os.path.isfile(\"/etc/init/%s.conf\" % args['service']):\n cmd = args['act'] + \" \" + args['service']\n elif os.path.isfile(\"/etc/init.d/%s\" % args['service']):\n cmd = \"/etc/init.d/%s %s\" % (args['service'], args['act'])\n else:\n print(\"Unknown service\")\n sys.exit(2)\nelif re.search(distro, 'Redhat') or re.search(distro, 'Fedora'):\n cmd = \"systemctl %s %s\" % (args['act'], args['service'])\n\nsubprocess.call(cmd, shell=True)\n", "path": "contrib/linux/actions/service.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport re\nimport sys\nimport os\nimport platform\nimport subprocess\n\ndistro = platform.linux_distribution()[0]\n\nargs = {'act': sys.argv[1], 'service': sys.argv[2]}\n\nif re.search(distro, 'Ubuntu'):\n if os.path.isfile(\"/etc/init/%s.conf\" % args['service']):\n cmd = args['act'] + \" \" + args['service']\n elif os.path.isfile(\"/etc/init.d/%s\" % args['service']):\n cmd = \"/etc/init.d/%s %s\" % (args['service'], args['act'])\n else:\n print(\"Unknown service\")\n sys.exit(2)\nelif re.search(distro, 'Redhat') or re.search(distro, 'Fedora') or \\\n re.search(distro, 'CentOS Linux'):\n cmd = \"systemctl %s %s\" % (args['act'], args['service'])\n\nsubprocess.call(cmd, shell=True)\n", "path": "contrib/linux/actions/service.py"}]}
688
148
gh_patches_debug_29344
rasdani/github-patches
git_diff
getnikola__nikola-2241
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- jinjify: convert a single mako template file to jinja? It looks like jinjify requires a full theme to work. For example, how do you synchronize the two templates in the [projectpages plugin](https://github.com/getnikola/plugins/tree/master/v7/projectpages/templates) with jinjify? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/jinjify.py` Content: ``` 1 #!/usr/bin/env python 2 import io 3 import glob 4 import sys 5 import os 6 import re 7 import json 8 import shutil 9 10 import colorama 11 import jinja2 12 13 dumb_replacements = [ 14 ["{% if any(post.is_mathjax for post in posts) %}", '{% if posts|selectattr("is_mathjax")|list %}'], 15 ["json.dumps(title)", "title|tojson"], 16 ["{{ parent.extra_head() }}", "{{ super() }}"], 17 ["{{ parent.content() }}", "{{ super() }}"], 18 ["prefix='\\", "prefix='"], 19 ["og: http://ogp.me/ns# \\", "og: http://ogp.me/ns#"], 20 ["article: http://ogp.me/ns/article# \\", "article: http://ogp.me/ns/article#"], 21 ["fb: http://ogp.me/ns/fb# \\", "fb: http://ogp.me/ns/fb#"], 22 ['dir="rtl" \\', 'dir="rtl"'], 23 ['sorted(translations)', 'translations|sort'], 24 ] 25 26 dumber_replacements = [ 27 ['<%! import json %>\n\n', ''], 28 ["<html\n\\", "<html\n"], 29 ["\n'\\\n", "\n'\n"], 30 ["{% endif %}\n\\", "{% endif %}\n"] 31 ] 32 33 34 def jinjify(in_theme, out_theme): 35 """Convert in_theme into a jinja version and put it in out_theme""" 36 37 in_templates_path = os.path.join(in_theme, "templates") 38 out_templates_path = os.path.join(out_theme, "templates") 39 try: 40 os.makedirs(out_templates_path) 41 except: 42 pass 43 lookup = jinja2.Environment() 44 lookup.filters['tojson'] = json.dumps 45 lookup.loader = jinja2.FileSystemLoader([out_templates_path], encoding='utf-8') 46 for template in glob.glob(os.path.join(in_templates_path, "*.tmpl")): 47 out_template = os.path.join(out_templates_path, os.path.basename(template)) 48 with io.open(template, "r", encoding="utf-8") as inf: 49 data = mako2jinja(inf) 50 51 lines = [] 52 for line in data.splitlines(): 53 for repl in dumb_replacements: 54 line = line.replace(*repl) 55 lines.append(line) 56 data = '\n'.join(lines) 57 58 for repl in dumber_replacements: 59 data = data.replace(*repl) 60 61 with io.open(out_template, "w+", encoding="utf-8") as outf: 62 outf.write(data + '\n') 63 64 # Syntax check output 65 source, filename = lookup.loader.get_source(lookup, os.path.basename(template))[:2] 66 try: 67 lookup.parse(source) 68 except Exception as e: 69 error("Syntax error in {0}:{1}".format(out_template, e.lineno)) 70 71 parent = os.path.basename(in_theme.rstrip('/')) 72 child = os.path.basename(out_theme.rstrip('/')) 73 mappings = { 74 'base-jinja': 'base', 75 'bootstrap3-jinja': 'base-jinja', 76 } 77 78 if child in mappings: 79 parent = mappings[child] 80 81 with io.open(os.path.join(out_theme, "parent"), "w+", encoding='utf-8') as outf: 82 outf.write(u'{0}\n'.format(parent)) 83 84 with io.open(os.path.join(out_theme, "engine"), "w+", encoding='utf-8') as outf: 85 outf.write(u"jinja\n") 86 87 # Copy assets in bootstrap/bootstrap3 88 if child == 'bootstrap3-jinja': 89 shutil.rmtree(os.path.join(out_theme, "assets")) 90 shutil.copytree( 91 os.path.join(in_theme, "assets"), os.path.join(out_theme, "assets"), 92 symlinks=True) 93 94 # Copy bundles 95 # shutil.copy(os.path.join(in_theme, "bundles"), 
os.path.join(out_theme, "bundles")) 96 97 # Copy README 98 if os.path.isfile(os.path.join(in_theme, "README.md")): 99 shutil.copy(os.path.join(in_theme, "README.md"), os.path.join(out_theme, "README.md")) 100 101 102 def error(msg): 103 print(colorama.Fore.RED + "ERROR:" + msg) 104 105 106 def mako2jinja(input_file): 107 108 output = '' 109 110 # TODO: OMG, this code is so horrible. Look at it; just look at it: 111 112 macro_start = re.compile(r'(.*)<%.*def name="(.*?)".*>(.*)', re.IGNORECASE) 113 macro_end = re.compile(r'(.*)</%def>(.*)', re.IGNORECASE) 114 115 if_start = re.compile(r'(.*)% *if (.*):(.*)', re.IGNORECASE) 116 if_else = re.compile(r'(.*)% *else.*:(.*)', re.IGNORECASE) 117 if_elif = re.compile(r'(.*)% *elif (.*):(.*)', re.IGNORECASE) 118 if_end = re.compile(r'(.*)% *endif(.*)', re.IGNORECASE) 119 120 for_start = re.compile(r'(.*)% *for (.*):(.*)', re.IGNORECASE) 121 for_end = re.compile(r'(.*)% *endfor(.*)', re.IGNORECASE) 122 123 namespace = re.compile(r'(.*)<% *namespace name="(.*?)".* file="(.*?)".*/>(.*)', re.IGNORECASE) 124 inherit = re.compile(r'(.*)<% *inherit file="(.*?)".*/>(.*)', re.IGNORECASE) 125 126 block_single_line = re.compile(r'(.*)<% *block.*name="(.*?)".*>(.*)</% *block>(.*)', re.IGNORECASE) 127 block_start = re.compile(r'(.*)<% *block.*name="(.*?)".*>(.*)', re.IGNORECASE) 128 block_end = re.compile(r'(.*)</%block>(.*)', re.IGNORECASE) 129 130 val = re.compile(r'\$\{(.*?)\}', re.IGNORECASE) 131 func_len = re.compile(r'len\((.*?)\)', re.IGNORECASE) 132 filter_h = re.compile(r'\|h', re.IGNORECASE) 133 filter_striphtml = re.compile(r'\|striphtml', re.IGNORECASE) 134 filter_u = re.compile(r'\|u', re.IGNORECASE) 135 136 comment_single_line = re.compile(r'^.*##(.*?)$', re.IGNORECASE) 137 138 for line in input_file: 139 140 # Process line for repeated inline replacements 141 m_val = val.search(line) 142 m_func_len = func_len.search(line) 143 m_filter_h = filter_h.search(line) 144 m_filter_striphtml = filter_striphtml.search(line) 145 m_filter_u = filter_u.search(line) 146 147 if m_val: 148 line = val.sub(r'{{ \1 }}', line) 149 150 if m_filter_h: 151 line = filter_h.sub(r'|e', line) 152 153 if m_filter_striphtml: 154 line = filter_striphtml.sub(r'|e', line) 155 156 if m_filter_u: 157 line = filter_u.sub(r'|urlencode', line) 158 159 if m_func_len: 160 line = func_len.sub(r'\1|length', line) 161 162 # Process line for single 'whole line' replacements 163 m_macro_start = macro_start.search(line) 164 m_macro_end = macro_end.search(line) 165 m_if_start = if_start.search(line) 166 m_if_else = if_else.search(line) 167 m_if_elif = if_elif.search(line) 168 m_if_end = if_end.search(line) 169 m_for_start = for_start.search(line) 170 m_for_end = for_end.search(line) 171 m_namspace = namespace.search(line) 172 m_inherit = inherit.search(line) 173 m_block_single_line = block_single_line.search(line) 174 m_block_start = block_start.search(line) 175 m_block_end = block_end.search(line) 176 177 m_comment_single_line = comment_single_line.search(line) 178 179 if m_comment_single_line: 180 output += m_comment_single_line.expand(r'{# \1 #}') + '\n' 181 182 elif m_macro_start: 183 output += m_macro_start.expand(r'\1{% macro \2 %}\3') + '\n' 184 elif m_macro_end: 185 output += m_macro_end.expand(r'\1{% endmacro %}\1') + '\n' 186 187 elif m_if_start: 188 output += m_if_start.expand(r'\1{% if \2 %}\3') + '\n' 189 elif m_if_else: 190 output += m_if_else.expand(r'\1{% else %}\2') + '\n' 191 elif m_if_elif: 192 output += m_if_elif.expand(r'\1{% elif \2 %}\3') + '\n' 193 elif m_if_end: 194 
output += m_if_end.expand(r'\1{% endif %}\2') + '\n' 195 196 elif m_for_start: 197 output += m_for_start.expand(r'\1{% for \2 %}\3') + '\n' 198 elif m_for_end: 199 output += m_for_end.expand(r'\1{% endfor %}\2') + '\n' 200 201 elif m_namspace: 202 output += m_namspace.expand(r"\1{% import '\3' as \2 with context %}\4") + '\n' 203 elif m_inherit: 204 output += m_inherit.expand(r"{% extends '\2' %}\3") + '\n' 205 206 elif m_block_single_line: 207 output += m_block_single_line.expand(r'\1{% block \2 %}\3{% endblock %}\4') + '\n' 208 elif m_block_start: 209 output += m_block_start.expand(r'\1{% block \2 %}\3') + '\n' 210 elif m_block_end: 211 output += m_block_end.expand(r'\1{% endblock %}\2') + '\n' 212 213 else: 214 # Doesn't match anything we're going to process, pass though 215 output += line 216 217 return output 218 219 220 def jinjify_shortcodes(in_dir, out_dir): 221 for fname in os.listdir(in_dir): 222 if not fname.endswith('.tmpl'): 223 continue 224 in_file = os.path.join(in_dir, fname) 225 out_file = os.path.join(out_dir, fname) 226 with open(in_file) as inf: 227 data = mako2jinja(inf) 228 with open(out_file, 'w') as outf: 229 outf.write(data) 230 231 232 if __name__ == "__main__": 233 if len(sys.argv) == 1: 234 print('Performing standard conversions:') 235 for m, j in ( 236 ('nikola/data/themes/base', 'nikola/data/themes/base-jinja'), 237 ('nikola/data/themes/bootstrap3', 'nikola/data/themes/bootstrap3-jinja') 238 ): 239 print(' {0} -> {1}'.format(m, j)) 240 jinjify(m, j) 241 jinjify_shortcodes('nikola/data/shortcodes/mako', 'nikola/data/shortcodes/jinja') 242 elif len(sys.argv) != 3: 243 print('ERROR: needs input and output directory, or no arguments for default conversions.') 244 else: 245 jinjify(sys.argv[1], sys.argv[2]) 246 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/jinjify.py b/scripts/jinjify.py --- a/scripts/jinjify.py +++ b/scripts/jinjify.py @@ -6,6 +6,7 @@ import re import json import shutil +import tempfile import colorama import jinja2 @@ -229,6 +230,11 @@ outf.write(data) +def usage(): + print("Usage: python {} [in-dir] [out-dir]".format(sys.argv[0])) + print("OR") + print("Usage: python {} [in-file] [out-file]".format(sys.argv[0])) + if __name__ == "__main__": if len(sys.argv) == 1: print('Performing standard conversions:') @@ -240,6 +246,18 @@ jinjify(m, j) jinjify_shortcodes('nikola/data/shortcodes/mako', 'nikola/data/shortcodes/jinja') elif len(sys.argv) != 3: - print('ERROR: needs input and output directory, or no arguments for default conversions.') - else: + print('ERROR: needs input and output directory (file), or no arguments for default conversions.') + usage() + elif os.path.isdir(sys.argv[1]) and (os.path.isdir(sys.argv[2]) or not os.path.exists(sys.argv[2])): jinjify(sys.argv[1], sys.argv[2]) + elif os.path.isfile(sys.argv[1]) and (os.path.isfile(sys.argv[2]) or not os.path.exists(sys.argv[2])): + tmpdir = tempfile.mkdtemp() + indir = os.path.sep.join((tmpdir, 'in', 'templates')) + outdir = os.path.sep.join((tmpdir, 'out', 'templates')) + os.makedirs(indir) + shutil.copy(sys.argv[1], indir) + jinjify(os.path.dirname(indir), os.path.dirname(outdir)) + shutil.move(os.path.sep.join((outdir, os.path.basename(sys.argv[1]))), sys.argv[2]) + else: + print('ERROR: the two arguments must be both directories or files') + usage()
{"golden_diff": "diff --git a/scripts/jinjify.py b/scripts/jinjify.py\n--- a/scripts/jinjify.py\n+++ b/scripts/jinjify.py\n@@ -6,6 +6,7 @@\n import re\n import json\n import shutil\n+import tempfile\n \n import colorama\n import jinja2\n@@ -229,6 +230,11 @@\n outf.write(data)\n \n \n+def usage():\n+ print(\"Usage: python {} [in-dir] [out-dir]\".format(sys.argv[0]))\n+ print(\"OR\")\n+ print(\"Usage: python {} [in-file] [out-file]\".format(sys.argv[0]))\n+\n if __name__ == \"__main__\":\n if len(sys.argv) == 1:\n print('Performing standard conversions:')\n@@ -240,6 +246,18 @@\n jinjify(m, j)\n jinjify_shortcodes('nikola/data/shortcodes/mako', 'nikola/data/shortcodes/jinja')\n elif len(sys.argv) != 3:\n- print('ERROR: needs input and output directory, or no arguments for default conversions.')\n- else:\n+ print('ERROR: needs input and output directory (file), or no arguments for default conversions.')\n+ usage()\n+ elif os.path.isdir(sys.argv[1]) and (os.path.isdir(sys.argv[2]) or not os.path.exists(sys.argv[2])):\n jinjify(sys.argv[1], sys.argv[2])\n+ elif os.path.isfile(sys.argv[1]) and (os.path.isfile(sys.argv[2]) or not os.path.exists(sys.argv[2])):\n+ tmpdir = tempfile.mkdtemp()\n+ indir = os.path.sep.join((tmpdir, 'in', 'templates'))\n+ outdir = os.path.sep.join((tmpdir, 'out', 'templates'))\n+ os.makedirs(indir)\n+ shutil.copy(sys.argv[1], indir)\n+ jinjify(os.path.dirname(indir), os.path.dirname(outdir))\n+ shutil.move(os.path.sep.join((outdir, os.path.basename(sys.argv[1]))), sys.argv[2])\n+ else:\n+ print('ERROR: the two arguments must be both directories or files')\n+ usage()\n", "issue": "jinjify: convert a single mako template file to jinja?\nIt looks like jinjify requires a full theme to work. For example, how do you synchronize the two templates in the [projectpages plugin](https://github.com/getnikola/plugins/tree/master/v7/projectpages/templates) with jinjify?\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport io\nimport glob\nimport sys\nimport os\nimport re\nimport json\nimport shutil\n\nimport colorama\nimport jinja2\n\ndumb_replacements = [\n [\"{% if any(post.is_mathjax for post in posts) %}\", '{% if posts|selectattr(\"is_mathjax\")|list %}'],\n [\"json.dumps(title)\", \"title|tojson\"],\n [\"{{ parent.extra_head() }}\", \"{{ super() }}\"],\n [\"{{ parent.content() }}\", \"{{ super() }}\"],\n [\"prefix='\\\\\", \"prefix='\"],\n [\"og: http://ogp.me/ns# \\\\\", \"og: http://ogp.me/ns#\"],\n [\"article: http://ogp.me/ns/article# \\\\\", \"article: http://ogp.me/ns/article#\"],\n [\"fb: http://ogp.me/ns/fb# \\\\\", \"fb: http://ogp.me/ns/fb#\"],\n ['dir=\"rtl\" \\\\', 'dir=\"rtl\"'],\n ['sorted(translations)', 'translations|sort'],\n]\n\ndumber_replacements = [\n ['<%! 
import json %>\\n\\n', ''],\n [\"<html\\n\\\\\", \"<html\\n\"],\n [\"\\n'\\\\\\n\", \"\\n'\\n\"],\n [\"{% endif %}\\n\\\\\", \"{% endif %}\\n\"]\n]\n\n\ndef jinjify(in_theme, out_theme):\n \"\"\"Convert in_theme into a jinja version and put it in out_theme\"\"\"\n\n in_templates_path = os.path.join(in_theme, \"templates\")\n out_templates_path = os.path.join(out_theme, \"templates\")\n try:\n os.makedirs(out_templates_path)\n except:\n pass\n lookup = jinja2.Environment()\n lookup.filters['tojson'] = json.dumps\n lookup.loader = jinja2.FileSystemLoader([out_templates_path], encoding='utf-8')\n for template in glob.glob(os.path.join(in_templates_path, \"*.tmpl\")):\n out_template = os.path.join(out_templates_path, os.path.basename(template))\n with io.open(template, \"r\", encoding=\"utf-8\") as inf:\n data = mako2jinja(inf)\n\n lines = []\n for line in data.splitlines():\n for repl in dumb_replacements:\n line = line.replace(*repl)\n lines.append(line)\n data = '\\n'.join(lines)\n\n for repl in dumber_replacements:\n data = data.replace(*repl)\n\n with io.open(out_template, \"w+\", encoding=\"utf-8\") as outf:\n outf.write(data + '\\n')\n\n # Syntax check output\n source, filename = lookup.loader.get_source(lookup, os.path.basename(template))[:2]\n try:\n lookup.parse(source)\n except Exception as e:\n error(\"Syntax error in {0}:{1}\".format(out_template, e.lineno))\n\n parent = os.path.basename(in_theme.rstrip('/'))\n child = os.path.basename(out_theme.rstrip('/'))\n mappings = {\n 'base-jinja': 'base',\n 'bootstrap3-jinja': 'base-jinja',\n }\n\n if child in mappings:\n parent = mappings[child]\n\n with io.open(os.path.join(out_theme, \"parent\"), \"w+\", encoding='utf-8') as outf:\n outf.write(u'{0}\\n'.format(parent))\n\n with io.open(os.path.join(out_theme, \"engine\"), \"w+\", encoding='utf-8') as outf:\n outf.write(u\"jinja\\n\")\n\n # Copy assets in bootstrap/bootstrap3\n if child == 'bootstrap3-jinja':\n shutil.rmtree(os.path.join(out_theme, \"assets\"))\n shutil.copytree(\n os.path.join(in_theme, \"assets\"), os.path.join(out_theme, \"assets\"),\n symlinks=True)\n\n # Copy bundles\n # shutil.copy(os.path.join(in_theme, \"bundles\"), os.path.join(out_theme, \"bundles\"))\n\n # Copy README\n if os.path.isfile(os.path.join(in_theme, \"README.md\")):\n shutil.copy(os.path.join(in_theme, \"README.md\"), os.path.join(out_theme, \"README.md\"))\n\n\ndef error(msg):\n print(colorama.Fore.RED + \"ERROR:\" + msg)\n\n\ndef mako2jinja(input_file):\n\n output = ''\n\n # TODO: OMG, this code is so horrible. 
Look at it; just look at it:\n\n macro_start = re.compile(r'(.*)<%.*def name=\"(.*?)\".*>(.*)', re.IGNORECASE)\n macro_end = re.compile(r'(.*)</%def>(.*)', re.IGNORECASE)\n\n if_start = re.compile(r'(.*)% *if (.*):(.*)', re.IGNORECASE)\n if_else = re.compile(r'(.*)% *else.*:(.*)', re.IGNORECASE)\n if_elif = re.compile(r'(.*)% *elif (.*):(.*)', re.IGNORECASE)\n if_end = re.compile(r'(.*)% *endif(.*)', re.IGNORECASE)\n\n for_start = re.compile(r'(.*)% *for (.*):(.*)', re.IGNORECASE)\n for_end = re.compile(r'(.*)% *endfor(.*)', re.IGNORECASE)\n\n namespace = re.compile(r'(.*)<% *namespace name=\"(.*?)\".* file=\"(.*?)\".*/>(.*)', re.IGNORECASE)\n inherit = re.compile(r'(.*)<% *inherit file=\"(.*?)\".*/>(.*)', re.IGNORECASE)\n\n block_single_line = re.compile(r'(.*)<% *block.*name=\"(.*?)\".*>(.*)</% *block>(.*)', re.IGNORECASE)\n block_start = re.compile(r'(.*)<% *block.*name=\"(.*?)\".*>(.*)', re.IGNORECASE)\n block_end = re.compile(r'(.*)</%block>(.*)', re.IGNORECASE)\n\n val = re.compile(r'\\$\\{(.*?)\\}', re.IGNORECASE)\n func_len = re.compile(r'len\\((.*?)\\)', re.IGNORECASE)\n filter_h = re.compile(r'\\|h', re.IGNORECASE)\n filter_striphtml = re.compile(r'\\|striphtml', re.IGNORECASE)\n filter_u = re.compile(r'\\|u', re.IGNORECASE)\n\n comment_single_line = re.compile(r'^.*##(.*?)$', re.IGNORECASE)\n\n for line in input_file:\n\n # Process line for repeated inline replacements\n m_val = val.search(line)\n m_func_len = func_len.search(line)\n m_filter_h = filter_h.search(line)\n m_filter_striphtml = filter_striphtml.search(line)\n m_filter_u = filter_u.search(line)\n\n if m_val:\n line = val.sub(r'{{ \\1 }}', line)\n\n if m_filter_h:\n line = filter_h.sub(r'|e', line)\n\n if m_filter_striphtml:\n line = filter_striphtml.sub(r'|e', line)\n\n if m_filter_u:\n line = filter_u.sub(r'|urlencode', line)\n\n if m_func_len:\n line = func_len.sub(r'\\1|length', line)\n\n # Process line for single 'whole line' replacements\n m_macro_start = macro_start.search(line)\n m_macro_end = macro_end.search(line)\n m_if_start = if_start.search(line)\n m_if_else = if_else.search(line)\n m_if_elif = if_elif.search(line)\n m_if_end = if_end.search(line)\n m_for_start = for_start.search(line)\n m_for_end = for_end.search(line)\n m_namspace = namespace.search(line)\n m_inherit = inherit.search(line)\n m_block_single_line = block_single_line.search(line)\n m_block_start = block_start.search(line)\n m_block_end = block_end.search(line)\n\n m_comment_single_line = comment_single_line.search(line)\n\n if m_comment_single_line:\n output += m_comment_single_line.expand(r'{# \\1 #}') + '\\n'\n\n elif m_macro_start:\n output += m_macro_start.expand(r'\\1{% macro \\2 %}\\3') + '\\n'\n elif m_macro_end:\n output += m_macro_end.expand(r'\\1{% endmacro %}\\1') + '\\n'\n\n elif m_if_start:\n output += m_if_start.expand(r'\\1{% if \\2 %}\\3') + '\\n'\n elif m_if_else:\n output += m_if_else.expand(r'\\1{% else %}\\2') + '\\n'\n elif m_if_elif:\n output += m_if_elif.expand(r'\\1{% elif \\2 %}\\3') + '\\n'\n elif m_if_end:\n output += m_if_end.expand(r'\\1{% endif %}\\2') + '\\n'\n\n elif m_for_start:\n output += m_for_start.expand(r'\\1{% for \\2 %}\\3') + '\\n'\n elif m_for_end:\n output += m_for_end.expand(r'\\1{% endfor %}\\2') + '\\n'\n\n elif m_namspace:\n output += m_namspace.expand(r\"\\1{% import '\\3' as \\2 with context %}\\4\") + '\\n'\n elif m_inherit:\n output += m_inherit.expand(r\"{% extends '\\2' %}\\3\") + '\\n'\n\n elif m_block_single_line:\n output += m_block_single_line.expand(r'\\1{% block \\2 %}\\3{% 
endblock %}\\4') + '\\n'\n elif m_block_start:\n output += m_block_start.expand(r'\\1{% block \\2 %}\\3') + '\\n'\n elif m_block_end:\n output += m_block_end.expand(r'\\1{% endblock %}\\2') + '\\n'\n\n else:\n # Doesn't match anything we're going to process, pass though\n output += line\n\n return output\n\n\ndef jinjify_shortcodes(in_dir, out_dir):\n for fname in os.listdir(in_dir):\n if not fname.endswith('.tmpl'):\n continue\n in_file = os.path.join(in_dir, fname)\n out_file = os.path.join(out_dir, fname)\n with open(in_file) as inf:\n data = mako2jinja(inf)\n with open(out_file, 'w') as outf:\n outf.write(data)\n \n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n print('Performing standard conversions:')\n for m, j in (\n ('nikola/data/themes/base', 'nikola/data/themes/base-jinja'),\n ('nikola/data/themes/bootstrap3', 'nikola/data/themes/bootstrap3-jinja')\n ):\n print(' {0} -> {1}'.format(m, j))\n jinjify(m, j)\n jinjify_shortcodes('nikola/data/shortcodes/mako', 'nikola/data/shortcodes/jinja')\n elif len(sys.argv) != 3:\n print('ERROR: needs input and output directory, or no arguments for default conversions.')\n else:\n jinjify(sys.argv[1], sys.argv[2])\n", "path": "scripts/jinjify.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport io\nimport glob\nimport sys\nimport os\nimport re\nimport json\nimport shutil\nimport tempfile\n\nimport colorama\nimport jinja2\n\ndumb_replacements = [\n [\"{% if any(post.is_mathjax for post in posts) %}\", '{% if posts|selectattr(\"is_mathjax\")|list %}'],\n [\"json.dumps(title)\", \"title|tojson\"],\n [\"{{ parent.extra_head() }}\", \"{{ super() }}\"],\n [\"{{ parent.content() }}\", \"{{ super() }}\"],\n [\"prefix='\\\\\", \"prefix='\"],\n [\"og: http://ogp.me/ns# \\\\\", \"og: http://ogp.me/ns#\"],\n [\"article: http://ogp.me/ns/article# \\\\\", \"article: http://ogp.me/ns/article#\"],\n [\"fb: http://ogp.me/ns/fb# \\\\\", \"fb: http://ogp.me/ns/fb#\"],\n ['dir=\"rtl\" \\\\', 'dir=\"rtl\"'],\n ['sorted(translations)', 'translations|sort'],\n]\n\ndumber_replacements = [\n ['<%! 
import json %>\\n\\n', ''],\n [\"<html\\n\\\\\", \"<html\\n\"],\n [\"\\n'\\\\\\n\", \"\\n'\\n\"],\n [\"{% endif %}\\n\\\\\", \"{% endif %}\\n\"]\n]\n\n\ndef jinjify(in_theme, out_theme):\n \"\"\"Convert in_theme into a jinja version and put it in out_theme\"\"\"\n\n in_templates_path = os.path.join(in_theme, \"templates\")\n out_templates_path = os.path.join(out_theme, \"templates\")\n try:\n os.makedirs(out_templates_path)\n except:\n pass\n lookup = jinja2.Environment()\n lookup.filters['tojson'] = json.dumps\n lookup.loader = jinja2.FileSystemLoader([out_templates_path], encoding='utf-8')\n for template in glob.glob(os.path.join(in_templates_path, \"*.tmpl\")):\n out_template = os.path.join(out_templates_path, os.path.basename(template))\n with io.open(template, \"r\", encoding=\"utf-8\") as inf:\n data = mako2jinja(inf)\n\n lines = []\n for line in data.splitlines():\n for repl in dumb_replacements:\n line = line.replace(*repl)\n lines.append(line)\n data = '\\n'.join(lines)\n\n for repl in dumber_replacements:\n data = data.replace(*repl)\n\n with io.open(out_template, \"w+\", encoding=\"utf-8\") as outf:\n outf.write(data + '\\n')\n\n # Syntax check output\n source, filename = lookup.loader.get_source(lookup, os.path.basename(template))[:2]\n try:\n lookup.parse(source)\n except Exception as e:\n error(\"Syntax error in {0}:{1}\".format(out_template, e.lineno))\n\n parent = os.path.basename(in_theme.rstrip('/'))\n child = os.path.basename(out_theme.rstrip('/'))\n mappings = {\n 'base-jinja': 'base',\n 'bootstrap3-jinja': 'base-jinja',\n }\n\n if child in mappings:\n parent = mappings[child]\n\n with io.open(os.path.join(out_theme, \"parent\"), \"w+\", encoding='utf-8') as outf:\n outf.write(u'{0}\\n'.format(parent))\n\n with io.open(os.path.join(out_theme, \"engine\"), \"w+\", encoding='utf-8') as outf:\n outf.write(u\"jinja\\n\")\n\n # Copy assets in bootstrap/bootstrap3\n if child == 'bootstrap3-jinja':\n shutil.rmtree(os.path.join(out_theme, \"assets\"))\n shutil.copytree(\n os.path.join(in_theme, \"assets\"), os.path.join(out_theme, \"assets\"),\n symlinks=True)\n\n # Copy bundles\n # shutil.copy(os.path.join(in_theme, \"bundles\"), os.path.join(out_theme, \"bundles\"))\n\n # Copy README\n if os.path.isfile(os.path.join(in_theme, \"README.md\")):\n shutil.copy(os.path.join(in_theme, \"README.md\"), os.path.join(out_theme, \"README.md\"))\n\n\ndef error(msg):\n print(colorama.Fore.RED + \"ERROR:\" + msg)\n\n\ndef mako2jinja(input_file):\n\n output = ''\n\n # TODO: OMG, this code is so horrible. 
Look at it; just look at it:\n\n macro_start = re.compile(r'(.*)<%.*def name=\"(.*?)\".*>(.*)', re.IGNORECASE)\n macro_end = re.compile(r'(.*)</%def>(.*)', re.IGNORECASE)\n\n if_start = re.compile(r'(.*)% *if (.*):(.*)', re.IGNORECASE)\n if_else = re.compile(r'(.*)% *else.*:(.*)', re.IGNORECASE)\n if_elif = re.compile(r'(.*)% *elif (.*):(.*)', re.IGNORECASE)\n if_end = re.compile(r'(.*)% *endif(.*)', re.IGNORECASE)\n\n for_start = re.compile(r'(.*)% *for (.*):(.*)', re.IGNORECASE)\n for_end = re.compile(r'(.*)% *endfor(.*)', re.IGNORECASE)\n\n namespace = re.compile(r'(.*)<% *namespace name=\"(.*?)\".* file=\"(.*?)\".*/>(.*)', re.IGNORECASE)\n inherit = re.compile(r'(.*)<% *inherit file=\"(.*?)\".*/>(.*)', re.IGNORECASE)\n\n block_single_line = re.compile(r'(.*)<% *block.*name=\"(.*?)\".*>(.*)</% *block>(.*)', re.IGNORECASE)\n block_start = re.compile(r'(.*)<% *block.*name=\"(.*?)\".*>(.*)', re.IGNORECASE)\n block_end = re.compile(r'(.*)</%block>(.*)', re.IGNORECASE)\n\n val = re.compile(r'\\$\\{(.*?)\\}', re.IGNORECASE)\n func_len = re.compile(r'len\\((.*?)\\)', re.IGNORECASE)\n filter_h = re.compile(r'\\|h', re.IGNORECASE)\n filter_striphtml = re.compile(r'\\|striphtml', re.IGNORECASE)\n filter_u = re.compile(r'\\|u', re.IGNORECASE)\n\n comment_single_line = re.compile(r'^.*##(.*?)$', re.IGNORECASE)\n\n for line in input_file:\n\n # Process line for repeated inline replacements\n m_val = val.search(line)\n m_func_len = func_len.search(line)\n m_filter_h = filter_h.search(line)\n m_filter_striphtml = filter_striphtml.search(line)\n m_filter_u = filter_u.search(line)\n\n if m_val:\n line = val.sub(r'{{ \\1 }}', line)\n\n if m_filter_h:\n line = filter_h.sub(r'|e', line)\n\n if m_filter_striphtml:\n line = filter_striphtml.sub(r'|e', line)\n\n if m_filter_u:\n line = filter_u.sub(r'|urlencode', line)\n\n if m_func_len:\n line = func_len.sub(r'\\1|length', line)\n\n # Process line for single 'whole line' replacements\n m_macro_start = macro_start.search(line)\n m_macro_end = macro_end.search(line)\n m_if_start = if_start.search(line)\n m_if_else = if_else.search(line)\n m_if_elif = if_elif.search(line)\n m_if_end = if_end.search(line)\n m_for_start = for_start.search(line)\n m_for_end = for_end.search(line)\n m_namspace = namespace.search(line)\n m_inherit = inherit.search(line)\n m_block_single_line = block_single_line.search(line)\n m_block_start = block_start.search(line)\n m_block_end = block_end.search(line)\n\n m_comment_single_line = comment_single_line.search(line)\n\n if m_comment_single_line:\n output += m_comment_single_line.expand(r'{# \\1 #}') + '\\n'\n\n elif m_macro_start:\n output += m_macro_start.expand(r'\\1{% macro \\2 %}\\3') + '\\n'\n elif m_macro_end:\n output += m_macro_end.expand(r'\\1{% endmacro %}\\1') + '\\n'\n\n elif m_if_start:\n output += m_if_start.expand(r'\\1{% if \\2 %}\\3') + '\\n'\n elif m_if_else:\n output += m_if_else.expand(r'\\1{% else %}\\2') + '\\n'\n elif m_if_elif:\n output += m_if_elif.expand(r'\\1{% elif \\2 %}\\3') + '\\n'\n elif m_if_end:\n output += m_if_end.expand(r'\\1{% endif %}\\2') + '\\n'\n\n elif m_for_start:\n output += m_for_start.expand(r'\\1{% for \\2 %}\\3') + '\\n'\n elif m_for_end:\n output += m_for_end.expand(r'\\1{% endfor %}\\2') + '\\n'\n\n elif m_namspace:\n output += m_namspace.expand(r\"\\1{% import '\\3' as \\2 with context %}\\4\") + '\\n'\n elif m_inherit:\n output += m_inherit.expand(r\"{% extends '\\2' %}\\3\") + '\\n'\n\n elif m_block_single_line:\n output += m_block_single_line.expand(r'\\1{% block \\2 %}\\3{% 
endblock %}\\4') + '\\n'\n elif m_block_start:\n output += m_block_start.expand(r'\\1{% block \\2 %}\\3') + '\\n'\n elif m_block_end:\n output += m_block_end.expand(r'\\1{% endblock %}\\2') + '\\n'\n\n else:\n # Doesn't match anything we're going to process, pass though\n output += line\n\n return output\n\n\ndef jinjify_shortcodes(in_dir, out_dir):\n for fname in os.listdir(in_dir):\n if not fname.endswith('.tmpl'):\n continue\n in_file = os.path.join(in_dir, fname)\n out_file = os.path.join(out_dir, fname)\n with open(in_file) as inf:\n data = mako2jinja(inf)\n with open(out_file, 'w') as outf:\n outf.write(data)\n \n\ndef usage():\n print(\"Usage: python {} [in-dir] [out-dir]\".format(sys.argv[0]))\n print(\"OR\")\n print(\"Usage: python {} [in-file] [out-file]\".format(sys.argv[0]))\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 1:\n print('Performing standard conversions:')\n for m, j in (\n ('nikola/data/themes/base', 'nikola/data/themes/base-jinja'),\n ('nikola/data/themes/bootstrap3', 'nikola/data/themes/bootstrap3-jinja')\n ):\n print(' {0} -> {1}'.format(m, j))\n jinjify(m, j)\n jinjify_shortcodes('nikola/data/shortcodes/mako', 'nikola/data/shortcodes/jinja')\n elif len(sys.argv) != 3:\n print('ERROR: needs input and output directory (file), or no arguments for default conversions.')\n usage()\n elif os.path.isdir(sys.argv[1]) and (os.path.isdir(sys.argv[2]) or not os.path.exists(sys.argv[2])):\n jinjify(sys.argv[1], sys.argv[2])\n elif os.path.isfile(sys.argv[1]) and (os.path.isfile(sys.argv[2]) or not os.path.exists(sys.argv[2])):\n tmpdir = tempfile.mkdtemp()\n indir = os.path.sep.join((tmpdir, 'in', 'templates'))\n outdir = os.path.sep.join((tmpdir, 'out', 'templates'))\n os.makedirs(indir)\n shutil.copy(sys.argv[1], indir)\n jinjify(os.path.dirname(indir), os.path.dirname(outdir))\n shutil.move(os.path.sep.join((outdir, os.path.basename(sys.argv[1]))), sys.argv[2])\n else:\n print('ERROR: the two arguments must be both directories or files')\n usage()\n", "path": "scripts/jinjify.py"}]}
3,351
493
gh_patches_debug_13852
rasdani/github-patches
git_diff
ESMCI__cime-2700
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Issue downloading data (wildcards not supported in HTTP) I was wanting to have a case download all of the data it needs. First create an empty tmp inputdata directory, set the DIN env vars. However, I got error below which seems like a problem with wget and wildcards? ``` Refcase not found in /global/cscratch1/sd/ndk/inputdata/e3sm_init/20171228.beta3rc13_1850.ne30_oECv3_ICG.edison/0331-01-01, will attempt to download from inputdata Model refcase missing file refdir = '/global/cscratch1/sd/ndk/inputdata/e3sm_init/20171228.beta3rc13_1850.ne30_oECv3_ICG.edison/0331-01-01/' wget failed with output: and errput Warning: wildcards not supported in HTTP. --2018-06-29 14:11:00-- https://web.lcrc.anl.gov/public/e3sm/inputdata/e3sm_init/20171228.beta3rc13_1850.ne30_oECv3_ICG.edison/0331-01-01/* Resolving web.lcrc.anl.gov (web.lcrc.anl.gov)... 140.221.74.23 Connecting to web.lcrc.anl.gov (web.lcrc.anl.gov)|140.221.74.23|:443... connected. HTTP request sent, awaiting response... 404 Not Found 2018-06-29 14:11:00 ERROR 404: Not Found. ``` The test I was using: `SMS_Ld2.ne30_oECv3_ICG.A_WCYCL1850S_CMIP6.cori-knl_intel.allactive-v1cmip6` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/lib/CIME/Servers/wget.py` Content: ``` 1 """ 2 WGET Server class. Interact with a server using WGET protocol 3 """ 4 # pylint: disable=super-init-not-called 5 from CIME.XML.standard_module_setup import * 6 from CIME.Servers.generic_server import GenericServer 7 8 logger = logging.getLogger(__name__) 9 10 class WGET(GenericServer): 11 def __init__(self, address, user='', passwd=''): 12 self._args = '' 13 if user: 14 self._args += "--user {}".format(user) 15 if passwd: 16 self._args += "--password {}".format(passwd) 17 18 err = run_cmd("wget {} --spider {}".format(self._args, address))[0] 19 expect(err == 0,"Could not connect to repo '{0}'\nThis is most likely either a proxy, or network issue .") 20 self._server_loc = address 21 22 def fileexists(self, rel_path): 23 full_url = os.path.join(self._server_loc, rel_path) 24 stat, out, err = run_cmd("wget {} --spider {}".format(self._args, full_url)) 25 if (stat != 0): 26 logging.warning("FAIL: Repo '{}' does not have file '{}'\nReason:{}\n{}\n".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8'))) 27 return False 28 return True 29 30 def getfile(self, rel_path, full_path): 31 full_url = os.path.join(self._server_loc, rel_path) 32 stat, output, errput = \ 33 run_cmd("wget {} {} -nc --output-document {}".format(self._args, full_url, full_path)) 34 if (stat != 0): 35 logging.warning("wget failed with output: {} and errput {}\n".format(output, errput)) 36 # wget puts an empty file if it fails. 37 try: 38 os.remove(full_path) 39 except OSError: 40 pass 41 return False 42 else: 43 logging.info("SUCCESS\n") 44 return True 45 46 def getdirectory(self, rel_path, full_path): 47 full_url = os.path.join(self._server_loc, rel_path) 48 stat, output, errput = \ 49 run_cmd("wget {} {} -P {}".format(self._args, full_url+os.sep+'*', full_path+os.sep)) 50 if (stat != 0): 51 logging.warning("wget failed with output: {} and errput {}\n".format(output, errput)) 52 # wget puts an empty file if it fails. 
53 try: 54 os.remove(full_path) 55 except OSError: 56 pass 57 return False 58 else: 59 logging.info("SUCCESS\n") 60 return True 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/lib/CIME/Servers/wget.py b/scripts/lib/CIME/Servers/wget.py
--- a/scripts/lib/CIME/Servers/wget.py
+++ b/scripts/lib/CIME/Servers/wget.py
@@ -46,7 +46,9 @@
     def getdirectory(self, rel_path, full_path):
         full_url = os.path.join(self._server_loc, rel_path)
         stat, output, errput = \
-            run_cmd("wget {} {} -P {}".format(self._args, full_url+os.sep+'*', full_path+os.sep))
+            run_cmd("wget {} {} -r -N --no-directories ".format(self._args, full_url+os.sep), from_dir=full_path)
+        logger.debug(output)
+        logger.debug(errput)
         if (stat != 0):
             logging.warning("wget failed with output: {} and errput {}\n".format(output, errput))
             # wget puts an empty file if it fails.
{"golden_diff": "diff --git a/scripts/lib/CIME/Servers/wget.py b/scripts/lib/CIME/Servers/wget.py\n--- a/scripts/lib/CIME/Servers/wget.py\n+++ b/scripts/lib/CIME/Servers/wget.py\n@@ -46,7 +46,9 @@\n def getdirectory(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n- run_cmd(\"wget {} {} -P {}\".format(self._args, full_url+os.sep+'*', full_path+os.sep))\n+ run_cmd(\"wget {} {} -r -N --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n+ logger.debug(output)\n+ logger.debug(errput)\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n", "issue": "Issue downloading data (wildcards not supported in HTTP)\nI was wanting to have a case download all of the data it needs. First create an empty tmp inputdata directory, set the DIN env vars. However, I got error below which seems like a problem with wget and wildcards?\r\n\r\n```\r\n Refcase not found in /global/cscratch1/sd/ndk/inputdata/e3sm_init/20171228.beta3rc13_1850.ne30_oECv3_ICG.edison/0331-01-01, will attempt to download from inputdata\r\n Model refcase missing file refdir = '/global/cscratch1/sd/ndk/inputdata/e3sm_init/20171228.beta3rc13_1850.ne30_oECv3_ICG.edison/0331-01-01/'\r\n wget failed with output: and errput Warning: wildcards not supported in HTTP.\r\n --2018-06-29 14:11:00-- https://web.lcrc.anl.gov/public/e3sm/inputdata/e3sm_init/20171228.beta3rc13_1850.ne30_oECv3_ICG.edison/0331-01-01/*\r\n Resolving web.lcrc.anl.gov (web.lcrc.anl.gov)... 140.221.74.23\r\n Connecting to web.lcrc.anl.gov (web.lcrc.anl.gov)|140.221.74.23|:443... connected.\r\n HTTP request sent, awaiting response... 404 Not Found\r\n 2018-06-29 14:11:00 ERROR 404: Not Found.\r\n```\r\n\r\nThe test I was using: `SMS_Ld2.ne30_oECv3_ICG.A_WCYCL1850S_CMIP6.cori-knl_intel.allactive-v1cmip6`\n", "before_files": [{"content": "\"\"\"\nWGET Server class. 
Interact with a server using WGET protocol\n\"\"\"\n# pylint: disable=super-init-not-called\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.Servers.generic_server import GenericServer\n\nlogger = logging.getLogger(__name__)\n\nclass WGET(GenericServer):\n def __init__(self, address, user='', passwd=''):\n self._args = ''\n if user:\n self._args += \"--user {}\".format(user)\n if passwd:\n self._args += \"--password {}\".format(passwd)\n\n err = run_cmd(\"wget {} --spider {}\".format(self._args, address))[0]\n expect(err == 0,\"Could not connect to repo '{0}'\\nThis is most likely either a proxy, or network issue .\")\n self._server_loc = address\n\n def fileexists(self, rel_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, out, err = run_cmd(\"wget {} --spider {}\".format(self._args, full_url))\n if (stat != 0):\n logging.warning(\"FAIL: Repo '{}' does not have file '{}'\\nReason:{}\\n{}\\n\".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8')))\n return False\n return True\n\n def getfile(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -nc --output-document {}\".format(self._args, full_url, full_path))\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n\n def getdirectory(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -P {}\".format(self._args, full_url+os.sep+'*', full_path+os.sep))\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n", "path": "scripts/lib/CIME/Servers/wget.py"}], "after_files": [{"content": "\"\"\"\nWGET Server class. 
Interact with a server using WGET protocol\n\"\"\"\n# pylint: disable=super-init-not-called\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.Servers.generic_server import GenericServer\n\nlogger = logging.getLogger(__name__)\n\nclass WGET(GenericServer):\n def __init__(self, address, user='', passwd=''):\n self._args = ''\n if user:\n self._args += \"--user {}\".format(user)\n if passwd:\n self._args += \"--password {}\".format(passwd)\n\n err = run_cmd(\"wget {} --spider {}\".format(self._args, address))[0]\n expect(err == 0,\"Could not connect to repo '{0}'\\nThis is most likely either a proxy, or network issue .\")\n self._server_loc = address\n\n def fileexists(self, rel_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, out, err = run_cmd(\"wget {} --spider {}\".format(self._args, full_url))\n if (stat != 0):\n logging.warning(\"FAIL: Repo '{}' does not have file '{}'\\nReason:{}\\n{}\\n\".format(self._server_loc, full_url, out.encode('utf-8'), err.encode('utf-8')))\n return False\n return True\n\n def getfile(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -nc --output-document {}\".format(self._args, full_url, full_path))\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n\n def getdirectory(self, rel_path, full_path):\n full_url = os.path.join(self._server_loc, rel_path)\n stat, output, errput = \\\n run_cmd(\"wget {} {} -r -N --no-directories \".format(self._args, full_url+os.sep), from_dir=full_path)\n logger.debug(output)\n logger.debug(errput)\n if (stat != 0):\n logging.warning(\"wget failed with output: {} and errput {}\\n\".format(output, errput))\n # wget puts an empty file if it fails.\n try:\n os.remove(full_path)\n except OSError:\n pass\n return False\n else:\n logging.info(\"SUCCESS\\n\")\n return True\n", "path": "scripts/lib/CIME/Servers/wget.py"}]}
1,404
215
gh_patches_debug_19063
rasdani/github-patches
git_diff
streamlink__streamlink-185
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Plugin for Livestream.com not working right? exit's quickly for hls and not at all for normal streams I am trying to get a live stream on livestreamer.com to work and i can't get it to play more then about 35 seconds... When I run this command: streamlink "http://livestream.com/Miraclenet/events/5004281" 270p --fifo --player omxplayer it gives me an error about an swf being needed. When I run this command: streamlink "http://livestream.com/Miraclenet/events/5004281" 270p_hls --fifo --player omxplayer it will play the stream but just for about 35 seconds or so... I kinda don't want to have to restart it every 35 seconds to watch this stream... I'd like it to run until I stop it myself... Any help for this non-python, non-linux guy would be very helpful... btw, this is running on a Raspberry Pi. Just got a nice little 7 inch lcd for it and set it up on my desk to be able to watch it while I work, but can't get it to play for long at a time... (edited to correct commands used) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/livestream.py` Content: ``` 1 import re 2 3 from streamlink.compat import urljoin 4 from streamlink.plugin import Plugin 5 from streamlink.plugin.api import http, validate 6 from streamlink.plugin.api.utils import parse_json 7 from streamlink.stream import AkamaiHDStream, HLSStream 8 9 _url_re = re.compile("http(s)?://(www\.)?livestream.com/") 10 _stream_config_schema = validate.Schema({ 11 "event": { 12 "stream_info": validate.any({ 13 "is_live": bool, 14 "qualities": [{ 15 "bitrate": int, 16 "height": int 17 }], 18 validate.optional("play_url"): validate.url(scheme="http"), 19 validate.optional("m3u8_url"): validate.url( 20 scheme="http", 21 path=validate.endswith(".m3u8") 22 ), 23 }, None) 24 }, 25 validate.optional("playerUri"): validate.text 26 }) 27 _smil_schema = validate.Schema(validate.union({ 28 "http_base": validate.all( 29 validate.xml_find("{http://www.w3.org/2001/SMIL20/Language}head/" 30 "{http://www.w3.org/2001/SMIL20/Language}meta" 31 "[@name='httpBase']"), 32 validate.xml_element(attrib={ 33 "content": validate.text 34 }), 35 validate.get("content") 36 ), 37 "videos": validate.all( 38 validate.xml_findall("{http://www.w3.org/2001/SMIL20/Language}body/" 39 "{http://www.w3.org/2001/SMIL20/Language}switch/" 40 "{http://www.w3.org/2001/SMIL20/Language}video"), 41 [ 42 validate.all( 43 validate.xml_element(attrib={ 44 "src": validate.text, 45 "system-bitrate": validate.all( 46 validate.text, 47 validate.transform(int) 48 ) 49 }), 50 validate.transform( 51 lambda e: (e.attrib["src"], e.attrib["system-bitrate"]) 52 ) 53 ) 54 ], 55 ) 56 })) 57 58 59 class Livestream(Plugin): 60 @classmethod 61 def default_stream_types(cls, streams): 62 return ["akamaihd", "hls"] 63 64 @classmethod 65 def can_handle_url(self, url): 66 return _url_re.match(url) 67 68 def _get_stream_info(self): 69 res = http.get(self.url) 70 match = re.search("window.config = ({.+})", res.text) 71 if match: 72 config = match.group(1) 73 return parse_json(config, "config JSON", 74 schema=_stream_config_schema) 75 76 def _parse_smil(self, url, swf_url): 77 res = http.get(url) 78 smil = http.xml(res, "SMIL config", schema=_smil_schema) 79 80 for src, bitrate in smil["videos"]: 81 url = urljoin(smil["http_base"], src) 82 yield bitrate, AkamaiHDStream(self.session, url, 
swf=swf_url) 83 84 def _get_streams(self): 85 info = self._get_stream_info() 86 if not info: 87 return 88 89 stream_info = info["event"]["stream_info"] 90 if not (stream_info and stream_info["is_live"]): 91 # Stream is not live 92 return 93 94 play_url = stream_info.get("play_url") 95 if play_url: 96 swf_url = info.get("playerUri") 97 if swf_url: 98 if not swf_url.startswith("http"): 99 swf_url = "http://" + swf_url 100 101 # Work around broken SSL. 102 swf_url = swf_url.replace("https://", "http://") 103 104 qualities = stream_info["qualities"] 105 for bitrate, stream in self._parse_smil(play_url, swf_url): 106 name = "{0:d}k".format(int(bitrate / 1000)) 107 for quality in qualities: 108 if quality["bitrate"] == bitrate: 109 name = "{0}p".format(quality["height"]) 110 111 yield name, stream 112 113 m3u8_url = stream_info.get("m3u8_url") 114 if m3u8_url: 115 streams = HLSStream.parse_variant_playlist(self.session, m3u8_url, 116 namekey="pixels") 117 # TODO: Replace with "yield from" when dropping Python 2. 118 for stream in streams.items(): 119 yield stream 120 121 __plugin__ = Livestream 122 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/livestream.py b/src/streamlink/plugins/livestream.py
--- a/src/streamlink/plugins/livestream.py
+++ b/src/streamlink/plugins/livestream.py
@@ -22,7 +22,10 @@
             ),
         }, None)
     },
-    validate.optional("playerUri"): validate.text
+    validate.optional("playerUri"): validate.text,
+    validate.optional("viewerPlusSwfUrl"): validate.url(scheme="http"),
+    validate.optional("lsPlayerSwfUrl"): validate.text,
+    validate.optional("hdPlayerSwfUrl"): validate.text
 })
 _smil_schema = validate.Schema(validate.union({
     "http_base": validate.all(
@@ -93,7 +96,7 @@
 
         play_url = stream_info.get("play_url")
         if play_url:
-            swf_url = info.get("playerUri")
+            swf_url = info.get("playerUri") or info.get("hdPlayerSwfUrl") or info.get("lsPlayerSwfUrl") or info.get("viewerPlusSwfUrl")
             if swf_url:
                 if not swf_url.startswith("http"):
                     swf_url = "http://" + swf_url
{"golden_diff": "diff --git a/src/streamlink/plugins/livestream.py b/src/streamlink/plugins/livestream.py\n--- a/src/streamlink/plugins/livestream.py\n+++ b/src/streamlink/plugins/livestream.py\n@@ -22,7 +22,10 @@\n ),\n }, None)\n },\n- validate.optional(\"playerUri\"): validate.text\n+ validate.optional(\"playerUri\"): validate.text,\n+ validate.optional(\"viewerPlusSwfUrl\"): validate.url(scheme=\"http\"),\n+ validate.optional(\"lsPlayerSwfUrl\"): validate.text,\n+ validate.optional(\"hdPlayerSwfUrl\"): validate.text\n })\n _smil_schema = validate.Schema(validate.union({\n \"http_base\": validate.all(\n@@ -93,7 +96,7 @@\n \n play_url = stream_info.get(\"play_url\")\n if play_url:\n- swf_url = info.get(\"playerUri\")\n+ swf_url = info.get(\"playerUri\") or info.get(\"hdPlayerSwfUrl\") or info.get(\"lsPlayerSwfUrl\") or info.get(\"viewerPlusSwfUrl\")\n if swf_url:\n if not swf_url.startswith(\"http\"):\n swf_url = \"http://\" + swf_url\n", "issue": "Plugin for Livestream.com not working right? exit's quickly for hls and not at all for normal streams\nI am trying to get a live stream on livestreamer.com to work and i can't get it to play more then about 35 seconds...\r\n\r\nWhen I run this command:\r\nstreamlink \"http://livestream.com/Miraclenet/events/5004281\" 270p --fifo --player omxplayer\r\n\r\nit gives me an error about an swf being needed. When I run this command:\r\nstreamlink \"http://livestream.com/Miraclenet/events/5004281\" 270p_hls --fifo --player omxplayer\r\n\r\nit will play the stream but just for about 35 seconds or so... I kinda don't want to have to restart it every 35 seconds to watch this stream... I'd like it to run until I stop it myself...\r\n\r\nAny help for this non-python, non-linux guy would be very helpful...\r\n\r\nbtw, this is running on a Raspberry Pi. 
Just got a nice little 7 inch lcd for it and set it up on my desk to be able to watch it while I work, but can't get it to play for long at a time...\r\n\r\n(edited to correct commands used)\n", "before_files": [{"content": "import re\n\nfrom streamlink.compat import urljoin\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.plugin.api.utils import parse_json\nfrom streamlink.stream import AkamaiHDStream, HLSStream\n\n_url_re = re.compile(\"http(s)?://(www\\.)?livestream.com/\")\n_stream_config_schema = validate.Schema({\n \"event\": {\n \"stream_info\": validate.any({\n \"is_live\": bool,\n \"qualities\": [{\n \"bitrate\": int,\n \"height\": int\n }],\n validate.optional(\"play_url\"): validate.url(scheme=\"http\"),\n validate.optional(\"m3u8_url\"): validate.url(\n scheme=\"http\",\n path=validate.endswith(\".m3u8\")\n ),\n }, None)\n },\n validate.optional(\"playerUri\"): validate.text\n})\n_smil_schema = validate.Schema(validate.union({\n \"http_base\": validate.all(\n validate.xml_find(\"{http://www.w3.org/2001/SMIL20/Language}head/\"\n \"{http://www.w3.org/2001/SMIL20/Language}meta\"\n \"[@name='httpBase']\"),\n validate.xml_element(attrib={\n \"content\": validate.text\n }),\n validate.get(\"content\")\n ),\n \"videos\": validate.all(\n validate.xml_findall(\"{http://www.w3.org/2001/SMIL20/Language}body/\"\n \"{http://www.w3.org/2001/SMIL20/Language}switch/\"\n \"{http://www.w3.org/2001/SMIL20/Language}video\"),\n [\n validate.all(\n validate.xml_element(attrib={\n \"src\": validate.text,\n \"system-bitrate\": validate.all(\n validate.text,\n validate.transform(int)\n )\n }),\n validate.transform(\n lambda e: (e.attrib[\"src\"], e.attrib[\"system-bitrate\"])\n )\n )\n ],\n )\n}))\n\n\nclass Livestream(Plugin):\n @classmethod\n def default_stream_types(cls, streams):\n return [\"akamaihd\", \"hls\"]\n\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_stream_info(self):\n res = http.get(self.url)\n match = re.search(\"window.config = ({.+})\", res.text)\n if match:\n config = match.group(1)\n return parse_json(config, \"config JSON\",\n schema=_stream_config_schema)\n\n def _parse_smil(self, url, swf_url):\n res = http.get(url)\n smil = http.xml(res, \"SMIL config\", schema=_smil_schema)\n\n for src, bitrate in smil[\"videos\"]:\n url = urljoin(smil[\"http_base\"], src)\n yield bitrate, AkamaiHDStream(self.session, url, swf=swf_url)\n\n def _get_streams(self):\n info = self._get_stream_info()\n if not info:\n return\n\n stream_info = info[\"event\"][\"stream_info\"]\n if not (stream_info and stream_info[\"is_live\"]):\n # Stream is not live\n return\n\n play_url = stream_info.get(\"play_url\")\n if play_url:\n swf_url = info.get(\"playerUri\")\n if swf_url:\n if not swf_url.startswith(\"http\"):\n swf_url = \"http://\" + swf_url\n\n # Work around broken SSL.\n swf_url = swf_url.replace(\"https://\", \"http://\")\n\n qualities = stream_info[\"qualities\"]\n for bitrate, stream in self._parse_smil(play_url, swf_url):\n name = \"{0:d}k\".format(int(bitrate / 1000))\n for quality in qualities:\n if quality[\"bitrate\"] == bitrate:\n name = \"{0}p\".format(quality[\"height\"])\n\n yield name, stream\n\n m3u8_url = stream_info.get(\"m3u8_url\")\n if m3u8_url:\n streams = HLSStream.parse_variant_playlist(self.session, m3u8_url,\n namekey=\"pixels\")\n # TODO: Replace with \"yield from\" when dropping Python 2.\n for stream in streams.items():\n yield stream\n\n__plugin__ = Livestream\n", "path": 
"src/streamlink/plugins/livestream.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.compat import urljoin\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate\nfrom streamlink.plugin.api.utils import parse_json\nfrom streamlink.stream import AkamaiHDStream, HLSStream\n\n_url_re = re.compile(\"http(s)?://(www\\.)?livestream.com/\")\n_stream_config_schema = validate.Schema({\n \"event\": {\n \"stream_info\": validate.any({\n \"is_live\": bool,\n \"qualities\": [{\n \"bitrate\": int,\n \"height\": int\n }],\n validate.optional(\"play_url\"): validate.url(scheme=\"http\"),\n validate.optional(\"m3u8_url\"): validate.url(\n scheme=\"http\",\n path=validate.endswith(\".m3u8\")\n ),\n }, None)\n },\n validate.optional(\"playerUri\"): validate.text,\n validate.optional(\"viewerPlusSwfUrl\"): validate.url(scheme=\"http\"),\n validate.optional(\"lsPlayerSwfUrl\"): validate.text,\n validate.optional(\"hdPlayerSwfUrl\"): validate.text\n})\n_smil_schema = validate.Schema(validate.union({\n \"http_base\": validate.all(\n validate.xml_find(\"{http://www.w3.org/2001/SMIL20/Language}head/\"\n \"{http://www.w3.org/2001/SMIL20/Language}meta\"\n \"[@name='httpBase']\"),\n validate.xml_element(attrib={\n \"content\": validate.text\n }),\n validate.get(\"content\")\n ),\n \"videos\": validate.all(\n validate.xml_findall(\"{http://www.w3.org/2001/SMIL20/Language}body/\"\n \"{http://www.w3.org/2001/SMIL20/Language}switch/\"\n \"{http://www.w3.org/2001/SMIL20/Language}video\"),\n [\n validate.all(\n validate.xml_element(attrib={\n \"src\": validate.text,\n \"system-bitrate\": validate.all(\n validate.text,\n validate.transform(int)\n )\n }),\n validate.transform(\n lambda e: (e.attrib[\"src\"], e.attrib[\"system-bitrate\"])\n )\n )\n ],\n )\n}))\n\n\nclass Livestream(Plugin):\n @classmethod\n def default_stream_types(cls, streams):\n return [\"akamaihd\", \"hls\"]\n\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_stream_info(self):\n res = http.get(self.url)\n match = re.search(\"window.config = ({.+})\", res.text)\n if match:\n config = match.group(1)\n return parse_json(config, \"config JSON\",\n schema=_stream_config_schema)\n\n def _parse_smil(self, url, swf_url):\n res = http.get(url)\n smil = http.xml(res, \"SMIL config\", schema=_smil_schema)\n\n for src, bitrate in smil[\"videos\"]:\n url = urljoin(smil[\"http_base\"], src)\n yield bitrate, AkamaiHDStream(self.session, url, swf=swf_url)\n\n def _get_streams(self):\n info = self._get_stream_info()\n if not info:\n return\n\n stream_info = info[\"event\"][\"stream_info\"]\n if not (stream_info and stream_info[\"is_live\"]):\n # Stream is not live\n return\n\n play_url = stream_info.get(\"play_url\")\n if play_url:\n swf_url = info.get(\"playerUri\") or info.get(\"hdPlayerSwfUrl\") or info.get(\"lsPlayerSwfUrl\") or info.get(\"viewerPlusSwfUrl\")\n if swf_url:\n if not swf_url.startswith(\"http\"):\n swf_url = \"http://\" + swf_url\n\n # Work around broken SSL.\n swf_url = swf_url.replace(\"https://\", \"http://\")\n\n qualities = stream_info[\"qualities\"]\n for bitrate, stream in self._parse_smil(play_url, swf_url):\n name = \"{0:d}k\".format(int(bitrate / 1000))\n for quality in qualities:\n if quality[\"bitrate\"] == bitrate:\n name = \"{0}p\".format(quality[\"height\"])\n\n yield name, stream\n\n m3u8_url = stream_info.get(\"m3u8_url\")\n if m3u8_url:\n streams = HLSStream.parse_variant_playlist(self.session, m3u8_url,\n namekey=\"pixels\")\n # TODO: Replace with 
\"yield from\" when dropping Python 2.\n for stream in streams.items():\n yield stream\n\n__plugin__ = Livestream\n", "path": "src/streamlink/plugins/livestream.py"}]}
1,758
268
gh_patches_debug_20419
rasdani/github-patches
git_diff
scrapy__scrapy-2091
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DEFAULT_REQUEST_HEADERS can't set User-Agent If I use DEFAULT_REQUEST_HEADERS to set `User-Agent`, it doesn't work. ``` python DEFAULT_REQUEST_HEADERS = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch', 'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4', } ``` I know I can set `User-Agent` for crawler by using USER_AGENT setting key: ``` python # settings.py USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36' ``` I think this behaviour is not corresponding with the doc [User Agent](http://doc.scrapy.org/en/latest/topics/settings.html#user-agent) and [DefaultHeadersMiddleware](http://doc.scrapy.org/en/latest/topics/downloader-middleware.html#scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware) (_This middleware sets all default requests headers specified in the DEFAULT_REQUEST_HEADERS setting._) If this behaviour is designed, maybe the doc should be modified. Thanks. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/settings/default_settings.py` Content: ``` 1 """ 2 This module contains the default values for all settings used by Scrapy. 3 4 For more information about these settings you can read the settings 5 documentation in docs/topics/settings.rst 6 7 Scrapy developers, if you add a setting here remember to: 8 9 * add it in alphabetical order 10 * group similar settings without leaving blank lines 11 * add its documentation to the available settings documentation 12 (docs/topics/settings.rst) 13 14 """ 15 16 import os 17 import sys 18 from importlib import import_module 19 from os.path import join, abspath, dirname 20 21 import six 22 23 AJAXCRAWL_ENABLED = False 24 25 AUTOTHROTTLE_ENABLED = False 26 AUTOTHROTTLE_DEBUG = False 27 AUTOTHROTTLE_MAX_DELAY = 60.0 28 AUTOTHROTTLE_START_DELAY = 5.0 29 AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 30 31 BOT_NAME = 'scrapybot' 32 33 CLOSESPIDER_TIMEOUT = 0 34 CLOSESPIDER_PAGECOUNT = 0 35 CLOSESPIDER_ITEMCOUNT = 0 36 CLOSESPIDER_ERRORCOUNT = 0 37 38 COMMANDS_MODULE = '' 39 40 COMPRESSION_ENABLED = True 41 42 CONCURRENT_ITEMS = 100 43 44 CONCURRENT_REQUESTS = 16 45 CONCURRENT_REQUESTS_PER_DOMAIN = 8 46 CONCURRENT_REQUESTS_PER_IP = 0 47 48 COOKIES_ENABLED = True 49 COOKIES_DEBUG = False 50 51 DEFAULT_ITEM_CLASS = 'scrapy.item.Item' 52 53 DEFAULT_REQUEST_HEADERS = { 54 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 55 'Accept-Language': 'en', 56 } 57 58 DEPTH_LIMIT = 0 59 DEPTH_STATS = True 60 DEPTH_PRIORITY = 0 61 62 DNSCACHE_ENABLED = True 63 DNSCACHE_SIZE = 10000 64 DNS_TIMEOUT = 60 65 66 DOWNLOAD_DELAY = 0 67 68 DOWNLOAD_HANDLERS = {} 69 DOWNLOAD_HANDLERS_BASE = { 70 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler', 71 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler', 72 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler', 73 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler', 74 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler', 75 } 76 77 DOWNLOAD_TIMEOUT = 180 # 3mins 78 79 DOWNLOAD_MAXSIZE = 1024*1024*1024 # 
1024m 80 DOWNLOAD_WARNSIZE = 32*1024*1024 # 32m 81 82 DOWNLOADER = 'scrapy.core.downloader.Downloader' 83 84 DOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory' 85 DOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory' 86 DOWNLOADER_CLIENT_TLS_METHOD = 'TLS' # Use highest TLS/SSL protocol version supported by the platform, 87 # also allowing negotiation 88 89 DOWNLOADER_MIDDLEWARES = {} 90 91 DOWNLOADER_MIDDLEWARES_BASE = { 92 # Engine side 93 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100, 94 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300, 95 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350, 96 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 400, 97 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 500, 98 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550, 99 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560, 100 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580, 101 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590, 102 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600, 103 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700, 104 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750, 105 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': 830, 106 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850, 107 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900, 108 # Downloader side 109 } 110 111 DOWNLOADER_STATS = True 112 113 DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter' 114 115 try: 116 EDITOR = os.environ['EDITOR'] 117 except KeyError: 118 if sys.platform == 'win32': 119 EDITOR = '%s -m idlelib.idle' 120 else: 121 EDITOR = 'vi' 122 123 EXTENSIONS = {} 124 125 EXTENSIONS_BASE = { 126 'scrapy.extensions.corestats.CoreStats': 0, 127 'scrapy.extensions.telnet.TelnetConsole': 0, 128 'scrapy.extensions.memusage.MemoryUsage': 0, 129 'scrapy.extensions.memdebug.MemoryDebugger': 0, 130 'scrapy.extensions.closespider.CloseSpider': 0, 131 'scrapy.extensions.feedexport.FeedExporter': 0, 132 'scrapy.extensions.logstats.LogStats': 0, 133 'scrapy.extensions.spiderstate.SpiderState': 0, 134 'scrapy.extensions.throttle.AutoThrottle': 0, 135 } 136 137 FEED_TEMPDIR = None 138 FEED_URI = None 139 FEED_URI_PARAMS = None # a function to extend uri arguments 140 FEED_FORMAT = 'jsonlines' 141 FEED_STORE_EMPTY = False 142 FEED_EXPORT_FIELDS = None 143 FEED_STORAGES = {} 144 FEED_STORAGES_BASE = { 145 '': 'scrapy.extensions.feedexport.FileFeedStorage', 146 'file': 'scrapy.extensions.feedexport.FileFeedStorage', 147 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage', 148 's3': 'scrapy.extensions.feedexport.S3FeedStorage', 149 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage', 150 } 151 FEED_EXPORTERS = {} 152 FEED_EXPORTERS_BASE = { 153 'json': 'scrapy.exporters.JsonItemExporter', 154 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter', 155 'jl': 'scrapy.exporters.JsonLinesItemExporter', 156 'csv': 'scrapy.exporters.CsvItemExporter', 157 'xml': 'scrapy.exporters.XmlItemExporter', 158 'marshal': 'scrapy.exporters.MarshalItemExporter', 159 'pickle': 'scrapy.exporters.PickleItemExporter', 160 } 161 162 FILES_STORE_S3_ACL = 'private' 163 FILES_EXPIRES = 90 164 FILES_URLS_FIELD = 'file_urls' 165 FILES_RESULT_FIELD = 'files' 166 167 HTTPCACHE_ENABLED = False 168 HTTPCACHE_DIR = 
'httpcache' 169 HTTPCACHE_IGNORE_MISSING = False 170 HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' 171 HTTPCACHE_EXPIRATION_SECS = 0 172 HTTPCACHE_ALWAYS_STORE = False 173 HTTPCACHE_IGNORE_HTTP_CODES = [] 174 HTTPCACHE_IGNORE_SCHEMES = ['file'] 175 HTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = [] 176 HTTPCACHE_DBM_MODULE = 'anydbm' if six.PY2 else 'dbm' 177 HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy' 178 HTTPCACHE_GZIP = False 179 180 HTTPPROXY_AUTH_ENCODING = 'latin-1' 181 182 IMAGES_MIN_WIDTH = 0 183 IMAGES_MIN_HEIGHT = 0 184 IMAGES_EXPIRES = 90 185 IMAGES_THUMBS = {} 186 IMAGES_URLS_FIELD = 'image_urls' 187 IMAGES_RESULT_FIELD = 'images' 188 189 ITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager' 190 191 ITEM_PIPELINES = {} 192 ITEM_PIPELINES_BASE = {} 193 194 LOG_ENABLED = True 195 LOG_ENCODING = 'utf-8' 196 LOG_FORMATTER = 'scrapy.logformatter.LogFormatter' 197 LOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s' 198 LOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S' 199 LOG_STDOUT = False 200 LOG_LEVEL = 'DEBUG' 201 LOG_FILE = None 202 203 LOG_UNSERIALIZABLE_REQUESTS = False 204 205 LOGSTATS_INTERVAL = 60.0 206 207 MAIL_HOST = 'localhost' 208 MAIL_PORT = 25 209 MAIL_FROM = 'scrapy@localhost' 210 MAIL_PASS = None 211 MAIL_USER = None 212 213 MEMDEBUG_ENABLED = False # enable memory debugging 214 MEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown 215 216 MEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0 217 MEMUSAGE_ENABLED = False 218 MEMUSAGE_LIMIT_MB = 0 219 MEMUSAGE_NOTIFY_MAIL = [] 220 MEMUSAGE_REPORT = False 221 MEMUSAGE_WARNING_MB = 0 222 223 METAREFRESH_ENABLED = True 224 METAREFRESH_MAXDELAY = 100 225 226 NEWSPIDER_MODULE = '' 227 228 RANDOMIZE_DOWNLOAD_DELAY = True 229 230 REACTOR_THREADPOOL_MAXSIZE = 10 231 232 REDIRECT_ENABLED = True 233 REDIRECT_MAX_TIMES = 20 # uses Firefox default setting 234 REDIRECT_PRIORITY_ADJUST = +2 235 236 REFERER_ENABLED = True 237 238 RETRY_ENABLED = True 239 RETRY_TIMES = 2 # initial response + 2 retries = 3 requests 240 RETRY_HTTP_CODES = [500, 502, 503, 504, 408] 241 RETRY_PRIORITY_ADJUST = -1 242 243 ROBOTSTXT_OBEY = False 244 245 SCHEDULER = 'scrapy.core.scheduler.Scheduler' 246 SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue' 247 SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue' 248 SCHEDULER_PRIORITY_QUEUE = 'queuelib.PriorityQueue' 249 250 SPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader' 251 252 SPIDER_MIDDLEWARES = {} 253 254 SPIDER_MIDDLEWARES_BASE = { 255 # Engine side 256 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50, 257 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500, 258 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700, 259 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800, 260 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900, 261 # Spider side 262 } 263 264 SPIDER_MODULES = [] 265 266 STATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector' 267 STATS_DUMP = True 268 269 STATSMAILER_RCPTS = [] 270 271 TEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates')) 272 273 URLLENGTH_LIMIT = 2083 274 275 USER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__ 276 277 TELNETCONSOLE_ENABLED = 1 278 TELNETCONSOLE_PORT = [6023, 6073] 279 TELNETCONSOLE_HOST = '127.0.0.1' 280 281 SPIDER_CONTRACTS = {} 282 SPIDER_CONTRACTS_BASE = { 283 'scrapy.contracts.default.UrlContract': 1, 284 'scrapy.contracts.default.ReturnsContract': 2, 285 
'scrapy.contracts.default.ScrapesContract': 3, 286 } 287 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py
--- a/scrapy/settings/default_settings.py
+++ b/scrapy/settings/default_settings.py
@@ -93,9 +93,9 @@
     'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
     'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,
     'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,
-    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 400,
-    'scrapy.downloadermiddlewares.retry.RetryMiddleware': 500,
-    'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,
+    'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,
+    'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,
+    'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,
     'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,
     'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,
     'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,
{"golden_diff": "diff --git a/scrapy/settings/default_settings.py b/scrapy/settings/default_settings.py\n--- a/scrapy/settings/default_settings.py\n+++ b/scrapy/settings/default_settings.py\n@@ -93,9 +93,9 @@\n 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,\n- 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 400,\n- 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 500,\n- 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,\n+ 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,\n+ 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,\n+ 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,\n 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,\n", "issue": "DEFAULT_REQUEST_HEADERS can't set User-Agent\nIf I use DEFAULT_REQUEST_HEADERS to set `User-Agent`, it doesn't work.\n\n``` python\nDEFAULT_REQUEST_HEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, sdch',\n 'Accept-Language': 'en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4',\n}\n```\n\nI know I can set `User-Agent` for crawler by using USER_AGENT setting key:\n\n``` python\n# settings.py\nUSER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.84 Safari/537.36'\n```\n\nI think this behaviour is not corresponding with the doc [User Agent](http://doc.scrapy.org/en/latest/topics/settings.html#user-agent) and [DefaultHeadersMiddleware](http://doc.scrapy.org/en/latest/topics/downloader-middleware.html#scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware) (_This middleware sets all default requests headers specified in the DEFAULT_REQUEST_HEADERS setting._)\n\nIf this behaviour is designed, maybe the doc should be modified.\n\nThanks.\n\n", "before_files": [{"content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group similar settings without leaving blank lines\n* add its documentation to the available settings documentation\n (docs/topics/settings.rst)\n\n\"\"\"\n\nimport os\nimport sys\nfrom importlib import import_module\nfrom os.path import join, abspath, dirname\n\nimport six\n\nAJAXCRAWL_ENABLED = False\n\nAUTOTHROTTLE_ENABLED = False\nAUTOTHROTTLE_DEBUG = False\nAUTOTHROTTLE_MAX_DELAY = 60.0\nAUTOTHROTTLE_START_DELAY = 5.0\nAUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n\nBOT_NAME = 'scrapybot'\n\nCLOSESPIDER_TIMEOUT = 0\nCLOSESPIDER_PAGECOUNT = 0\nCLOSESPIDER_ITEMCOUNT = 0\nCLOSESPIDER_ERRORCOUNT = 0\n\nCOMMANDS_MODULE = ''\n\nCOMPRESSION_ENABLED = True\n\nCONCURRENT_ITEMS = 100\n\nCONCURRENT_REQUESTS = 16\nCONCURRENT_REQUESTS_PER_DOMAIN = 8\nCONCURRENT_REQUESTS_PER_IP = 0\n\nCOOKIES_ENABLED = True\nCOOKIES_DEBUG = False\n\nDEFAULT_ITEM_CLASS = 'scrapy.item.Item'\n\nDEFAULT_REQUEST_HEADERS = {\n 
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n}\n\nDEPTH_LIMIT = 0\nDEPTH_STATS = True\nDEPTH_PRIORITY = 0\n\nDNSCACHE_ENABLED = True\nDNSCACHE_SIZE = 10000\nDNS_TIMEOUT = 60\n\nDOWNLOAD_DELAY = 0\n\nDOWNLOAD_HANDLERS = {}\nDOWNLOAD_HANDLERS_BASE = {\n 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',\n 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',\n 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',\n}\n\nDOWNLOAD_TIMEOUT = 180 # 3mins\n\nDOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m\nDOWNLOAD_WARNSIZE = 32*1024*1024 # 32m\n\nDOWNLOADER = 'scrapy.core.downloader.Downloader'\n\nDOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'\nDOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'\nDOWNLOADER_CLIENT_TLS_METHOD = 'TLS' # Use highest TLS/SSL protocol version supported by the platform,\n # also allowing negotiation\n\nDOWNLOADER_MIDDLEWARES = {}\n\nDOWNLOADER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 400,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 500,\n 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,\n 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,\n 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,\n 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,\n 'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,\n 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': 830,\n 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,\n 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,\n # Downloader side\n}\n\nDOWNLOADER_STATS = True\n\nDUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'\n\ntry:\n EDITOR = os.environ['EDITOR']\nexcept KeyError:\n if sys.platform == 'win32':\n EDITOR = '%s -m idlelib.idle'\n else:\n EDITOR = 'vi'\n\nEXTENSIONS = {}\n\nEXTENSIONS_BASE = {\n 'scrapy.extensions.corestats.CoreStats': 0,\n 'scrapy.extensions.telnet.TelnetConsole': 0,\n 'scrapy.extensions.memusage.MemoryUsage': 0,\n 'scrapy.extensions.memdebug.MemoryDebugger': 0,\n 'scrapy.extensions.closespider.CloseSpider': 0,\n 'scrapy.extensions.feedexport.FeedExporter': 0,\n 'scrapy.extensions.logstats.LogStats': 0,\n 'scrapy.extensions.spiderstate.SpiderState': 0,\n 'scrapy.extensions.throttle.AutoThrottle': 0,\n}\n\nFEED_TEMPDIR = None\nFEED_URI = None\nFEED_URI_PARAMS = None # a function to extend uri arguments\nFEED_FORMAT = 'jsonlines'\nFEED_STORE_EMPTY = False\nFEED_EXPORT_FIELDS = None\nFEED_STORAGES = {}\nFEED_STORAGES_BASE = {\n '': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'file': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',\n 's3': 'scrapy.extensions.feedexport.S3FeedStorage',\n 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',\n}\nFEED_EXPORTERS = 
{}\nFEED_EXPORTERS_BASE = {\n 'json': 'scrapy.exporters.JsonItemExporter',\n 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',\n 'jl': 'scrapy.exporters.JsonLinesItemExporter',\n 'csv': 'scrapy.exporters.CsvItemExporter',\n 'xml': 'scrapy.exporters.XmlItemExporter',\n 'marshal': 'scrapy.exporters.MarshalItemExporter',\n 'pickle': 'scrapy.exporters.PickleItemExporter',\n}\n\nFILES_STORE_S3_ACL = 'private'\nFILES_EXPIRES = 90\nFILES_URLS_FIELD = 'file_urls'\nFILES_RESULT_FIELD = 'files'\n\nHTTPCACHE_ENABLED = False\nHTTPCACHE_DIR = 'httpcache'\nHTTPCACHE_IGNORE_MISSING = False\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_ALWAYS_STORE = False\nHTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_IGNORE_SCHEMES = ['file']\nHTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []\nHTTPCACHE_DBM_MODULE = 'anydbm' if six.PY2 else 'dbm'\nHTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'\nHTTPCACHE_GZIP = False\n\nHTTPPROXY_AUTH_ENCODING = 'latin-1'\n\nIMAGES_MIN_WIDTH = 0\nIMAGES_MIN_HEIGHT = 0\nIMAGES_EXPIRES = 90\nIMAGES_THUMBS = {}\nIMAGES_URLS_FIELD = 'image_urls'\nIMAGES_RESULT_FIELD = 'images'\n\nITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'\n\nITEM_PIPELINES = {}\nITEM_PIPELINES_BASE = {}\n\nLOG_ENABLED = True\nLOG_ENCODING = 'utf-8'\nLOG_FORMATTER = 'scrapy.logformatter.LogFormatter'\nLOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'\nLOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'\nLOG_STDOUT = False\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = None\n\nLOG_UNSERIALIZABLE_REQUESTS = False\n\nLOGSTATS_INTERVAL = 60.0\n\nMAIL_HOST = 'localhost'\nMAIL_PORT = 25\nMAIL_FROM = 'scrapy@localhost'\nMAIL_PASS = None\nMAIL_USER = None\n\nMEMDEBUG_ENABLED = False # enable memory debugging\nMEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown\n\nMEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0\nMEMUSAGE_ENABLED = False\nMEMUSAGE_LIMIT_MB = 0\nMEMUSAGE_NOTIFY_MAIL = []\nMEMUSAGE_REPORT = False\nMEMUSAGE_WARNING_MB = 0\n\nMETAREFRESH_ENABLED = True\nMETAREFRESH_MAXDELAY = 100\n\nNEWSPIDER_MODULE = ''\n\nRANDOMIZE_DOWNLOAD_DELAY = True\n\nREACTOR_THREADPOOL_MAXSIZE = 10\n\nREDIRECT_ENABLED = True\nREDIRECT_MAX_TIMES = 20 # uses Firefox default setting\nREDIRECT_PRIORITY_ADJUST = +2\n\nREFERER_ENABLED = True\n\nRETRY_ENABLED = True\nRETRY_TIMES = 2 # initial response + 2 retries = 3 requests\nRETRY_HTTP_CODES = [500, 502, 503, 504, 408]\nRETRY_PRIORITY_ADJUST = -1\n\nROBOTSTXT_OBEY = False\n\nSCHEDULER = 'scrapy.core.scheduler.Scheduler'\nSCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'\nSCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'\nSCHEDULER_PRIORITY_QUEUE = 'queuelib.PriorityQueue'\n\nSPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'\n\nSPIDER_MIDDLEWARES = {}\n\nSPIDER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,\n 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,\n 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,\n 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,\n 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,\n # Spider side\n}\n\nSPIDER_MODULES = []\n\nSTATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'\nSTATS_DUMP = True\n\nSTATSMAILER_RCPTS = []\n\nTEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))\n\nURLLENGTH_LIMIT = 2083\n\nUSER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__\n\nTELNETCONSOLE_ENABLED = 1\nTELNETCONSOLE_PORT = 
[6023, 6073]\nTELNETCONSOLE_HOST = '127.0.0.1'\n\nSPIDER_CONTRACTS = {}\nSPIDER_CONTRACTS_BASE = {\n 'scrapy.contracts.default.UrlContract': 1,\n 'scrapy.contracts.default.ReturnsContract': 2,\n 'scrapy.contracts.default.ScrapesContract': 3,\n}\n", "path": "scrapy/settings/default_settings.py"}], "after_files": [{"content": "\"\"\"\nThis module contains the default values for all settings used by Scrapy.\n\nFor more information about these settings you can read the settings\ndocumentation in docs/topics/settings.rst\n\nScrapy developers, if you add a setting here remember to:\n\n* add it in alphabetical order\n* group similar settings without leaving blank lines\n* add its documentation to the available settings documentation\n (docs/topics/settings.rst)\n\n\"\"\"\n\nimport os\nimport sys\nfrom importlib import import_module\nfrom os.path import join, abspath, dirname\n\nimport six\n\nAJAXCRAWL_ENABLED = False\n\nAUTOTHROTTLE_ENABLED = False\nAUTOTHROTTLE_DEBUG = False\nAUTOTHROTTLE_MAX_DELAY = 60.0\nAUTOTHROTTLE_START_DELAY = 5.0\nAUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n\nBOT_NAME = 'scrapybot'\n\nCLOSESPIDER_TIMEOUT = 0\nCLOSESPIDER_PAGECOUNT = 0\nCLOSESPIDER_ITEMCOUNT = 0\nCLOSESPIDER_ERRORCOUNT = 0\n\nCOMMANDS_MODULE = ''\n\nCOMPRESSION_ENABLED = True\n\nCONCURRENT_ITEMS = 100\n\nCONCURRENT_REQUESTS = 16\nCONCURRENT_REQUESTS_PER_DOMAIN = 8\nCONCURRENT_REQUESTS_PER_IP = 0\n\nCOOKIES_ENABLED = True\nCOOKIES_DEBUG = False\n\nDEFAULT_ITEM_CLASS = 'scrapy.item.Item'\n\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en',\n}\n\nDEPTH_LIMIT = 0\nDEPTH_STATS = True\nDEPTH_PRIORITY = 0\n\nDNSCACHE_ENABLED = True\nDNSCACHE_SIZE = 10000\nDNS_TIMEOUT = 60\n\nDOWNLOAD_DELAY = 0\n\nDOWNLOAD_HANDLERS = {}\nDOWNLOAD_HANDLERS_BASE = {\n 'file': 'scrapy.core.downloader.handlers.file.FileDownloadHandler',\n 'http': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 'https': 'scrapy.core.downloader.handlers.http.HTTPDownloadHandler',\n 's3': 'scrapy.core.downloader.handlers.s3.S3DownloadHandler',\n 'ftp': 'scrapy.core.downloader.handlers.ftp.FTPDownloadHandler',\n}\n\nDOWNLOAD_TIMEOUT = 180 # 3mins\n\nDOWNLOAD_MAXSIZE = 1024*1024*1024 # 1024m\nDOWNLOAD_WARNSIZE = 32*1024*1024 # 32m\n\nDOWNLOADER = 'scrapy.core.downloader.Downloader'\n\nDOWNLOADER_HTTPCLIENTFACTORY = 'scrapy.core.downloader.webclient.ScrapyHTTPClientFactory'\nDOWNLOADER_CLIENTCONTEXTFACTORY = 'scrapy.core.downloader.contextfactory.ScrapyClientContextFactory'\nDOWNLOADER_CLIENT_TLS_METHOD = 'TLS' # Use highest TLS/SSL protocol version supported by the platform,\n # also allowing negotiation\n\nDOWNLOADER_MIDDLEWARES = {}\n\nDOWNLOADER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,\n 'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': 300,\n 'scrapy.downloadermiddlewares.downloadtimeout.DownloadTimeoutMiddleware': 350,\n 'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 400,\n 'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 500,\n 'scrapy.downloadermiddlewares.retry.RetryMiddleware': 550,\n 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,\n 'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': 580,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,\n 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 600,\n 'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': 700,\n 
'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 750,\n 'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': 830,\n 'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,\n 'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': 900,\n # Downloader side\n}\n\nDOWNLOADER_STATS = True\n\nDUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'\n\ntry:\n EDITOR = os.environ['EDITOR']\nexcept KeyError:\n if sys.platform == 'win32':\n EDITOR = '%s -m idlelib.idle'\n else:\n EDITOR = 'vi'\n\nEXTENSIONS = {}\n\nEXTENSIONS_BASE = {\n 'scrapy.extensions.corestats.CoreStats': 0,\n 'scrapy.extensions.telnet.TelnetConsole': 0,\n 'scrapy.extensions.memusage.MemoryUsage': 0,\n 'scrapy.extensions.memdebug.MemoryDebugger': 0,\n 'scrapy.extensions.closespider.CloseSpider': 0,\n 'scrapy.extensions.feedexport.FeedExporter': 0,\n 'scrapy.extensions.logstats.LogStats': 0,\n 'scrapy.extensions.spiderstate.SpiderState': 0,\n 'scrapy.extensions.throttle.AutoThrottle': 0,\n}\n\nFEED_TEMPDIR = None\nFEED_URI = None\nFEED_URI_PARAMS = None # a function to extend uri arguments\nFEED_FORMAT = 'jsonlines'\nFEED_STORE_EMPTY = False\nFEED_EXPORT_FIELDS = None\nFEED_STORAGES = {}\nFEED_STORAGES_BASE = {\n '': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'file': 'scrapy.extensions.feedexport.FileFeedStorage',\n 'stdout': 'scrapy.extensions.feedexport.StdoutFeedStorage',\n 's3': 'scrapy.extensions.feedexport.S3FeedStorage',\n 'ftp': 'scrapy.extensions.feedexport.FTPFeedStorage',\n}\nFEED_EXPORTERS = {}\nFEED_EXPORTERS_BASE = {\n 'json': 'scrapy.exporters.JsonItemExporter',\n 'jsonlines': 'scrapy.exporters.JsonLinesItemExporter',\n 'jl': 'scrapy.exporters.JsonLinesItemExporter',\n 'csv': 'scrapy.exporters.CsvItemExporter',\n 'xml': 'scrapy.exporters.XmlItemExporter',\n 'marshal': 'scrapy.exporters.MarshalItemExporter',\n 'pickle': 'scrapy.exporters.PickleItemExporter',\n}\n\nFILES_STORE_S3_ACL = 'private'\nFILES_EXPIRES = 90\nFILES_URLS_FIELD = 'file_urls'\nFILES_RESULT_FIELD = 'files'\n\nHTTPCACHE_ENABLED = False\nHTTPCACHE_DIR = 'httpcache'\nHTTPCACHE_IGNORE_MISSING = False\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\nHTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_ALWAYS_STORE = False\nHTTPCACHE_IGNORE_HTTP_CODES = []\nHTTPCACHE_IGNORE_SCHEMES = ['file']\nHTTPCACHE_IGNORE_RESPONSE_CACHE_CONTROLS = []\nHTTPCACHE_DBM_MODULE = 'anydbm' if six.PY2 else 'dbm'\nHTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'\nHTTPCACHE_GZIP = False\n\nHTTPPROXY_AUTH_ENCODING = 'latin-1'\n\nIMAGES_MIN_WIDTH = 0\nIMAGES_MIN_HEIGHT = 0\nIMAGES_EXPIRES = 90\nIMAGES_THUMBS = {}\nIMAGES_URLS_FIELD = 'image_urls'\nIMAGES_RESULT_FIELD = 'images'\n\nITEM_PROCESSOR = 'scrapy.pipelines.ItemPipelineManager'\n\nITEM_PIPELINES = {}\nITEM_PIPELINES_BASE = {}\n\nLOG_ENABLED = True\nLOG_ENCODING = 'utf-8'\nLOG_FORMATTER = 'scrapy.logformatter.LogFormatter'\nLOG_FORMAT = '%(asctime)s [%(name)s] %(levelname)s: %(message)s'\nLOG_DATEFORMAT = '%Y-%m-%d %H:%M:%S'\nLOG_STDOUT = False\nLOG_LEVEL = 'DEBUG'\nLOG_FILE = None\n\nLOG_UNSERIALIZABLE_REQUESTS = False\n\nLOGSTATS_INTERVAL = 60.0\n\nMAIL_HOST = 'localhost'\nMAIL_PORT = 25\nMAIL_FROM = 'scrapy@localhost'\nMAIL_PASS = None\nMAIL_USER = None\n\nMEMDEBUG_ENABLED = False # enable memory debugging\nMEMDEBUG_NOTIFY = [] # send memory debugging report by mail at engine shutdown\n\nMEMUSAGE_CHECK_INTERVAL_SECONDS = 60.0\nMEMUSAGE_ENABLED = False\nMEMUSAGE_LIMIT_MB = 0\nMEMUSAGE_NOTIFY_MAIL = []\nMEMUSAGE_REPORT = False\nMEMUSAGE_WARNING_MB = 
0\n\nMETAREFRESH_ENABLED = True\nMETAREFRESH_MAXDELAY = 100\n\nNEWSPIDER_MODULE = ''\n\nRANDOMIZE_DOWNLOAD_DELAY = True\n\nREACTOR_THREADPOOL_MAXSIZE = 10\n\nREDIRECT_ENABLED = True\nREDIRECT_MAX_TIMES = 20 # uses Firefox default setting\nREDIRECT_PRIORITY_ADJUST = +2\n\nREFERER_ENABLED = True\n\nRETRY_ENABLED = True\nRETRY_TIMES = 2 # initial response + 2 retries = 3 requests\nRETRY_HTTP_CODES = [500, 502, 503, 504, 408]\nRETRY_PRIORITY_ADJUST = -1\n\nROBOTSTXT_OBEY = False\n\nSCHEDULER = 'scrapy.core.scheduler.Scheduler'\nSCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'\nSCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'\nSCHEDULER_PRIORITY_QUEUE = 'queuelib.PriorityQueue'\n\nSPIDER_LOADER_CLASS = 'scrapy.spiderloader.SpiderLoader'\n\nSPIDER_MIDDLEWARES = {}\n\nSPIDER_MIDDLEWARES_BASE = {\n # Engine side\n 'scrapy.spidermiddlewares.httperror.HttpErrorMiddleware': 50,\n 'scrapy.spidermiddlewares.offsite.OffsiteMiddleware': 500,\n 'scrapy.spidermiddlewares.referer.RefererMiddleware': 700,\n 'scrapy.spidermiddlewares.urllength.UrlLengthMiddleware': 800,\n 'scrapy.spidermiddlewares.depth.DepthMiddleware': 900,\n # Spider side\n}\n\nSPIDER_MODULES = []\n\nSTATS_CLASS = 'scrapy.statscollectors.MemoryStatsCollector'\nSTATS_DUMP = True\n\nSTATSMAILER_RCPTS = []\n\nTEMPLATES_DIR = abspath(join(dirname(__file__), '..', 'templates'))\n\nURLLENGTH_LIMIT = 2083\n\nUSER_AGENT = 'Scrapy/%s (+http://scrapy.org)' % import_module('scrapy').__version__\n\nTELNETCONSOLE_ENABLED = 1\nTELNETCONSOLE_PORT = [6023, 6073]\nTELNETCONSOLE_HOST = '127.0.0.1'\n\nSPIDER_CONTRACTS = {}\nSPIDER_CONTRACTS_BASE = {\n 'scrapy.contracts.default.UrlContract': 1,\n 'scrapy.contracts.default.ReturnsContract': 2,\n 'scrapy.contracts.default.ScrapesContract': 3,\n}\n", "path": "scrapy/settings/default_settings.py"}]}
3,806
276
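
The reordering in the scrapy golden diff above works because, assuming both `DefaultHeadersMiddleware` and `UserAgentMiddleware` only fill in a header when it is not already set (setdefault semantics), the middleware with the lower priority number runs first on outgoing requests and therefore wins. The plain-Python simulation below is an illustrative sketch of that interaction, not Scrapy's actual implementation; the stand-in middleware functions and sample header values are assumptions for demonstration.

```python
# Illustrative simulation: a lower priority number means closer to the engine,
# so that middleware processes the request earlier, and with setdefault
# semantics the first middleware to touch a header wins.

DEFAULT_REQUEST_HEADERS = {"User-Agent": "Mozilla/5.0 (custom UA from settings)"}
USER_AGENT = "Scrapy/1.x (+http://scrapy.org)"


def default_headers_mw(headers):
    # Stand-in for DefaultHeadersMiddleware: fill any header not already set.
    for key, value in DEFAULT_REQUEST_HEADERS.items():
        headers.setdefault(key, value)


def user_agent_mw(headers):
    # Stand-in for UserAgentMiddleware: fill User-Agent if not already set.
    headers.setdefault("User-Agent", USER_AGENT)


def process_request(middlewares):
    headers = {}
    for _priority, mw in sorted(middlewares):
        mw(headers)
    return headers


# Old base ordering: UserAgent (400) ran before DefaultHeaders (550),
# so DEFAULT_REQUEST_HEADERS could never supply the User-Agent.
print(process_request([(400, user_agent_mw), (550, default_headers_mw)]))

# New ordering from the patch: DefaultHeaders (400) before UserAgent (500),
# so a User-Agent set in DEFAULT_REQUEST_HEADERS takes effect as the issue expects.
print(process_request([(400, default_headers_mw), (500, user_agent_mw)]))
```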
gh_patches_debug_24176
rasdani/github-patches
git_diff
pystiche__pystiche-325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Download buttons for all examples are broken for RTD ![Screenshot from 2020-06-11 19-10-01](https://user-images.githubusercontent.com/6849766/84418262-34e6fd00-ac17-11ea-8fdc-7a5cb5f9034d.png) This is a bug in `sphinx-gallery` and should be fixed with sphinx-gallery/sphinx-gallery#706. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/source/conf.py` Content: ``` 1 # Configuration file for the Sphinx documentation builder. 2 # 3 # This file only contains a selection of the most common options. For a full list see 4 # the documentation: 5 # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 7 # -- Imports --------------------------------------------------------------------------- 8 9 import os 10 import shutil 11 import warnings 12 from datetime import datetime 13 from distutils.util import strtobool 14 from os import path 15 from urllib.parse import urljoin 16 17 from sphinx_gallery.sorting import ExampleTitleSortKey, ExplicitOrder 18 19 import torch 20 21 import pystiche 22 from pystiche.misc import download_file 23 24 # -- Run config ------------------------------------------------------------------------ 25 26 27 def get_bool_env_var(name, default=False): 28 try: 29 return bool(strtobool(os.environ[name])) 30 except KeyError: 31 return default 32 33 34 run_by_github_actions = get_bool_env_var("GITHUB_ACTIONS") 35 run_by_travis_ci = get_bool_env_var("TRAVIS") 36 run_by_appveyor = get_bool_env_var("APPVEYOR") 37 run_by_rtd = get_bool_env_var("READTHEDOCS") 38 run_by_ci = ( 39 run_by_github_actions 40 or run_by_travis_ci 41 or run_by_appveyor 42 or run_by_rtd 43 or get_bool_env_var("CI") 44 ) 45 46 # -- Path setup ------------------------------------------------------------------------ 47 48 # If extensions (or modules to document with autodoc) are in another directory, add 49 # these directories to sys.path here. If the directory is relative to the documentation 50 # root, use os.path.abspath to make it absolute, like shown here. 51 # 52 # import os 53 # import sys 54 # sys.path.insert(0, os.path.abspath('.')) 55 56 PROJECT_ROOT = path.abspath(path.join(path.dirname(__file__), "..", "..")) 57 58 59 # -- Project information --------------------------------------------------------------- 60 61 project = pystiche.__name__ 62 author = pystiche.__author__ 63 copyright = f"2019 - {datetime.now().year}, {author}" 64 version = release = pystiche.__version__ 65 66 67 # -- General configuration ------------------------------------------------------------- 68 69 # Add any Sphinx extension module names here, as strings. They can be extensions coming 70 # with Sphinx (named 'sphinx.ext.*') or your custom ones. 71 extensions = [ 72 "sphinx.ext.autodoc", 73 "sphinx.ext.napoleon", 74 "sphinx.ext.coverage", 75 "sphinx.ext.intersphinx", 76 "sphinxcontrib.bibtex", 77 "sphinx_gallery.gen_gallery", 78 "sphinx_autodoc_typehints", 79 ] 80 81 # Add any paths that contain templates here, relative to this directory. 82 templates_path = ["_templates"] 83 84 # List of patterns, relative to source directory, that match files and directories to 85 # ignore when looking for source files. This pattern also affects html_static_path and 86 # html_extra_path. 
87 exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] 88 89 # -- intersphinx configuration --------------------------------------------------------- 90 91 intersphinx_mapping = { 92 "python": ("https://docs.python.org/3.6", None), 93 "torch": ("https://pytorch.org/docs/stable/", None), 94 "torchvision": ("https://pytorch.org/docs/stable/", None), 95 "PIL": ("https://pillow.readthedocs.io/en/stable/", None), 96 "numpy": ("https://numpy.org/doc/1.18/", None), 97 "requests": ("https://requests.readthedocs.io/en/stable/", None), 98 "matplotlib": ("https://matplotlib.org", None), 99 } 100 101 102 # -- sphinx-gallery configuration ------------------------------------------------------ 103 104 plot_gallery = get_bool_env_var("PYSTICHE_PLOT_GALLERY", default=True) and not run_by_ci 105 download_gallery = get_bool_env_var("PYSTICHE_DOWNLOAD_GALLERY") or run_by_ci 106 107 if download_gallery: 108 base = "https://download.pystiche.org/galleries/" 109 file = ( 110 "master.zip" 111 if pystiche.__is_dev_version__ 112 else f"v{pystiche.__base_version__}.zip" 113 ) 114 115 url = urljoin(base, file) 116 print(f"Downloading pre-built galleries from {url}") 117 download_file(url, file) 118 119 shutil.unpack_archive(file, extract_dir=".") 120 os.remove(file) 121 122 extensions.remove("sphinx_gallery.gen_gallery") 123 extensions.append("sphinx_gallery.load_style") 124 plot_gallery = False 125 126 if plot_gallery and not torch.cuda.is_available(): 127 msg = ( 128 "The galleries will be built, but CUDA is not available. " 129 "This will take a long time." 130 ) 131 print(msg) 132 133 134 def show_cuda_memory(func): 135 torch.cuda.reset_peak_memory_stats() 136 out = func() 137 138 stats = torch.cuda.memory_stats() 139 peak_bytes_usage = stats["allocated_bytes.all.peak"] 140 memory = peak_bytes_usage / 1024 ** 2 141 142 return memory, out 143 144 145 class PysticheExampleTitleSortKey(ExampleTitleSortKey): 146 def __call__(self, filename): 147 # The beginner example *without* pystiche is placed before the example *with* 148 # to clarify the narrative. 149 if filename == "example_nst_without_pystiche.py": 150 return "1" 151 elif filename == "example_nst_with_pystiche.py": 152 return "2" 153 else: 154 return super().__call__(filename) 155 156 157 sphinx_gallery_conf = { 158 "examples_dirs": path.join(PROJECT_ROOT, "examples"), 159 "gallery_dirs": path.join("galleries", "examples"), 160 "filename_pattern": os.sep + "example_", 161 "line_numbers": True, 162 "remove_config_comments": True, 163 "plot_gallery": plot_gallery, 164 "subsection_order": ExplicitOrder( 165 [ 166 path.join("..", "..", "examples", sub_gallery) 167 for sub_gallery in ("beginner", "advanced") 168 ] 169 ), 170 "within_subsection_order": PysticheExampleTitleSortKey, 171 "show_memory": show_cuda_memory if torch.cuda.is_available() else True, 172 } 173 174 # Remove matplotlib agg warnings from generated doc when using plt.show 175 warnings.filterwarnings( 176 "ignore", 177 category=UserWarning, 178 message=( 179 "Matplotlib is currently using agg, which is a non-GUI backend, so cannot show " 180 "the figure." 181 ), 182 ) 183 184 185 # -- Options for HTML output ----------------------------------------------------------- 186 187 # The theme to use for HTML and HTML Help pages. See the documentation for a list of 188 # builtin themes. 189 html_theme = "sphinx_rtd_theme" 190 191 # Add any paths that contain custom static files (such as style sheets) here, relative 192 # to this directory. 
They are copied after the builtin static files, so a file named 193 # "default.css" will overwrite the builtin "default.css". 194 # html_static_path = ["_static"] 195 196 197 # -- Latex / Mathjax config ------------------------------------------------------------ 198 199 with open("custom_cmds.tex", "r") as fh: 200 custom_cmds = fh.read() 201 202 latex_elements = {"preamble": custom_cmds} 203 204 mathjax_inline = [r"\(" + custom_cmds, r"\)"] 205 mathjax_display = [r"\[" + custom_cmds, r"\]"] 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/source/conf.py b/docs/source/conf.py --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -7,6 +7,7 @@ # -- Imports --------------------------------------------------------------------------- import os +import re import shutil import warnings from datetime import datetime @@ -116,9 +117,26 @@ print(f"Downloading pre-built galleries from {url}") download_file(url, file) + try: + shutil.rmtree("galleries") + except FileNotFoundError: + pass shutil.unpack_archive(file, extract_dir=".") os.remove(file) + # This is workaround for a bug in sphinx-gallery that replaces absolute with + # relative paths. See https://github.com/pmeier/pystiche/pull/325 for details. + index_file = path.join("galleries", "examples", "index.rst") + with open(index_file, "r") as fh: + content = fh.read() + content = re.sub( + r"(?P<file>examples_(python|jupyter)\.zip) <[\w/.]+>", + r"\g<file> <\g<file>>", + content, + ) + with open(index_file, "w") as fh: + fh.write(content) + extensions.remove("sphinx_gallery.gen_gallery") extensions.append("sphinx_gallery.load_style") plot_gallery = False
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -7,6 +7,7 @@\n # -- Imports ---------------------------------------------------------------------------\n \n import os\n+import re\n import shutil\n import warnings\n from datetime import datetime\n@@ -116,9 +117,26 @@\n print(f\"Downloading pre-built galleries from {url}\")\n download_file(url, file)\n \n+ try:\n+ shutil.rmtree(\"galleries\")\n+ except FileNotFoundError:\n+ pass\n shutil.unpack_archive(file, extract_dir=\".\")\n os.remove(file)\n \n+ # This is workaround for a bug in sphinx-gallery that replaces absolute with\n+ # relative paths. See https://github.com/pmeier/pystiche/pull/325 for details.\n+ index_file = path.join(\"galleries\", \"examples\", \"index.rst\")\n+ with open(index_file, \"r\") as fh:\n+ content = fh.read()\n+ content = re.sub(\n+ r\"(?P<file>examples_(python|jupyter)\\.zip) <[\\w/.]+>\",\n+ r\"\\g<file> <\\g<file>>\",\n+ content,\n+ )\n+ with open(index_file, \"w\") as fh:\n+ fh.write(content)\n+\n extensions.remove(\"sphinx_gallery.gen_gallery\")\n extensions.append(\"sphinx_gallery.load_style\")\n plot_gallery = False\n", "issue": "Download buttons for all examples are broken for RTD\n![Screenshot from 2020-06-11 19-10-01](https://user-images.githubusercontent.com/6849766/84418262-34e6fd00-ac17-11ea-8fdc-7a5cb5f9034d.png)\r\n\r\nThis is a bug in `sphinx-gallery` and should be fixed with sphinx-gallery/sphinx-gallery#706.\n", "before_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full list see\n# the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Imports ---------------------------------------------------------------------------\n\nimport os\nimport shutil\nimport warnings\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom os import path\nfrom urllib.parse import urljoin\n\nfrom sphinx_gallery.sorting import ExampleTitleSortKey, ExplicitOrder\n\nimport torch\n\nimport pystiche\nfrom pystiche.misc import download_file\n\n# -- Run config ------------------------------------------------------------------------\n\n\ndef get_bool_env_var(name, default=False):\n try:\n return bool(strtobool(os.environ[name]))\n except KeyError:\n return default\n\n\nrun_by_github_actions = get_bool_env_var(\"GITHUB_ACTIONS\")\nrun_by_travis_ci = get_bool_env_var(\"TRAVIS\")\nrun_by_appveyor = get_bool_env_var(\"APPVEYOR\")\nrun_by_rtd = get_bool_env_var(\"READTHEDOCS\")\nrun_by_ci = (\n run_by_github_actions\n or run_by_travis_ci\n or run_by_appveyor\n or run_by_rtd\n or get_bool_env_var(\"CI\")\n)\n\n# -- Path setup ------------------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory, add\n# these directories to sys.path here. 
If the directory is relative to the documentation\n# root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nPROJECT_ROOT = path.abspath(path.join(path.dirname(__file__), \"..\", \"..\"))\n\n\n# -- Project information ---------------------------------------------------------------\n\nproject = pystiche.__name__\nauthor = pystiche.__author__\ncopyright = f\"2019 - {datetime.now().year}, {author}\"\nversion = release = pystiche.__version__\n\n\n# -- General configuration -------------------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions coming\n# with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.intersphinx\",\n \"sphinxcontrib.bibtex\",\n \"sphinx_gallery.gen_gallery\",\n \"sphinx_autodoc_typehints\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and directories to\n# ignore when looking for source files. This pattern also affects html_static_path and\n# html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# -- intersphinx configuration ---------------------------------------------------------\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.6\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/docs/stable/\", None),\n \"PIL\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/1.18/\", None),\n \"requests\": (\"https://requests.readthedocs.io/en/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org\", None),\n}\n\n\n# -- sphinx-gallery configuration ------------------------------------------------------\n\nplot_gallery = get_bool_env_var(\"PYSTICHE_PLOT_GALLERY\", default=True) and not run_by_ci\ndownload_gallery = get_bool_env_var(\"PYSTICHE_DOWNLOAD_GALLERY\") or run_by_ci\n\nif download_gallery:\n base = \"https://download.pystiche.org/galleries/\"\n file = (\n \"master.zip\"\n if pystiche.__is_dev_version__\n else f\"v{pystiche.__base_version__}.zip\"\n )\n\n url = urljoin(base, file)\n print(f\"Downloading pre-built galleries from {url}\")\n download_file(url, file)\n\n shutil.unpack_archive(file, extract_dir=\".\")\n os.remove(file)\n\n extensions.remove(\"sphinx_gallery.gen_gallery\")\n extensions.append(\"sphinx_gallery.load_style\")\n plot_gallery = False\n\nif plot_gallery and not torch.cuda.is_available():\n msg = (\n \"The galleries will be built, but CUDA is not available. 
\"\n \"This will take a long time.\"\n )\n print(msg)\n\n\ndef show_cuda_memory(func):\n torch.cuda.reset_peak_memory_stats()\n out = func()\n\n stats = torch.cuda.memory_stats()\n peak_bytes_usage = stats[\"allocated_bytes.all.peak\"]\n memory = peak_bytes_usage / 1024 ** 2\n\n return memory, out\n\n\nclass PysticheExampleTitleSortKey(ExampleTitleSortKey):\n def __call__(self, filename):\n # The beginner example *without* pystiche is placed before the example *with*\n # to clarify the narrative.\n if filename == \"example_nst_without_pystiche.py\":\n return \"1\"\n elif filename == \"example_nst_with_pystiche.py\":\n return \"2\"\n else:\n return super().__call__(filename)\n\n\nsphinx_gallery_conf = {\n \"examples_dirs\": path.join(PROJECT_ROOT, \"examples\"),\n \"gallery_dirs\": path.join(\"galleries\", \"examples\"),\n \"filename_pattern\": os.sep + \"example_\",\n \"line_numbers\": True,\n \"remove_config_comments\": True,\n \"plot_gallery\": plot_gallery,\n \"subsection_order\": ExplicitOrder(\n [\n path.join(\"..\", \"..\", \"examples\", sub_gallery)\n for sub_gallery in (\"beginner\", \"advanced\")\n ]\n ),\n \"within_subsection_order\": PysticheExampleTitleSortKey,\n \"show_memory\": show_cuda_memory if torch.cuda.is_available() else True,\n}\n\n# Remove matplotlib agg warnings from generated doc when using plt.show\nwarnings.filterwarnings(\n \"ignore\",\n category=UserWarning,\n message=(\n \"Matplotlib is currently using agg, which is a non-GUI backend, so cannot show \"\n \"the figure.\"\n ),\n)\n\n\n# -- Options for HTML output -----------------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for a list of\n# builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here, relative\n# to this directory. They are copied after the builtin static files, so a file named\n# \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = [\"_static\"]\n\n\n# -- Latex / Mathjax config ------------------------------------------------------------\n\nwith open(\"custom_cmds.tex\", \"r\") as fh:\n custom_cmds = fh.read()\n\nlatex_elements = {\"preamble\": custom_cmds}\n\nmathjax_inline = [r\"\\(\" + custom_cmds, r\"\\)\"]\nmathjax_display = [r\"\\[\" + custom_cmds, r\"\\]\"]\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. 
For a full list see\n# the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Imports ---------------------------------------------------------------------------\n\nimport os\nimport re\nimport shutil\nimport warnings\nfrom datetime import datetime\nfrom distutils.util import strtobool\nfrom os import path\nfrom urllib.parse import urljoin\n\nfrom sphinx_gallery.sorting import ExampleTitleSortKey, ExplicitOrder\n\nimport torch\n\nimport pystiche\nfrom pystiche.misc import download_file\n\n# -- Run config ------------------------------------------------------------------------\n\n\ndef get_bool_env_var(name, default=False):\n try:\n return bool(strtobool(os.environ[name]))\n except KeyError:\n return default\n\n\nrun_by_github_actions = get_bool_env_var(\"GITHUB_ACTIONS\")\nrun_by_travis_ci = get_bool_env_var(\"TRAVIS\")\nrun_by_appveyor = get_bool_env_var(\"APPVEYOR\")\nrun_by_rtd = get_bool_env_var(\"READTHEDOCS\")\nrun_by_ci = (\n run_by_github_actions\n or run_by_travis_ci\n or run_by_appveyor\n or run_by_rtd\n or get_bool_env_var(\"CI\")\n)\n\n# -- Path setup ------------------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory, add\n# these directories to sys.path here. If the directory is relative to the documentation\n# root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\n\nPROJECT_ROOT = path.abspath(path.join(path.dirname(__file__), \"..\", \"..\"))\n\n\n# -- Project information ---------------------------------------------------------------\n\nproject = pystiche.__name__\nauthor = pystiche.__author__\ncopyright = f\"2019 - {datetime.now().year}, {author}\"\nversion = release = pystiche.__version__\n\n\n# -- General configuration -------------------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions coming\n# with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.coverage\",\n \"sphinx.ext.intersphinx\",\n \"sphinxcontrib.bibtex\",\n \"sphinx_gallery.gen_gallery\",\n \"sphinx_autodoc_typehints\",\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and directories to\n# ignore when looking for source files. 
This pattern also affects html_static_path and\n# html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# -- intersphinx configuration ---------------------------------------------------------\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3.6\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/docs/stable/\", None),\n \"PIL\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n \"numpy\": (\"https://numpy.org/doc/1.18/\", None),\n \"requests\": (\"https://requests.readthedocs.io/en/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org\", None),\n}\n\n\n# -- sphinx-gallery configuration ------------------------------------------------------\n\nplot_gallery = get_bool_env_var(\"PYSTICHE_PLOT_GALLERY\", default=True) and not run_by_ci\ndownload_gallery = get_bool_env_var(\"PYSTICHE_DOWNLOAD_GALLERY\") or run_by_ci\n\nif download_gallery:\n base = \"https://download.pystiche.org/galleries/\"\n file = (\n \"master.zip\"\n if pystiche.__is_dev_version__\n else f\"v{pystiche.__base_version__}.zip\"\n )\n\n url = urljoin(base, file)\n print(f\"Downloading pre-built galleries from {url}\")\n download_file(url, file)\n\n try:\n shutil.rmtree(\"galleries\")\n except FileNotFoundError:\n pass\n shutil.unpack_archive(file, extract_dir=\".\")\n os.remove(file)\n\n # This is workaround for a bug in sphinx-gallery that replaces absolute with\n # relative paths. See https://github.com/pmeier/pystiche/pull/325 for details.\n index_file = path.join(\"galleries\", \"examples\", \"index.rst\")\n with open(index_file, \"r\") as fh:\n content = fh.read()\n content = re.sub(\n r\"(?P<file>examples_(python|jupyter)\\.zip) <[\\w/.]+>\",\n r\"\\g<file> <\\g<file>>\",\n content,\n )\n with open(index_file, \"w\") as fh:\n fh.write(content)\n\n extensions.remove(\"sphinx_gallery.gen_gallery\")\n extensions.append(\"sphinx_gallery.load_style\")\n plot_gallery = False\n\nif plot_gallery and not torch.cuda.is_available():\n msg = (\n \"The galleries will be built, but CUDA is not available. 
\"\n \"This will take a long time.\"\n )\n print(msg)\n\n\ndef show_cuda_memory(func):\n torch.cuda.reset_peak_memory_stats()\n out = func()\n\n stats = torch.cuda.memory_stats()\n peak_bytes_usage = stats[\"allocated_bytes.all.peak\"]\n memory = peak_bytes_usage / 1024 ** 2\n\n return memory, out\n\n\nclass PysticheExampleTitleSortKey(ExampleTitleSortKey):\n def __call__(self, filename):\n # The beginner example *without* pystiche is placed before the example *with*\n # to clarify the narrative.\n if filename == \"example_nst_without_pystiche.py\":\n return \"1\"\n elif filename == \"example_nst_with_pystiche.py\":\n return \"2\"\n else:\n return super().__call__(filename)\n\n\nsphinx_gallery_conf = {\n \"examples_dirs\": path.join(PROJECT_ROOT, \"examples\"),\n \"gallery_dirs\": path.join(\"galleries\", \"examples\"),\n \"filename_pattern\": os.sep + \"example_\",\n \"line_numbers\": True,\n \"remove_config_comments\": True,\n \"plot_gallery\": plot_gallery,\n \"subsection_order\": ExplicitOrder(\n [\n path.join(\"..\", \"..\", \"examples\", sub_gallery)\n for sub_gallery in (\"beginner\", \"advanced\")\n ]\n ),\n \"within_subsection_order\": PysticheExampleTitleSortKey,\n \"show_memory\": show_cuda_memory if torch.cuda.is_available() else True,\n}\n\n# Remove matplotlib agg warnings from generated doc when using plt.show\nwarnings.filterwarnings(\n \"ignore\",\n category=UserWarning,\n message=(\n \"Matplotlib is currently using agg, which is a non-GUI backend, so cannot show \"\n \"the figure.\"\n ),\n)\n\n\n# -- Options for HTML output -----------------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for a list of\n# builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here, relative\n# to this directory. They are copied after the builtin static files, so a file named\n# \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = [\"_static\"]\n\n\n# -- Latex / Mathjax config ------------------------------------------------------------\n\nwith open(\"custom_cmds.tex\", \"r\") as fh:\n custom_cmds = fh.read()\n\nlatex_elements = {\"preamble\": custom_cmds}\n\nmathjax_inline = [r\"\\(\" + custom_cmds, r\"\\)\"]\nmathjax_display = [r\"\\[\" + custom_cmds, r\"\\]\"]\n", "path": "docs/source/conf.py"}]}
2,408
316
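
The pystiche workaround above edits the generated `galleries/examples/index.rst` in place, rewriting the `examples_python.zip` / `examples_jupyter.zip` download targets back to bare relative filenames. The snippet below runs the same substitution over a single sample line so the pattern can be checked in isolation; the sample RST line and the path inside it are invented for illustration and are not taken from an actual build.

```python
import re

# Invented example of the kind of download line sphinx-gallery writes into
# galleries/examples/index.rst when the path bug produces an unusable target.
sample = (
    ":download:`Download all examples in Python source code: "
    "examples_python.zip </home/docs/build/galleries/examples/examples_python.zip>`"
)

# Same pattern and replacement as the workaround in docs/source/conf.py above.
fixed = re.sub(
    r"(?P<file>examples_(python|jupyter)\.zip) <[\w/.]+>",
    r"\g<file> <\g<file>>",
    sample,
)

print(fixed)
# ...examples_python.zip <examples_python.zip>`
```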
gh_patches_debug_282
rasdani/github-patches
git_diff
opendatacube__datacube-core-1331
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Code includes Type Annotations, but they're not made available for type checking (PEP561) **Summary** The ODC code is fairly well annotated with [Python type hints](https://typing.readthedocs.io/en/latest/), but these type hints aren't made availble for use in downstream packages by following [PEP 561 – Distributing and Packaging Type Information | peps.python.org](https://peps.python.org/pep-0561/). **Proposal** Since ODC Core includes inline type hints with the code, we need to follow [packaging type information - PEP561](https://peps.python.org/pep-0561/#packaging-type-information) by adding an empty `datacube/py.typed` file, and ensuring it's distributed by adding it to `package_data` in `setup.py`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 5 tests_require = [ 6 'hypothesis', 7 'pycodestyle', 8 'pylint', 9 'pytest', 10 'pytest-cov', 11 'pytest-timeout', 12 'pytest-httpserver', 13 'moto', 14 ] 15 doc_require = [ 16 'Sphinx', 17 'sphinx_rtd_theme', 18 'sphinx_autodoc_typehints', # Propagate mypy info into docs 19 'sphinx-click', 20 'recommonmark', 21 'setuptools', # version related dependencies 22 'setuptools_scm[toml]', 23 ] 24 25 extras_require = { 26 'performance': ['ciso8601', 'bottleneck'], 27 'distributed': ['distributed', 'dask[distributed]'], 28 'doc': doc_require, 29 's3': ['boto3', 'botocore'], 30 'test': tests_require, 31 'cf': ['compliance-checker>=4.0.0'], 32 } 33 34 extras_require['dev'] = sorted(set(sum([extras_require[k] for k in [ 35 'test', 36 'doc', 37 'performance', 38 's3', 39 'distributed', 40 ]], []))) 41 42 # An 'all' option, following ipython naming conventions. 
43 extras_require['all'] = sorted(set(sum(extras_require.values(), []))) 44 45 extra_plugins = dict(read=[], write=[], index=[]) 46 47 setup( 48 name='datacube', 49 python_requires='>=3.8.0', 50 51 url='https://github.com/opendatacube/datacube-core', 52 author='Open Data Cube', 53 maintainer='Open Data Cube', 54 maintainer_email='', 55 description='An analysis environment for satellite and other earth observation data', 56 long_description=open('README.rst').read(), 57 long_description_content_type='text/x-rst', 58 license='Apache License 2.0', 59 classifiers=[ 60 "Development Status :: 4 - Beta", 61 "Intended Audience :: Developers", 62 "Intended Audience :: Science/Research", 63 "License :: OSI Approved :: Apache Software License", 64 "Natural Language :: English", 65 "Operating System :: MacOS :: MacOS X", 66 "Operating System :: POSIX", 67 "Operating System :: POSIX :: BSD", 68 "Operating System :: POSIX :: Linux", 69 "Operating System :: Microsoft :: Windows", 70 "Programming Language :: Python", 71 "Programming Language :: Python :: 3", 72 "Programming Language :: Python :: 3.8", 73 "Programming Language :: Python :: 3.9", 74 "Programming Language :: Python :: 3.10", 75 "Topic :: Scientific/Engineering :: GIS", 76 "Topic :: Scientific/Engineering :: Information Analysis", 77 ], 78 79 packages=find_packages( 80 exclude=('tests', 'tests.*', 81 'integration_tests', 'integration_tests.*') 82 ), 83 package_data={ 84 '': ['*.yaml', '*/*.yaml'], 85 }, 86 scripts=[], 87 install_requires=[ 88 'affine', 89 'pyproj>=2.5', 90 'shapely>=1.6.4', 91 'cachetools', 92 'click>=5.0', 93 'cloudpickle>=0.4', 94 'dask[array]', 95 'distributed', 96 'jsonschema', 97 'netcdf4', 98 'numpy', 99 'psycopg2', 100 'lark', 101 'pandas', 102 'python-dateutil', 103 'pyyaml', 104 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1 105 'sqlalchemy', 106 'GeoAlchemy2', 107 'toolz', 108 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost 109 ], 110 extras_require=extras_require, 111 tests_require=tests_require, 112 113 entry_points={ 114 'console_scripts': [ 115 'datacube = datacube.scripts.cli_app:cli', 116 'datacube-search = datacube.scripts.search_tool:cli', 117 'datacube-worker = datacube.execution.worker:main', 118 ], 119 'datacube.plugins.io.read': [ 120 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init', 121 *extra_plugins['read'], 122 ], 123 'datacube.plugins.io.write': [ 124 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init', 125 *extra_plugins['write'], 126 ], 127 'datacube.plugins.index': [ 128 'default = datacube.index.postgres.index:index_driver_init', 129 'null = datacube.index.null.index:index_driver_init', 130 'memory = datacube.index.memory.index:index_driver_init', 131 'postgis = datacube.index.postgis.index:index_driver_init', 132 *extra_plugins['index'], 133 ], 134 }, 135 ) 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -82,6 +82,7 @@ ), package_data={ '': ['*.yaml', '*/*.yaml'], + 'datacube': ['py.typed'], }, scripts=[], install_requires=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -82,6 +82,7 @@\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n+ 'datacube': ['py.typed'],\n },\n scripts=[],\n install_requires=[\n", "issue": "Code includes Type Annotations, but they're not made available for type checking (PEP561)\n**Summary**\r\n\r\nThe ODC code is fairly well annotated with [Python type hints](https://typing.readthedocs.io/en/latest/), but these type hints aren't made availble for use in downstream packages by following [PEP 561 \u2013 Distributing and Packaging Type Information | peps.python.org](https://peps.python.org/pep-0561/).\r\n\r\n**Proposal**\r\n\r\nSince ODC Core includes inline type hints with the code, we need to follow [packaging type information - PEP561](https://peps.python.org/pep-0561/#packaging-type-information) by adding an empty `datacube/py.typed` file, and ensuring it's distributed by adding it to `package_data` in `setup.py`.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 
'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'hypothesis',\n 'pycodestyle',\n 'pylint',\n 'pytest',\n 'pytest-cov',\n 'pytest-timeout',\n 'pytest-httpserver',\n 'moto',\n]\ndoc_require = [\n 'Sphinx',\n 'sphinx_rtd_theme',\n 'sphinx_autodoc_typehints', # Propagate mypy info into docs\n 'sphinx-click',\n 'recommonmark',\n 'setuptools', # version related dependencies\n 'setuptools_scm[toml]',\n]\n\nextras_require = {\n 'performance': ['ciso8601', 'bottleneck'],\n 'distributed': ['distributed', 'dask[distributed]'],\n 'doc': doc_require,\n 's3': ['boto3', 'botocore'],\n 'test': tests_require,\n 'cf': ['compliance-checker>=4.0.0'],\n}\n\nextras_require['dev'] = sorted(set(sum([extras_require[k] for k in [\n 'test',\n 'doc',\n 'performance',\n 's3',\n 'distributed',\n]], [])))\n\n# An 'all' option, following ipython naming conventions.\nextras_require['all'] = sorted(set(sum(extras_require.values(), [])))\n\nextra_plugins = dict(read=[], write=[], index=[])\n\nsetup(\n name='datacube',\n python_requires='>=3.8.0',\n\n url='https://github.com/opendatacube/datacube-core',\n author='Open Data Cube',\n maintainer='Open Data Cube',\n maintainer_email='',\n description='An analysis environment for satellite and other earth observation data',\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n license='Apache License 2.0',\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Natural Language :: English\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: BSD\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: Microsoft :: Windows\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Topic :: Scientific/Engineering :: GIS\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n ],\n\n packages=find_packages(\n exclude=('tests', 'tests.*',\n 'integration_tests', 'integration_tests.*')\n ),\n package_data={\n '': ['*.yaml', '*/*.yaml'],\n 'datacube': 
['py.typed'],\n },\n scripts=[],\n install_requires=[\n 'affine',\n 'pyproj>=2.5',\n 'shapely>=1.6.4',\n 'cachetools',\n 'click>=5.0',\n 'cloudpickle>=0.4',\n 'dask[array]',\n 'distributed',\n 'jsonschema',\n 'netcdf4',\n 'numpy',\n 'psycopg2',\n 'lark',\n 'pandas',\n 'python-dateutil',\n 'pyyaml',\n 'rasterio>=1.3.2', # Warping broken in 1.3.0 and 1.3.1\n 'sqlalchemy',\n 'GeoAlchemy2',\n 'toolz',\n 'xarray>=0.9,!=2022.6.0', # >0.9 fixes most problems with `crs` attributes being lost\n ],\n extras_require=extras_require,\n tests_require=tests_require,\n\n entry_points={\n 'console_scripts': [\n 'datacube = datacube.scripts.cli_app:cli',\n 'datacube-search = datacube.scripts.search_tool:cli',\n 'datacube-worker = datacube.execution.worker:main',\n ],\n 'datacube.plugins.io.read': [\n 'netcdf = datacube.drivers.netcdf.driver:reader_driver_init',\n *extra_plugins['read'],\n ],\n 'datacube.plugins.io.write': [\n 'netcdf = datacube.drivers.netcdf.driver:writer_driver_init',\n *extra_plugins['write'],\n ],\n 'datacube.plugins.index': [\n 'default = datacube.index.postgres.index:index_driver_init',\n 'null = datacube.index.null.index:index_driver_init',\n 'memory = datacube.index.memory.index:index_driver_init',\n 'postgis = datacube.index.postgis.index:index_driver_init',\n *extra_plugins['index'],\n ],\n },\n)\n", "path": "setup.py"}]}
1,777
70
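
The datacube fix above is the standard PEP 561 recipe: ship an empty `py.typed` marker inside the package and make sure the build includes it via `package_data`. Below is a minimal, generic sketch of those two steps for a hypothetical package named `mypackage`; the name, version, and the file creation at the top are illustrative assumptions (in a real project the marker file is simply committed to the repository rather than created at build time).

```python
from pathlib import Path
from setuptools import setup, find_packages

# Illustration only: create a tiny package with the empty PEP 561 marker.
# In a real project, mypackage/__init__.py and mypackage/py.typed already exist.
pkg = Path("mypackage")
pkg.mkdir(exist_ok=True)
(pkg / "__init__.py").touch()
(pkg / "py.typed").touch()

setup(
    name="mypackage",
    version="0.1",
    packages=find_packages(),
    package_data={
        # Mirrors the datacube patch: ship the marker with the wheel/sdist.
        "mypackage": ["py.typed"],
    },
    zip_safe=False,  # PEP 561 packages should not be installed as zipped eggs
)
```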
gh_patches_debug_26282
rasdani/github-patches
git_diff
rotki__rotki-5256
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Think of a way to keep development accounts separately ## Abstract Between multiple development machines and between `production`/`develop` it becomes harder to keep track of which database has been used by which branch. This can lead to partially updated databases being used. As a proposal, we could somehow separate where the `develop` accounts and the production accounts are stored so that they are not sharing the same place. We can always copy accounts from production to develop manually (this can go to the guide). We should also consider how this affects nightlies and how nightlies are treated. For example we might want to treat nightlies as development to avoid having users mess accidentally with their production accounts. ## Motivation Helps better track which accounts are used in `develop`/`production`. ## Specification - TDB --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rotkehlchen/config.py` Content: ``` 1 import logging 2 import os 3 import platform 4 import shutil 5 from pathlib import Path 6 7 from rotkehlchen.logging import RotkehlchenLogsAdapter 8 9 logger = logging.getLogger(__name__) 10 log = RotkehlchenLogsAdapter(logger) 11 12 13 def get_xdg_data_home() -> Path: 14 directory = os.environ.get('XDG_DATA_HOME', None) 15 if directory is None: 16 home = os.path.expanduser("~") 17 directory = os.path.join(home, '.local', 'share') 18 19 return Path(directory) 20 21 22 def get_win32_appdata() -> Path: 23 directory = os.environ.get('LOCALAPPDATA', None) 24 if not directory: 25 # In windows XP there is no localappdata 26 directory = os.environ.get('APPDATA', None) 27 if not directory: 28 raise AssertionError('Could not detect an APPDATA directory') 29 30 return Path(directory) 31 32 33 def old_data_directory() -> Path: 34 home = os.path.expanduser("~") 35 directory = os.path.join(home, '.rotkehlchen') 36 return Path(directory) 37 38 39 def default_data_directory() -> Path: 40 """Find the default data directory for rotki for each different OS 41 42 An interesting lirary that finds the data directories per OS is this: 43 https://github.com/ActiveState/appdirs/blob/master/appdirs.py 44 """ 45 if platform.system() == 'Linux': 46 xdgconfig = get_xdg_data_home() 47 datadir = xdgconfig / 'rotki' / 'data' 48 elif platform.system() == 'Windows': 49 appdata = get_win32_appdata() 50 datadir = appdata / 'rotki' / 'data' 51 elif platform.system() == 'Darwin': 52 datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data')) 53 else: 54 raise AssertionError(f'rotki running in unknown system: {platform.system()}') 55 56 # If old data directory exists and new does not exist copy stuff 57 old_dir = old_data_directory() 58 if old_dir.exists() and not datadir.exists(): 59 log.info(f'First time using standard data directory. Copying from {old_dir} to {datadir}') 60 shutil.copytree(old_dir, datadir) 61 62 datadir.mkdir(parents=True, exist_ok=True) 63 return datadir 64 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rotkehlchen/config.py b/rotkehlchen/config.py --- a/rotkehlchen/config.py +++ b/rotkehlchen/config.py @@ -2,6 +2,7 @@ import os import platform import shutil +import sys from pathlib import Path from rotkehlchen.logging import RotkehlchenLogsAdapter @@ -42,14 +43,18 @@ An interesting lirary that finds the data directories per OS is this: https://github.com/ActiveState/appdirs/blob/master/appdirs.py """ + data_dir_name = 'data' + if getattr(sys, 'frozen', False) is False: + data_dir_name = 'develop_data' + if platform.system() == 'Linux': xdgconfig = get_xdg_data_home() - datadir = xdgconfig / 'rotki' / 'data' + datadir = xdgconfig / 'rotki' / data_dir_name elif platform.system() == 'Windows': appdata = get_win32_appdata() - datadir = appdata / 'rotki' / 'data' + datadir = appdata / 'rotki' / data_dir_name elif platform.system() == 'Darwin': - datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data')) + datadir = Path(os.path.expanduser(f'~/Library/Application Support/rotki/{data_dir_name}')) # noqa: E501 else: raise AssertionError(f'rotki running in unknown system: {platform.system()}')
{"golden_diff": "diff --git a/rotkehlchen/config.py b/rotkehlchen/config.py\n--- a/rotkehlchen/config.py\n+++ b/rotkehlchen/config.py\n@@ -2,6 +2,7 @@\n import os\n import platform\n import shutil\n+import sys\n from pathlib import Path\n \n from rotkehlchen.logging import RotkehlchenLogsAdapter\n@@ -42,14 +43,18 @@\n An interesting lirary that finds the data directories per OS is this:\n https://github.com/ActiveState/appdirs/blob/master/appdirs.py\n \"\"\"\n+ data_dir_name = 'data'\n+ if getattr(sys, 'frozen', False) is False:\n+ data_dir_name = 'develop_data'\n+\n if platform.system() == 'Linux':\n xdgconfig = get_xdg_data_home()\n- datadir = xdgconfig / 'rotki' / 'data'\n+ datadir = xdgconfig / 'rotki' / data_dir_name\n elif platform.system() == 'Windows':\n appdata = get_win32_appdata()\n- datadir = appdata / 'rotki' / 'data'\n+ datadir = appdata / 'rotki' / data_dir_name\n elif platform.system() == 'Darwin':\n- datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data'))\n+ datadir = Path(os.path.expanduser(f'~/Library/Application Support/rotki/{data_dir_name}')) # noqa: E501\n else:\n raise AssertionError(f'rotki running in unknown system: {platform.system()}')\n", "issue": "Think of a way to keep development accounts separately \n## Abstract\r\n\r\nBetween multiple development machines and between `production`/`develop` it becomes harder to keep track of which database has been used by which branch. This can lead to partially updated databases being used.\r\n\r\nAs a proposal, we could somehow separate where the `develop` accounts and the production accounts are stored so that they are not sharing the same place.\r\n\r\nWe can always copy accounts from production to develop manually (this can go to the guide).\r\n\r\nWe should also consider how this affects nightlies and how nightlies are treated. For example we might want to treat nightlies as development to avoid having users mess accidentally with their production accounts. 
\r\n\r\n## Motivation\r\n\r\nHelps better track which accounts are used in `develop`/`production`.\r\n\r\n## Specification\r\n\r\n- TDB\r\n\n", "before_files": [{"content": "import logging\nimport os\nimport platform\nimport shutil\nfrom pathlib import Path\n\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\ndef get_xdg_data_home() -> Path:\n directory = os.environ.get('XDG_DATA_HOME', None)\n if directory is None:\n home = os.path.expanduser(\"~\")\n directory = os.path.join(home, '.local', 'share')\n\n return Path(directory)\n\n\ndef get_win32_appdata() -> Path:\n directory = os.environ.get('LOCALAPPDATA', None)\n if not directory:\n # In windows XP there is no localappdata\n directory = os.environ.get('APPDATA', None)\n if not directory:\n raise AssertionError('Could not detect an APPDATA directory')\n\n return Path(directory)\n\n\ndef old_data_directory() -> Path:\n home = os.path.expanduser(\"~\")\n directory = os.path.join(home, '.rotkehlchen')\n return Path(directory)\n\n\ndef default_data_directory() -> Path:\n \"\"\"Find the default data directory for rotki for each different OS\n\n An interesting lirary that finds the data directories per OS is this:\n https://github.com/ActiveState/appdirs/blob/master/appdirs.py\n \"\"\"\n if platform.system() == 'Linux':\n xdgconfig = get_xdg_data_home()\n datadir = xdgconfig / 'rotki' / 'data'\n elif platform.system() == 'Windows':\n appdata = get_win32_appdata()\n datadir = appdata / 'rotki' / 'data'\n elif platform.system() == 'Darwin':\n datadir = Path(os.path.expanduser('~/Library/Application Support/rotki/data'))\n else:\n raise AssertionError(f'rotki running in unknown system: {platform.system()}')\n\n # If old data directory exists and new does not exist copy stuff\n old_dir = old_data_directory()\n if old_dir.exists() and not datadir.exists():\n log.info(f'First time using standard data directory. 
Copying from {old_dir} to {datadir}')\n shutil.copytree(old_dir, datadir)\n\n datadir.mkdir(parents=True, exist_ok=True)\n return datadir\n", "path": "rotkehlchen/config.py"}], "after_files": [{"content": "import logging\nimport os\nimport platform\nimport shutil\nimport sys\nfrom pathlib import Path\n\nfrom rotkehlchen.logging import RotkehlchenLogsAdapter\n\nlogger = logging.getLogger(__name__)\nlog = RotkehlchenLogsAdapter(logger)\n\n\ndef get_xdg_data_home() -> Path:\n directory = os.environ.get('XDG_DATA_HOME', None)\n if directory is None:\n home = os.path.expanduser(\"~\")\n directory = os.path.join(home, '.local', 'share')\n\n return Path(directory)\n\n\ndef get_win32_appdata() -> Path:\n directory = os.environ.get('LOCALAPPDATA', None)\n if not directory:\n # In windows XP there is no localappdata\n directory = os.environ.get('APPDATA', None)\n if not directory:\n raise AssertionError('Could not detect an APPDATA directory')\n\n return Path(directory)\n\n\ndef old_data_directory() -> Path:\n home = os.path.expanduser(\"~\")\n directory = os.path.join(home, '.rotkehlchen')\n return Path(directory)\n\n\ndef default_data_directory() -> Path:\n \"\"\"Find the default data directory for rotki for each different OS\n\n An interesting lirary that finds the data directories per OS is this:\n https://github.com/ActiveState/appdirs/blob/master/appdirs.py\n \"\"\"\n data_dir_name = 'data'\n if getattr(sys, 'frozen', False) is False:\n data_dir_name = 'develop_data'\n\n if platform.system() == 'Linux':\n xdgconfig = get_xdg_data_home()\n datadir = xdgconfig / 'rotki' / data_dir_name\n elif platform.system() == 'Windows':\n appdata = get_win32_appdata()\n datadir = appdata / 'rotki' / data_dir_name\n elif platform.system() == 'Darwin':\n datadir = Path(os.path.expanduser(f'~/Library/Application Support/rotki/{data_dir_name}')) # noqa: E501\n else:\n raise AssertionError(f'rotki running in unknown system: {platform.system()}')\n\n # If old data directory exists and new does not exist copy stuff\n old_dir = old_data_directory()\n if old_dir.exists() and not datadir.exists():\n log.info(f'First time using standard data directory. Copying from {old_dir} to {datadir}')\n shutil.copytree(old_dir, datadir)\n\n datadir.mkdir(parents=True, exist_ok=True)\n return datadir\n", "path": "rotkehlchen/config.py"}]}
1,043
357
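The golden diff in the rotkehlchen record above hinges on one technique: detecting whether the interpreter runs inside a frozen (PyInstaller-style) bundle and routing development runs to a separate folder. Below is a minimal, self-contained sketch of that check; the `base` argument is a simplified stand-in for the per-OS directory lookup in the real module, while the `rotki` and `develop_data` names are taken from the record itself.

```python
import sys
from pathlib import Path


def default_data_directory(base: Path) -> Path:
    """Pick the data directory name based on whether the interpreter is frozen.

    PyInstaller-style bundles set ``sys.frozen``, so plain interpreter runs
    (development) fall through to a separate ``develop_data`` folder.
    """
    data_dir_name = 'data' if getattr(sys, 'frozen', False) else 'develop_data'
    datadir = base / 'rotki' / data_dir_name
    datadir.mkdir(parents=True, exist_ok=True)
    return datadir


if __name__ == '__main__':
    # On a normal development interpreter this ends in .../rotki/develop_data.
    print(default_data_directory(Path.home() / '.local' / 'share'))
```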
gh_patches_debug_9457
rasdani/github-patches
git_diff
pypa__setuptools-3296
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] ImportError when using `importlib` with setuptools 60.9.0+ ### setuptools version `setuptools==62.1.0` (The repro below uses the latest setuptools version, however, the first affected version was `setuptools==60.9.0`) ### Python version Python 3.7 ### OS Docker image `python:3.7.13` which is Debian 11 (it also occurs on Ubuntu on Heroku) ### Additional environment information Reduced `requirements.txt`: ``` celery==5.2.2 Django==3.2.8 importlib-metadata==0.20 ``` ### Description Between setuptools 60.8.2 and 60.9.0, a previously working Django project now fails when using `importlib.import_module()`: ``` Traceback (most recent call last): File "<string>", line 1, in <module> File "/usr/local/lib/python3.7/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1006, in _gcd_import File "<frozen importlib._bootstrap>", line 983, in _find_and_load File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 677, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 728, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/testcase.py", line 2, in <module> from celery import Celery ImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py) ``` In the original project (which was from a customer), the `importlib.import_module()` usage was inside gunicorn, when it loads the provided WSGI application file. However, I've removed gunicorn (and much of the rest of the project) as part of creating a reduced testcase. The changes between those two setuptools versions are: https://github.com/pypa/setuptools/compare/v60.8.2...v60.9.0 In addition to the workaround of downgrading setuptools to `60.8.2`, I also found that making any of the following changes (each in isolation) prevents the error from occurring: - Upgrading Python to 3.8+ - Upgrading importlib-metadata from 0.20 to 0.21+ ([changelog](https://github.com/python/importlib_metadata/compare/0.20...0.21)) - Upgrading Django from 3.2.8 to 3.2.9+ ([changelog](https://github.com/django/django/compare/3.2.8...3.2.9), of which [this](https://github.com/django/django/commit/8bebb1c04a40a8bb45368415cd01ccbf32fd3236) seems relevant) - Upgrading Celery from 5.2.2 to 5.2.5 ([changelog](https://github.com/celery/celery/compare/v5.2.2...v5.2.5), of which [this](https://github.com/celery/celery/pull/7218) seems relevant) (skipped 5.2.3 and 5.2.4 since they pin setuptools to an earlier version that isn't affected by this) ### Expected behavior Either: 1. The import succeed with newer versions of setuptools, as it did before. 2. Or, setuptools outputs a warning/error to make debugging this easier, or otherwise blocks usage with incompatible `importlib-metadata` versions. ### How to Reproduce 1. `docker run --rm -it python:3.7.13 bash` 2. `echo -e "from django.core.wsgi import get_wsgi_application\nfrom celery import Celery" > testcase.py` 3. `pip install setuptools==62.1.0` 4. `pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20` 5. `python -c 'import importlib; importlib.import_module("testcase")'` ### Output ```console $ docker run --rm -it python:3.7.13 bash ... 
root@030ec79ad5e2:/# echo -e "from django.core.wsgi import get_wsgi_application\nfrom celery import Celery" > testcase.py root@030ec79ad5e2:/# pip install setuptools==62.1.0 ... root@030ec79ad5e2:/# pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20 ... Installing collected packages: wcwidth, pytz, cached-property, billiard, zipp, vine, typing-extensions, sqlparse, six, prompt-toolkit, importlib-metadata, asgiref, amqp, kombu, Django, click, click-repl, click-plugins, click-didyoumean, celery Successfully installed Django-3.2.8 amqp-5.1.1 asgiref-3.5.0 billiard-3.6.4.0 cached-property-1.5.2 celery-5.2.2 click-8.1.2 click-didyoumean-0.3.0 click-plugins-1.1.1 click-repl-0.2.0 importlib-metadata-0.20 kombu-5.2.4 prompt-toolkit-3.0.29 pytz-2022.1 six-1.16.0 sqlparse-0.4.2 typing-extensions-4.2.0 vine-5.0.0 wcwidth-0.2.5 zipp-3.8.0 root@030ec79ad5e2:/# python -c 'import importlib; importlib.import_module("testcase")' Traceback (most recent call last): File "<string>", line 1, in <module> File "/usr/local/lib/python3.7/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1006, in _gcd_import File "<frozen importlib._bootstrap>", line 983, in _find_and_load File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked File "<frozen importlib._bootstrap>", line 677, in _load_unlocked File "<frozen importlib._bootstrap_external>", line 728, in exec_module File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed File "/testcase.py", line 2, in <module> from celery import Celery ImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setuptools/_importlib.py` Content: ``` 1 import sys 2 3 4 def disable_importlib_metadata_finder(metadata): 5 """ 6 Ensure importlib_metadata doesn't provide older, incompatible 7 Distributions. 8 9 Workaround for #3102. 10 """ 11 try: 12 import importlib_metadata 13 except ImportError: 14 return 15 if importlib_metadata is metadata: 16 return 17 to_remove = [ 18 ob 19 for ob in sys.meta_path 20 if isinstance(ob, importlib_metadata.MetadataPathFinder) 21 ] 22 for item in to_remove: 23 sys.meta_path.remove(item) 24 25 26 if sys.version_info < (3, 10): 27 from setuptools.extern import importlib_metadata as metadata 28 disable_importlib_metadata_finder(metadata) 29 else: 30 import importlib.metadata as metadata # noqa: F401 31 32 33 if sys.version_info < (3, 9): 34 from setuptools.extern import importlib_resources as resources 35 else: 36 import importlib.resources as resources # noqa: F401 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setuptools/_importlib.py b/setuptools/_importlib.py --- a/setuptools/_importlib.py +++ b/setuptools/_importlib.py @@ -12,6 +12,17 @@ import importlib_metadata except ImportError: return + except AttributeError: + import warnings + + msg = ( + "`importlib-metadata` version is incompatible with `setuptools`.\n" + "This problem is likely to be solved by installing an updated version of " + "`importlib-metadata`." + ) + warnings.warn(msg) # Ensure a descriptive message is shown. + raise # This exception can be suppressed by _distutils_hack + if importlib_metadata is metadata: return to_remove = [
{"golden_diff": "diff --git a/setuptools/_importlib.py b/setuptools/_importlib.py\n--- a/setuptools/_importlib.py\n+++ b/setuptools/_importlib.py\n@@ -12,6 +12,17 @@\n import importlib_metadata\n except ImportError:\n return\n+ except AttributeError:\n+ import warnings\n+\n+ msg = (\n+ \"`importlib-metadata` version is incompatible with `setuptools`.\\n\"\n+ \"This problem is likely to be solved by installing an updated version of \"\n+ \"`importlib-metadata`.\"\n+ )\n+ warnings.warn(msg) # Ensure a descriptive message is shown.\n+ raise # This exception can be suppressed by _distutils_hack\n+\n if importlib_metadata is metadata:\n return\n to_remove = [\n", "issue": "[BUG] ImportError when using `importlib` with setuptools 60.9.0+\n### setuptools version\r\n\r\n`setuptools==62.1.0`\r\n\r\n(The repro below uses the latest setuptools version, however, the first affected version was `setuptools==60.9.0`)\r\n\r\n### Python version\r\n\r\nPython 3.7\r\n\r\n### OS\r\n\r\nDocker image `python:3.7.13` which is Debian 11 (it also occurs on Ubuntu on Heroku)\r\n\r\n### Additional environment information\r\n\r\nReduced `requirements.txt`:\r\n\r\n```\r\ncelery==5.2.2\r\nDjango==3.2.8\r\nimportlib-metadata==0.20\r\n```\r\n\r\n### Description\r\n\r\nBetween setuptools 60.8.2 and 60.9.0, a previously working Django project now fails when using `importlib.import_module()`:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/testcase.py\", line 2, in <module>\r\n from celery import Celery\r\nImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py)\r\n```\r\n\r\nIn the original project (which was from a customer), the `importlib.import_module()` usage was inside gunicorn, when it loads the provided WSGI application file. 
However, I've removed gunicorn (and much of the rest of the project) as part of creating a reduced testcase.\r\n\r\nThe changes between those two setuptools versions are:\r\nhttps://github.com/pypa/setuptools/compare/v60.8.2...v60.9.0\r\n\r\nIn addition to the workaround of downgrading setuptools to `60.8.2`, I also found that making any of the following changes (each in isolation) prevents the error from occurring:\r\n- Upgrading Python to 3.8+\r\n- Upgrading importlib-metadata from 0.20 to 0.21+ ([changelog](https://github.com/python/importlib_metadata/compare/0.20...0.21))\r\n- Upgrading Django from 3.2.8 to 3.2.9+ ([changelog](https://github.com/django/django/compare/3.2.8...3.2.9), of which [this](https://github.com/django/django/commit/8bebb1c04a40a8bb45368415cd01ccbf32fd3236) seems relevant)\r\n- Upgrading Celery from 5.2.2 to 5.2.5 ([changelog](https://github.com/celery/celery/compare/v5.2.2...v5.2.5), of which [this](https://github.com/celery/celery/pull/7218) seems relevant) (skipped 5.2.3 and 5.2.4 since they pin setuptools to an earlier version that isn't affected by this)\r\n\r\n### Expected behavior\r\n\r\nEither:\r\n1. The import succeed with newer versions of setuptools, as it did before.\r\n2. Or, setuptools outputs a warning/error to make debugging this easier, or otherwise blocks usage with incompatible `importlib-metadata` versions.\r\n\r\n### How to Reproduce\r\n\r\n1. `docker run --rm -it python:3.7.13 bash`\r\n2. `echo -e \"from django.core.wsgi import get_wsgi_application\\nfrom celery import Celery\" > testcase.py`\r\n3. `pip install setuptools==62.1.0`\r\n4. `pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20`\r\n5. `python -c 'import importlib; importlib.import_module(\"testcase\")'`\r\n\r\n### Output\r\n\r\n```console\r\n$ docker run --rm -it python:3.7.13 bash\r\n...\r\nroot@030ec79ad5e2:/# echo -e \"from django.core.wsgi import get_wsgi_application\\nfrom celery import Celery\" > testcase.py\r\nroot@030ec79ad5e2:/# pip install setuptools==62.1.0\r\n...\r\nroot@030ec79ad5e2:/# pip install celery==5.2.2 Django==3.2.8 importlib-metadata==0.20\r\n...\r\nInstalling collected packages: wcwidth, pytz, cached-property, billiard, zipp, vine, typing-extensions, sqlparse, six, prompt-toolkit, importlib-metadata, asgiref, amqp, kombu, Django, click, click-repl, click-plugins, click-didyoumean, celery\r\nSuccessfully installed Django-3.2.8 amqp-5.1.1 asgiref-3.5.0 billiard-3.6.4.0 cached-property-1.5.2 celery-5.2.2 click-8.1.2 click-didyoumean-0.3.0 click-plugins-1.1.1 click-repl-0.2.0 importlib-metadata-0.20 kombu-5.2.4 prompt-toolkit-3.0.29 pytz-2022.1 six-1.16.0 sqlparse-0.4.2 typing-extensions-4.2.0 vine-5.0.0 wcwidth-0.2.5 zipp-3.8.0\r\n\r\nroot@030ec79ad5e2:/# python -c 'import importlib; importlib.import_module(\"testcase\")'\r\nTraceback (most recent call last):\r\n File \"<string>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/testcase.py\", line 2, in <module>\r\n 
from celery import Celery\r\nImportError: cannot import name 'Celery' from 'celery' (/usr/local/lib/python3.7/site-packages/celery/__init__.py)\r\n```\n", "before_files": [{"content": "import sys\n\n\ndef disable_importlib_metadata_finder(metadata):\n \"\"\"\n Ensure importlib_metadata doesn't provide older, incompatible\n Distributions.\n\n Workaround for #3102.\n \"\"\"\n try:\n import importlib_metadata\n except ImportError:\n return\n if importlib_metadata is metadata:\n return\n to_remove = [\n ob\n for ob in sys.meta_path\n if isinstance(ob, importlib_metadata.MetadataPathFinder)\n ]\n for item in to_remove:\n sys.meta_path.remove(item)\n\n\nif sys.version_info < (3, 10):\n from setuptools.extern import importlib_metadata as metadata\n disable_importlib_metadata_finder(metadata)\nelse:\n import importlib.metadata as metadata # noqa: F401\n\n\nif sys.version_info < (3, 9):\n from setuptools.extern import importlib_resources as resources\nelse:\n import importlib.resources as resources # noqa: F401\n", "path": "setuptools/_importlib.py"}], "after_files": [{"content": "import sys\n\n\ndef disable_importlib_metadata_finder(metadata):\n \"\"\"\n Ensure importlib_metadata doesn't provide older, incompatible\n Distributions.\n\n Workaround for #3102.\n \"\"\"\n try:\n import importlib_metadata\n except ImportError:\n return\n except AttributeError:\n import warnings\n\n msg = (\n \"`importlib-metadata` version is incompatible with `setuptools`.\\n\"\n \"This problem is likely to be solved by installing an updated version of \"\n \"`importlib-metadata`.\"\n )\n warnings.warn(msg) # Ensure a descriptive message is shown.\n raise # This exception can be suppressed by _distutils_hack\n\n if importlib_metadata is metadata:\n return\n to_remove = [\n ob\n for ob in sys.meta_path\n if isinstance(ob, importlib_metadata.MetadataPathFinder)\n ]\n for item in to_remove:\n sys.meta_path.remove(item)\n\n\nif sys.version_info < (3, 10):\n from setuptools.extern import importlib_metadata as metadata\n disable_importlib_metadata_finder(metadata)\nelse:\n import importlib.metadata as metadata # noqa: F401\n\n\nif sys.version_info < (3, 9):\n from setuptools.extern import importlib_resources as resources\nelse:\n import importlib.resources as resources # noqa: F401\n", "path": "setuptools/_importlib.py"}]}
2,188
173
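The setuptools record above works by pruning stale `importlib_metadata` finders from `sys.meta_path` (and, in the golden diff, warning when an incompatible version cannot even be imported). As a rough, generic sketch of that pruning step, assuming only the standard library, the helper below removes every finder of a given type and returns the removed objects so a caller could restore them:

```python
import sys


def remove_finders_of_type(finder_type: type) -> list:
    """Drop every sys.meta_path finder that is an instance of finder_type.

    Returns the removed finders so a caller can re-append them later.
    """
    removed = [finder for finder in sys.meta_path if isinstance(finder, finder_type)]
    for finder in removed:
        sys.meta_path.remove(finder)
    return removed


if __name__ == '__main__':
    # Listing the active finders is usually enough to spot an old
    # importlib_metadata.MetadataPathFinder shadowing the bundled copy.
    for finder in sys.meta_path:
        print(finder)
```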
gh_patches_debug_12204
rasdani/github-patches
git_diff
conda__conda-5273
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- conda env export under python2 is ug ``` $ python2 -m conda_env export -p /conda name: null channels: - !!python/unicode 'file:///Users/kfranz/.conda/conda-bld' - !!python/unicode 'file:///conda/conda-bld' - !!python/unicode 'bkreider' - !!python/unicode 'conda-canary' - !!python/unicode 'conda-forge' - !!python/unicode 'defaults' dependencies: - !!python/unicode 'wget=1.15=2' - !!python/unicode 'conda=4.3.0=py27_0' - !!python/unicode 'conda-env=2.6.0=0' - !!python/unicode 'filelock=2.0.6=py27_0' - !!python/unicode 'boltons=16.3.1=py27_0' - !!python/unicode 'ca-certificates=2016.8.31=0' - !!python/unicode 'certifi=2016.8.31=py27_0' - !!python/unicode 'functools32=3.2.3.2=py27_1' ... ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `conda_env/yaml.py` Content: ``` 1 """ 2 Wrapper around yaml to ensure that everything is ordered correctly. 3 4 This is based on the answer at http://stackoverflow.com/a/16782282 5 """ 6 from __future__ import absolute_import, print_function 7 from collections import OrderedDict 8 9 from conda.common.yaml import get_yaml 10 yaml = get_yaml() 11 12 13 def represent_ordereddict(dumper, data): 14 value = [] 15 16 for item_key, item_value in data.items(): 17 node_key = dumper.represent_data(item_key) 18 node_value = dumper.represent_data(item_value) 19 20 value.append((node_key, node_value)) 21 22 return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value) 23 24 25 yaml.add_representer(OrderedDict, represent_ordereddict) 26 27 dump = yaml.dump 28 load = yaml.load 29 dict = OrderedDict 30 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/conda_env/yaml.py b/conda_env/yaml.py --- a/conda_env/yaml.py +++ b/conda_env/yaml.py @@ -6,6 +6,7 @@ from __future__ import absolute_import, print_function from collections import OrderedDict +from conda.common.compat import PY2 from conda.common.yaml import get_yaml yaml = get_yaml() @@ -24,6 +25,12 @@ yaml.add_representer(OrderedDict, represent_ordereddict) +if PY2: + def represent_unicode(self, data): + return self.represent_str(data.encode('utf-8')) + + yaml.add_representer(unicode, represent_unicode) # NOQA + dump = yaml.dump load = yaml.load dict = OrderedDict
{"golden_diff": "diff --git a/conda_env/yaml.py b/conda_env/yaml.py\n--- a/conda_env/yaml.py\n+++ b/conda_env/yaml.py\n@@ -6,6 +6,7 @@\n from __future__ import absolute_import, print_function\n from collections import OrderedDict\n \n+from conda.common.compat import PY2\n from conda.common.yaml import get_yaml\n yaml = get_yaml()\n \n@@ -24,6 +25,12 @@\n \n yaml.add_representer(OrderedDict, represent_ordereddict)\n \n+if PY2:\n+ def represent_unicode(self, data):\n+ return self.represent_str(data.encode('utf-8'))\n+\n+ yaml.add_representer(unicode, represent_unicode) # NOQA\n+\n dump = yaml.dump\n load = yaml.load\n dict = OrderedDict\n", "issue": "conda env export under python2 is ug\n```\r\n$ python2 -m conda_env export -p /conda\r\nname: null\r\nchannels:\r\n- !!python/unicode\r\n 'file:///Users/kfranz/.conda/conda-bld'\r\n- !!python/unicode\r\n 'file:///conda/conda-bld'\r\n- !!python/unicode\r\n 'bkreider'\r\n- !!python/unicode\r\n 'conda-canary'\r\n- !!python/unicode\r\n 'conda-forge'\r\n- !!python/unicode\r\n 'defaults'\r\ndependencies:\r\n- !!python/unicode\r\n 'wget=1.15=2'\r\n- !!python/unicode\r\n 'conda=4.3.0=py27_0'\r\n- !!python/unicode\r\n 'conda-env=2.6.0=0'\r\n- !!python/unicode\r\n 'filelock=2.0.6=py27_0'\r\n- !!python/unicode\r\n 'boltons=16.3.1=py27_0'\r\n- !!python/unicode\r\n 'ca-certificates=2016.8.31=0'\r\n- !!python/unicode\r\n 'certifi=2016.8.31=py27_0'\r\n- !!python/unicode\r\n 'functools32=3.2.3.2=py27_1'\r\n...\r\n```\n", "before_files": [{"content": "\"\"\"\nWrapper around yaml to ensure that everything is ordered correctly.\n\nThis is based on the answer at http://stackoverflow.com/a/16782282\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import OrderedDict\n\nfrom conda.common.yaml import get_yaml\nyaml = get_yaml()\n\n\ndef represent_ordereddict(dumper, data):\n value = []\n\n for item_key, item_value in data.items():\n node_key = dumper.represent_data(item_key)\n node_value = dumper.represent_data(item_value)\n\n value.append((node_key, node_value))\n\n return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)\n\n\nyaml.add_representer(OrderedDict, represent_ordereddict)\n\ndump = yaml.dump\nload = yaml.load\ndict = OrderedDict\n", "path": "conda_env/yaml.py"}], "after_files": [{"content": "\"\"\"\nWrapper around yaml to ensure that everything is ordered correctly.\n\nThis is based on the answer at http://stackoverflow.com/a/16782282\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import OrderedDict\n\nfrom conda.common.compat import PY2\nfrom conda.common.yaml import get_yaml\nyaml = get_yaml()\n\n\ndef represent_ordereddict(dumper, data):\n value = []\n\n for item_key, item_value in data.items():\n node_key = dumper.represent_data(item_key)\n node_value = dumper.represent_data(item_value)\n\n value.append((node_key, node_value))\n\n return yaml.nodes.MappingNode(u'tag:yaml.org,2002:map', value)\n\n\nyaml.add_representer(OrderedDict, represent_ordereddict)\n\nif PY2:\n def represent_unicode(self, data):\n return self.represent_str(data.encode('utf-8'))\n\n yaml.add_representer(unicode, represent_unicode) # NOQA\n\ndump = yaml.dump\nload = yaml.load\ndict = OrderedDict\n", "path": "conda_env/yaml.py"}]}
795
177
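The conda_env record above is, at bottom, about PyYAML representers: one is registered for OrderedDict so environment files dump as plain mappings, and the golden diff adds one for `unicode` under Python 2 so entries stop being emitted with `!!python/unicode` tags. A small Python 3 sketch of the OrderedDict half, using plain PyYAML rather than conda's `get_yaml()` wrapper:

```python
from collections import OrderedDict

import yaml  # PyYAML


def represent_ordereddict(dumper, data):
    """Emit an OrderedDict as an ordinary YAML mapping, preserving key order."""
    pairs = [(dumper.represent_data(key), dumper.represent_data(value))
             for key, value in data.items()]
    return yaml.nodes.MappingNode('tag:yaml.org,2002:map', pairs)


yaml.add_representer(OrderedDict, represent_ordereddict)

if __name__ == '__main__':
    doc = OrderedDict([('name', 'example'), ('channels', ['defaults'])])
    # Without the representer the default Dumper would emit a
    # !!python/object/apply:collections.OrderedDict tag instead.
    print(yaml.dump(doc, default_flow_style=False))
```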
gh_patches_debug_11179
rasdani/github-patches
git_diff
akvo__akvo-rsr-2213
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add reporting template for Plan Finland We should add the reporting template that @stellanl and @Geerts are working on to the "My reports" section. Preferably so that only superusers / admins / Plan Finland employees can see this, but we might need a little hack for that. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `akvo/rsr/reports.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 from django.utils.translation import ugettext_lazy as _ 8 9 # Data for all available reports from ReportServer, with the following fields: 10 # - key: A unique identifier for the report 11 # - title: The title of the report, will be shown on the 'My reports' page as such 12 # - description: The description of the report, as displayed on the 'My reports' page 13 # - formats: The available formats for the report, see options below 14 # - parameters: The available parameters for the report, options; ['project', 'organisation'] 15 # - url: The URL where the report is available. Parameter(s) should be indicated in between {..}'s. 16 17 REPORTS = [ 18 { 19 'key': 'results-framework', 20 'title': unicode(_('Results and indicators overview')), 21 'description': unicode(_('This report gives an overview of the status of your project\'s ' 22 'results and indicators.')), 23 'formats': ['pdf',], 24 'parameters': ['project', ], 25 'url': '/en/reports/project_results/{project}?format={format}&download=true' 26 }, 27 { 28 'key': 'results-simple-table', 29 'title': unicode(_('Results and indicators table')), 30 'description': unicode(_('This report provides a view of your project\'s results and ' 31 'indicators data in a table.')), 32 'formats': ['excel',], 33 'parameters': ['project', ], 34 'url': '/en/reports/project_results_simple_table/{project}?format={format}&download=true' 35 }, 36 { 37 'key': 'projects-overview', 38 'title': unicode(_('Projects overview')), 39 'description': unicode(_('This report provides information about your organisation\'s ' 40 'projects: amount of updates, country, total budgets, project ' 41 'statuses, start- and end dates.')), 42 'formats': ['pdf', 'excel'], 43 'parameters': ['organisation', ], 44 'url': '/en/reports/project_overview/{organisation}?format={format}&download=true' 45 }, 46 { 47 'key': 'data-quality', 48 'title': unicode(_('Data quality overview')), 49 'description': unicode(_('This report gives an overview of your organisation\'s projects ' 50 'that have passed the planned end date, need funding or that ' 51 'haven\'t been edited or updated for 3 months.')), 52 'formats': ['pdf', 'excel'], 53 'parameters': ['organisation', ], 54 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true' 55 } 56 ] 57 58 # Data for all available formats from ReportServer, with the following fields: 59 # - key: A unique identifier for the format, also used in the formats field of the reports 60 # - displayName: The display name of the format, as displayed on the 'My reports' page 61 # - icon: The font awesome icon of the format, as displayed on the 'My reports' page 62 63 FORMATS = [ 64 { 65 'key': 'pdf', 66 
'displayName': 'PDF', 67 'icon': 'file-pdf-o', 68 }, 69 { 70 'key': 'excel', 71 'displayName': 'Excel', 72 'icon': 'file-excel-o', 73 }, 74 { 75 'key': 'word', 76 'displayName': 'Word', 77 'icon': 'file-word-o', 78 }, 79 { 80 'key': 'html', 81 'displayName': 'HTML', 82 'icon': 'code', 83 }, 84 ] 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/akvo/rsr/reports.py b/akvo/rsr/reports.py --- a/akvo/rsr/reports.py +++ b/akvo/rsr/reports.py @@ -52,6 +52,15 @@ 'formats': ['pdf', 'excel'], 'parameters': ['organisation', ], 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true' + }, + { + 'key': 'plan-finland', + 'title': unicode(_('Plan Finland report')), + 'description': unicode(_('This custom MFA report for Plan Finland gives an overview of the ' + 'hierarchy of Plan Finland\'s projects and their results.')), + 'formats': ['pdf', ], + 'parameters': ['project', ], + 'url': '/en/reports/plan_finland/{project}?format={format}&download=true' } ]
{"golden_diff": "diff --git a/akvo/rsr/reports.py b/akvo/rsr/reports.py\n--- a/akvo/rsr/reports.py\n+++ b/akvo/rsr/reports.py\n@@ -52,6 +52,15 @@\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true'\n+ },\n+ {\n+ 'key': 'plan-finland',\n+ 'title': unicode(_('Plan Finland report')),\n+ 'description': unicode(_('This custom MFA report for Plan Finland gives an overview of the '\n+ 'hierarchy of Plan Finland\\'s projects and their results.')),\n+ 'formats': ['pdf', ],\n+ 'parameters': ['project', ],\n+ 'url': '/en/reports/plan_finland/{project}?format={format}&download=true'\n }\n ]\n", "issue": "Add reporting template for Plan Finland\nWe should add the reporting template that @stellanl and @Geerts are working on to the \"My reports\" section. Preferably so that only superusers / admins / Plan Finland employees can see this, but we might need a little hack for that.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n# Data for all available reports from ReportServer, with the following fields:\n# - key: A unique identifier for the report\n# - title: The title of the report, will be shown on the 'My reports' page as such\n# - description: The description of the report, as displayed on the 'My reports' page\n# - formats: The available formats for the report, see options below\n# - parameters: The available parameters for the report, options; ['project', 'organisation']\n# - url: The URL where the report is available. 
Parameter(s) should be indicated in between {..}'s.\n\nREPORTS = [\n {\n 'key': 'results-framework',\n 'title': unicode(_('Results and indicators overview')),\n 'description': unicode(_('This report gives an overview of the status of your project\\'s '\n 'results and indicators.')),\n 'formats': ['pdf',],\n 'parameters': ['project', ],\n 'url': '/en/reports/project_results/{project}?format={format}&download=true'\n },\n {\n 'key': 'results-simple-table',\n 'title': unicode(_('Results and indicators table')),\n 'description': unicode(_('This report provides a view of your project\\'s results and '\n 'indicators data in a table.')),\n 'formats': ['excel',],\n 'parameters': ['project', ],\n 'url': '/en/reports/project_results_simple_table/{project}?format={format}&download=true'\n },\n {\n 'key': 'projects-overview',\n 'title': unicode(_('Projects overview')),\n 'description': unicode(_('This report provides information about your organisation\\'s '\n 'projects: amount of updates, country, total budgets, project '\n 'statuses, start- and end dates.')),\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/project_overview/{organisation}?format={format}&download=true'\n },\n {\n 'key': 'data-quality',\n 'title': unicode(_('Data quality overview')),\n 'description': unicode(_('This report gives an overview of your organisation\\'s projects '\n 'that have passed the planned end date, need funding or that '\n 'haven\\'t been edited or updated for 3 months.')),\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true'\n }\n]\n\n# Data for all available formats from ReportServer, with the following fields:\n# - key: A unique identifier for the format, also used in the formats field of the reports\n# - displayName: The display name of the format, as displayed on the 'My reports' page\n# - icon: The font awesome icon of the format, as displayed on the 'My reports' page\n\nFORMATS = [\n {\n 'key': 'pdf',\n 'displayName': 'PDF',\n 'icon': 'file-pdf-o',\n },\n {\n 'key': 'excel',\n 'displayName': 'Excel',\n 'icon': 'file-excel-o',\n },\n {\n 'key': 'word',\n 'displayName': 'Word',\n 'icon': 'file-word-o',\n },\n {\n 'key': 'html',\n 'displayName': 'HTML',\n 'icon': 'code',\n },\n]\n", "path": "akvo/rsr/reports.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.utils.translation import ugettext_lazy as _\n\n# Data for all available reports from ReportServer, with the following fields:\n# - key: A unique identifier for the report\n# - title: The title of the report, will be shown on the 'My reports' page as such\n# - description: The description of the report, as displayed on the 'My reports' page\n# - formats: The available formats for the report, see options below\n# - parameters: The available parameters for the report, options; ['project', 'organisation']\n# - url: The URL where the report is available. 
Parameter(s) should be indicated in between {..}'s.\n\nREPORTS = [\n {\n 'key': 'results-framework',\n 'title': unicode(_('Results and indicators overview')),\n 'description': unicode(_('This report gives an overview of the status of your project\\'s '\n 'results and indicators.')),\n 'formats': ['pdf',],\n 'parameters': ['project', ],\n 'url': '/en/reports/project_results/{project}?format={format}&download=true'\n },\n {\n 'key': 'results-simple-table',\n 'title': unicode(_('Results and indicators table')),\n 'description': unicode(_('This report provides a view of your project\\'s results and '\n 'indicators data in a table.')),\n 'formats': ['excel',],\n 'parameters': ['project', ],\n 'url': '/en/reports/project_results_simple_table/{project}?format={format}&download=true'\n },\n {\n 'key': 'projects-overview',\n 'title': unicode(_('Projects overview')),\n 'description': unicode(_('This report provides information about your organisation\\'s '\n 'projects: amount of updates, country, total budgets, project '\n 'statuses, start- and end dates.')),\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/project_overview/{organisation}?format={format}&download=true'\n },\n {\n 'key': 'data-quality',\n 'title': unicode(_('Data quality overview')),\n 'description': unicode(_('This report gives an overview of your organisation\\'s projects '\n 'that have passed the planned end date, need funding or that '\n 'haven\\'t been edited or updated for 3 months.')),\n 'formats': ['pdf', 'excel'],\n 'parameters': ['organisation', ],\n 'url': '/en/reports/data_quality/{organisation}?format={format}&download=true'\n },\n {\n 'key': 'plan-finland',\n 'title': unicode(_('Plan Finland report')),\n 'description': unicode(_('This custom MFA report for Plan Finland gives an overview of the '\n 'hierarchy of Plan Finland\\'s projects and their results.')),\n 'formats': ['pdf', ],\n 'parameters': ['project', ],\n 'url': '/en/reports/plan_finland/{project}?format={format}&download=true'\n }\n]\n\n# Data for all available formats from ReportServer, with the following fields:\n# - key: A unique identifier for the format, also used in the formats field of the reports\n# - displayName: The display name of the format, as displayed on the 'My reports' page\n# - icon: The font awesome icon of the format, as displayed on the 'My reports' page\n\nFORMATS = [\n {\n 'key': 'pdf',\n 'displayName': 'PDF',\n 'icon': 'file-pdf-o',\n },\n {\n 'key': 'excel',\n 'displayName': 'Excel',\n 'icon': 'file-excel-o',\n },\n {\n 'key': 'word',\n 'displayName': 'Word',\n 'icon': 'file-word-o',\n },\n {\n 'key': 'html',\n 'displayName': 'HTML',\n 'icon': 'code',\n },\n]\n", "path": "akvo/rsr/reports.py"}]}
1,277
203
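The akvo record above only appends one more entry to the `REPORTS` list; what makes those entries usable is the URL template with `{project}` / `{organisation}` and `{format}` placeholders. A hypothetical helper (not part of that codebase) showing how such a template could be filled:

```python
REPORTS = [
    {
        'key': 'plan-finland',
        'formats': ['pdf'],
        'parameters': ['project'],
        'url': '/en/reports/plan_finland/{project}?format={format}&download=true',
    },
]


def render_report_url(key, fmt, **params):
    """Fill a report's URL template with the chosen format and parameter values."""
    report = next(r for r in REPORTS if r['key'] == key)
    if fmt not in report['formats']:
        raise ValueError('format {!r} not offered for report {!r}'.format(fmt, key))
    return report['url'].format(format=fmt, **params)


if __name__ == '__main__':
    # -> /en/reports/plan_finland/42?format=pdf&download=true
    print(render_report_url('plan-finland', 'pdf', project=42))
```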
gh_patches_debug_26692
rasdani/github-patches
git_diff
google__fuzzbench-291
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [GCP] Runners are not started. I pulled master and tried to evaluate libfuzzer against honggfuzz in 5 trials for 1 hour on 1 benchmark (mbedtls_fuzz_dtlsclient). It doesn't generate the report anymore. The web bucket is empty, the experiments-result folder does not exist in the data bucket, the SQL database is empty, and the Error Reporting gives the following error: ``` ValueError: Empty experiment data. Message: Error generating HTML report. at validate_data (/work/src/analysis/data_utils.py:21) at generate_report (/work/src/analysis/generate_report.py:132) at output_report (/work/src/experiment/reporter.py:43) ``` I deleted authorization keys of the service account. I deleted the old and set up a new SQL database (incl. `alembic upgrade head`). I cleaned out the container registry (by deleting the `container` folder in the corresponding bucket). I cleaned out the Cloud Builds (by deleting `source` folder in the corresponding bucket). It recreates the containers and builds, when I start the dispatcher. The dispatcher runs properly. I SSH'ed into a random runner: `docker images` and `docker ps -a` return empty-handed. Is the recent setup gcr.io/fuzzbench-specific? Any suggestion to debug? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `common/benchmark_utils.py` Content: ``` 1 # Copyright 2020 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 """Code for dealing with benchmarks.""" 15 import os 16 import re 17 18 from common import experiment_utils 19 from common import fuzzer_utils 20 from common import logs 21 from common import oss_fuzz 22 from common import utils 23 24 VALID_BENCHMARK_REGEX = re.compile(r'^[A-Za-z0-9\._\-]+$') 25 26 27 def is_oss_fuzz(benchmark): 28 """Returns True if |benchmark| is OSS-Fuzz-based project.""" 29 return os.path.isfile(oss_fuzz.get_config_file(benchmark)) 30 31 32 def get_project(benchmark): 33 """Returns the OSS-Fuzz project of |benchmark| if it is based on an 34 OSS-Fuzz project, otherwise raises ValueError.""" 35 if is_oss_fuzz(benchmark): 36 return oss_fuzz.get_config(benchmark)['project'] 37 raise ValueError('Can only get project on OSS-Fuzz benchmarks.') 38 39 40 def get_fuzz_target(benchmark): 41 """Returns the fuzz target of |benchmark|""" 42 if is_oss_fuzz(benchmark): 43 return oss_fuzz.get_config(benchmark)['fuzz_target'] 44 return fuzzer_utils.DEFAULT_FUZZ_TARGET_NAME 45 46 47 def get_runner_image_url(benchmark, fuzzer, cloud_project): 48 """Get the URL of the docker runner image for fuzzing the benchmark with 49 fuzzer.""" 50 base_tag = experiment_utils.get_base_docker_tag(cloud_project) 51 if is_oss_fuzz(benchmark): 52 return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format( 53 base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark)) 54 return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag, 55 fuzzer=fuzzer, 56 benchmark=benchmark) 57 58 59 def get_builder_image_url(benchmark, fuzzer, cloud_project): 60 """Get the URL of the docker builder image for fuzzing the benchmark with 61 fuzzer.""" 62 base_tag = experiment_utils.get_base_docker_tag(cloud_project) 63 if is_oss_fuzz(benchmark): 64 return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format( 65 base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark)) 66 return '{base_tag}/builders/{fuzzer}/{benchmark}'.format( 67 base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark) 68 69 70 def get_oss_fuzz_builder_hash(benchmark): 71 """Get the specified hash of the OSS-Fuzz builder for the OSS-Fuzz project 72 used by |benchmark|.""" 73 if is_oss_fuzz(benchmark): 74 return oss_fuzz.get_config(benchmark)['oss_fuzz_builder_hash'] 75 raise ValueError('Can only get project on OSS-Fuzz benchmarks.') 76 77 78 def validate(benchmark): 79 """Return True if |benchmark| is a valid fuzzbench fuzzer.""" 80 if VALID_BENCHMARK_REGEX.match(benchmark) is None: 81 logs.error('%s does not conform to %s pattern.', benchmark, 82 VALID_BENCHMARK_REGEX.pattern) 83 return False 84 if benchmark in get_all_benchmarks(): 85 return True 86 logs.error('%s must have a build.sh or oss-fuzz.yaml.', benchmark) 87 return False 88 89 90 def get_all_benchmarks(): 91 """Returns the list of all benchmarks.""" 92 benchmarks_dir = os.path.join(utils.ROOT_DIR, 'benchmarks') 93 all_benchmarks = [] 94 for benchmark in os.listdir(benchmarks_dir): 95 benchmark_path = os.path.join(benchmarks_dir, benchmark) 96 if os.path.isfile(os.path.join(benchmark_path, 'oss-fuzz.yaml')): 97 # Benchmark is an OSS-Fuzz benchmark. 98 all_benchmarks.append(benchmark) 99 elif os.path.isfile(os.path.join(benchmark_path, 'build.sh')): 100 # Benchmark is a standard benchmark. 101 all_benchmarks.append(benchmark) 102 return all_benchmarks 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/common/benchmark_utils.py b/common/benchmark_utils.py --- a/common/benchmark_utils.py +++ b/common/benchmark_utils.py @@ -48,9 +48,6 @@ """Get the URL of the docker runner image for fuzzing the benchmark with fuzzer.""" base_tag = experiment_utils.get_base_docker_tag(cloud_project) - if is_oss_fuzz(benchmark): - return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format( - base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark)) return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark) @@ -60,9 +57,6 @@ """Get the URL of the docker builder image for fuzzing the benchmark with fuzzer.""" base_tag = experiment_utils.get_base_docker_tag(cloud_project) - if is_oss_fuzz(benchmark): - return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format( - base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark)) return '{base_tag}/builders/{fuzzer}/{benchmark}'.format( base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)
{"golden_diff": "diff --git a/common/benchmark_utils.py b/common/benchmark_utils.py\n--- a/common/benchmark_utils.py\n+++ b/common/benchmark_utils.py\n@@ -48,9 +48,6 @@\n \"\"\"Get the URL of the docker runner image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n- if is_oss_fuzz(benchmark):\n- return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format(\n- base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,\n fuzzer=fuzzer,\n benchmark=benchmark)\n@@ -60,9 +57,6 @@\n \"\"\"Get the URL of the docker builder image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n- if is_oss_fuzz(benchmark):\n- return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format(\n- base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(\n base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)\n", "issue": "[GCP] Runners are not started.\nI pulled master and tried to evaluate libfuzzer against honggfuzz in 5 trials for 1 hour on 1 benchmark (mbedtls_fuzz_dtlsclient). It doesn't generate the report anymore. The web bucket is empty, the experiments-result folder does not exist in the data bucket, the SQL database is empty, and the Error Reporting gives the following error:\r\n```\r\nValueError: Empty experiment data. Message: Error generating HTML report.\r\nat validate_data (/work/src/analysis/data_utils.py:21)\r\nat generate_report (/work/src/analysis/generate_report.py:132)\r\nat output_report (/work/src/experiment/reporter.py:43)\r\n```\r\n\r\nI deleted authorization keys of the service account. I deleted the old and set up a new SQL database (incl. `alembic upgrade head`). I cleaned out the container registry (by deleting the `container` folder in the corresponding bucket). I cleaned out the Cloud Builds (by deleting `source` folder in the corresponding bucket). It recreates the containers and builds, when I start the dispatcher. The dispatcher runs properly. I SSH'ed into a random runner: `docker images` and `docker ps -a` return empty-handed. Is the recent setup gcr.io/fuzzbench-specific? 
Any suggestion to debug?\n", "before_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Code for dealing with benchmarks.\"\"\"\nimport os\nimport re\n\nfrom common import experiment_utils\nfrom common import fuzzer_utils\nfrom common import logs\nfrom common import oss_fuzz\nfrom common import utils\n\nVALID_BENCHMARK_REGEX = re.compile(r'^[A-Za-z0-9\\._\\-]+$')\n\n\ndef is_oss_fuzz(benchmark):\n \"\"\"Returns True if |benchmark| is OSS-Fuzz-based project.\"\"\"\n return os.path.isfile(oss_fuzz.get_config_file(benchmark))\n\n\ndef get_project(benchmark):\n \"\"\"Returns the OSS-Fuzz project of |benchmark| if it is based on an\n OSS-Fuzz project, otherwise raises ValueError.\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['project']\n raise ValueError('Can only get project on OSS-Fuzz benchmarks.')\n\n\ndef get_fuzz_target(benchmark):\n \"\"\"Returns the fuzz target of |benchmark|\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['fuzz_target']\n return fuzzer_utils.DEFAULT_FUZZ_TARGET_NAME\n\n\ndef get_runner_image_url(benchmark, fuzzer, cloud_project):\n \"\"\"Get the URL of the docker runner image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n if is_oss_fuzz(benchmark):\n return '{base_tag}/oss-fuzz/runners/{fuzzer}/{project}'.format(\n base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,\n fuzzer=fuzzer,\n benchmark=benchmark)\n\n\ndef get_builder_image_url(benchmark, fuzzer, cloud_project):\n \"\"\"Get the URL of the docker builder image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n if is_oss_fuzz(benchmark):\n return '{base_tag}/oss-fuzz/builders/{fuzzer}/{project}'.format(\n base_tag=base_tag, fuzzer=fuzzer, project=get_project(benchmark))\n return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(\n base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)\n\n\ndef get_oss_fuzz_builder_hash(benchmark):\n \"\"\"Get the specified hash of the OSS-Fuzz builder for the OSS-Fuzz project\n used by |benchmark|.\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['oss_fuzz_builder_hash']\n raise ValueError('Can only get project on OSS-Fuzz benchmarks.')\n\n\ndef validate(benchmark):\n \"\"\"Return True if |benchmark| is a valid fuzzbench fuzzer.\"\"\"\n if VALID_BENCHMARK_REGEX.match(benchmark) is None:\n logs.error('%s does not conform to %s pattern.', benchmark,\n VALID_BENCHMARK_REGEX.pattern)\n return False\n if benchmark in get_all_benchmarks():\n return True\n logs.error('%s must have a build.sh or oss-fuzz.yaml.', benchmark)\n return False\n\n\ndef get_all_benchmarks():\n \"\"\"Returns the list of all benchmarks.\"\"\"\n benchmarks_dir = os.path.join(utils.ROOT_DIR, 'benchmarks')\n all_benchmarks = []\n for benchmark in 
os.listdir(benchmarks_dir):\n benchmark_path = os.path.join(benchmarks_dir, benchmark)\n if os.path.isfile(os.path.join(benchmark_path, 'oss-fuzz.yaml')):\n # Benchmark is an OSS-Fuzz benchmark.\n all_benchmarks.append(benchmark)\n elif os.path.isfile(os.path.join(benchmark_path, 'build.sh')):\n # Benchmark is a standard benchmark.\n all_benchmarks.append(benchmark)\n return all_benchmarks\n", "path": "common/benchmark_utils.py"}], "after_files": [{"content": "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Code for dealing with benchmarks.\"\"\"\nimport os\nimport re\n\nfrom common import experiment_utils\nfrom common import fuzzer_utils\nfrom common import logs\nfrom common import oss_fuzz\nfrom common import utils\n\nVALID_BENCHMARK_REGEX = re.compile(r'^[A-Za-z0-9\\._\\-]+$')\n\n\ndef is_oss_fuzz(benchmark):\n \"\"\"Returns True if |benchmark| is OSS-Fuzz-based project.\"\"\"\n return os.path.isfile(oss_fuzz.get_config_file(benchmark))\n\n\ndef get_project(benchmark):\n \"\"\"Returns the OSS-Fuzz project of |benchmark| if it is based on an\n OSS-Fuzz project, otherwise raises ValueError.\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['project']\n raise ValueError('Can only get project on OSS-Fuzz benchmarks.')\n\n\ndef get_fuzz_target(benchmark):\n \"\"\"Returns the fuzz target of |benchmark|\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['fuzz_target']\n return fuzzer_utils.DEFAULT_FUZZ_TARGET_NAME\n\n\ndef get_runner_image_url(benchmark, fuzzer, cloud_project):\n \"\"\"Get the URL of the docker runner image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n return '{base_tag}/runners/{fuzzer}/{benchmark}'.format(base_tag=base_tag,\n fuzzer=fuzzer,\n benchmark=benchmark)\n\n\ndef get_builder_image_url(benchmark, fuzzer, cloud_project):\n \"\"\"Get the URL of the docker builder image for fuzzing the benchmark with\n fuzzer.\"\"\"\n base_tag = experiment_utils.get_base_docker_tag(cloud_project)\n return '{base_tag}/builders/{fuzzer}/{benchmark}'.format(\n base_tag=base_tag, fuzzer=fuzzer, benchmark=benchmark)\n\n\ndef get_oss_fuzz_builder_hash(benchmark):\n \"\"\"Get the specified hash of the OSS-Fuzz builder for the OSS-Fuzz project\n used by |benchmark|.\"\"\"\n if is_oss_fuzz(benchmark):\n return oss_fuzz.get_config(benchmark)['oss_fuzz_builder_hash']\n raise ValueError('Can only get project on OSS-Fuzz benchmarks.')\n\n\ndef validate(benchmark):\n \"\"\"Return True if |benchmark| is a valid fuzzbench fuzzer.\"\"\"\n if VALID_BENCHMARK_REGEX.match(benchmark) is None:\n logs.error('%s does not conform to %s pattern.', benchmark,\n VALID_BENCHMARK_REGEX.pattern)\n return False\n if benchmark in get_all_benchmarks():\n return True\n logs.error('%s must have a build.sh or oss-fuzz.yaml.', benchmark)\n return False\n\n\ndef get_all_benchmarks():\n \"\"\"Returns the list of all benchmarks.\"\"\"\n benchmarks_dir = 
os.path.join(utils.ROOT_DIR, 'benchmarks')\n all_benchmarks = []\n for benchmark in os.listdir(benchmarks_dir):\n benchmark_path = os.path.join(benchmarks_dir, benchmark)\n if os.path.isfile(os.path.join(benchmark_path, 'oss-fuzz.yaml')):\n # Benchmark is an OSS-Fuzz benchmark.\n all_benchmarks.append(benchmark)\n elif os.path.isfile(os.path.join(benchmark_path, 'build.sh')):\n # Benchmark is a standard benchmark.\n all_benchmarks.append(benchmark)\n return all_benchmarks\n", "path": "common/benchmark_utils.py"}]}
num_tokens: 1,686
num_tokens_diff: 291
problem_id: gh_patches_debug_9883
source: rasdani/github-patches
task_type: git_diff
in_source_id: sublimelsp__LSP-2408
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Goto Definition throws **Describe the bug** Using `LSP-pyright`, click `Definition`. Nothing will happen visually. **To Reproduce** 1. `LSP-pyright` 2. hover over something 3. click `Definition` (or `Type Definition` or `Declaration`) 4. see error in console: ```py Traceback (most recent call last): File "D:\Programs\Sublime Text 4\Data\Installed Packages\LSP.sublime-package\plugin/core/sessions.py", line 1681, in <lambda> File "D:\Programs\Sublime Text 4\Data\Installed Packages\LSP.sublime-package\plugin/core/open.py", line 96, in open_file File "D:\Programs\Sublime Text 4\Data\Installed Packages\LSP.sublime-package\plugin/core/open.py", line 82, in _find_open_file TypeError: find_open_file() takes 2 positional arguments but 3 were given ``` **Expected behavior** no error, I go to definition **Environment (please complete the following information):** - OS: windows - Sublime Text version: 4121 - LSP version: 1.28.0 - Language servers used: pyright-1.2.42 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugin/core/open.py` Content: ``` 1 from .logging import exception_log 2 from .promise import Promise 3 from .promise import ResolveFunc 4 from .protocol import DocumentUri 5 from .protocol import Range 6 from .protocol import UINT_MAX 7 from .typing import Dict, Tuple, Optional 8 from .typing import cast 9 from .url import parse_uri 10 from .views import range_to_region 11 from urllib.parse import unquote, urlparse 12 import os 13 import re 14 import sublime 15 import sublime_plugin 16 import subprocess 17 import webbrowser 18 19 20 opening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]] 21 FRAGMENT_PATTERN = re.compile(r'^L?(\d+)(?:,(\d+))?(?:-L?(\d+)(?:,(\d+))?)?') 22 23 24 def lsp_range_from_uri_fragment(fragment: str) -> Optional[Range]: 25 match = FRAGMENT_PATTERN.match(fragment) 26 if match: 27 selection = {'start': {'line': 0, 'character': 0}, 'end': {'line': 0, 'character': 0}} # type: Range 28 # Line and column numbers in the fragment are assumed to be 1-based and need to be converted to 0-based 29 # numbers for the LSP Position structure. 
30 start_line, start_column, end_line, end_column = [max(0, int(g) - 1) if g else None for g in match.groups()] 31 if start_line: 32 selection['start']['line'] = start_line 33 selection['end']['line'] = start_line 34 if start_column: 35 selection['start']['character'] = start_column 36 selection['end']['character'] = start_column 37 if end_line: 38 selection['end']['line'] = end_line 39 selection['end']['character'] = UINT_MAX 40 if end_column is not None: 41 selection['end']['character'] = end_column 42 return selection 43 return None 44 45 46 def open_file_uri( 47 window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1 48 ) -> Promise[Optional[sublime.View]]: 49 50 decoded_uri = unquote(uri) # decode percent-encoded characters 51 parsed = urlparse(decoded_uri) 52 open_promise = open_file(window, decoded_uri, flags, group) 53 if parsed.fragment: 54 selection = lsp_range_from_uri_fragment(parsed.fragment) 55 if selection: 56 return open_promise.then(lambda view: _select_and_center(view, cast(Range, selection))) 57 return open_promise 58 59 60 def _select_and_center(view: Optional[sublime.View], r: Range) -> Optional[sublime.View]: 61 if view: 62 return center_selection(view, r) 63 return None 64 65 66 def _return_existing_view(flags: int, existing_view_group: int, active_group: int, specified_group: int) -> bool: 67 if specified_group > -1: 68 return existing_view_group == specified_group 69 if bool(flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)): 70 return False 71 if existing_view_group == active_group: 72 return True 73 return not bool(flags & sublime.FORCE_GROUP) 74 75 76 def _find_open_file(window: sublime.Window, fname: str, group: int = -1) -> Optional[sublime.View]: 77 """A replacement for Window.find_open_file that prefers the active view instead of the leftmost one.""" 78 _group = window.active_group() if group == -1 else group 79 view = window.active_view_in_group(_group) 80 if view and fname == view.file_name(): 81 return view 82 return window.find_open_file(fname, group) 83 84 85 def open_file( 86 window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1 87 ) -> Promise[Optional[sublime.View]]: 88 """ 89 Open a file asynchronously. 90 It is only safe to call this function from the UI thread. 91 The provided uri MUST be a file URI 92 """ 93 file = parse_uri(uri)[1] 94 # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed 95 # to open as a separate view). 96 view = _find_open_file(window, file) 97 if view and _return_existing_view(flags, window.get_view_index(view)[0], window.active_group(), group): 98 return Promise.resolve(view) 99 100 was_already_open = view is not None 101 view = window.open_file(file, flags, group) 102 if not view.is_loading(): 103 if was_already_open and (flags & sublime.SEMI_TRANSIENT): 104 # workaround bug https://github.com/sublimehq/sublime_text/issues/2411 where transient view might not get 105 # its view listeners initialized. 106 sublime_plugin.check_view_event_listeners(view) # type: ignore 107 # It's already loaded. Possibly already open in a tab. 108 return Promise.resolve(view) 109 110 # Is the view opening right now? Then return the associated unresolved promise 111 for fn, value in opening_files.items(): 112 if fn == file or os.path.samefile(fn, file): 113 # Return the unresolved promise. A future on_load event will resolve the promise. 
114 return value[0] 115 116 # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py) 117 def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None: 118 global opening_files 119 # Save the promise in the first element of the tuple -- except we cannot yet do that here 120 opening_files[file] = (None, resolve) # type: ignore 121 122 promise = Promise(fullfill) 123 tup = opening_files[file] 124 # Save the promise in the first element of the tuple so that the for-loop above can return it 125 opening_files[file] = (promise, tup[1]) 126 return promise 127 128 129 def center_selection(v: sublime.View, r: Range) -> sublime.View: 130 selection = range_to_region(r, v) 131 v.run_command("lsp_selection_set", {"regions": [(selection.a, selection.a)]}) 132 window = v.window() 133 if window: 134 window.focus_view(v) 135 if int(sublime.version()) >= 4124: 136 v.show_at_center(selection.begin(), animate=False) 137 else: 138 # TODO: remove later when a stable build lands 139 v.show_at_center(selection.begin()) # type: ignore 140 return v 141 142 143 def open_in_browser(uri: str) -> None: 144 # NOTE: Remove this check when on py3.8. 145 if not uri.lower().startswith(("http://", "https://")): 146 uri = "https://" + uri 147 if not webbrowser.open(uri): 148 sublime.status_message("failed to open: " + uri) 149 150 151 def open_externally(uri: str, take_focus: bool) -> bool: 152 """ 153 A blocking function that invokes the OS's "open with default extension" 154 """ 155 try: 156 # TODO: handle take_focus 157 if sublime.platform() == "windows": 158 os.startfile(uri) # type: ignore 159 elif sublime.platform() == "osx": 160 subprocess.check_call(("/usr/bin/open", uri)) 161 else: # linux 162 subprocess.check_call(("xdg-open", uri)) 163 return True 164 except Exception as ex: 165 exception_log("Failed to open {}".format(uri), ex) 166 return False 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugin/core/open.py b/plugin/core/open.py --- a/plugin/core/open.py +++ b/plugin/core/open.py @@ -1,3 +1,4 @@ +from .constants import ST_VERSION from .logging import exception_log from .promise import Promise from .promise import ResolveFunc @@ -79,7 +80,7 @@ view = window.active_view_in_group(_group) if view and fname == view.file_name(): return view - return window.find_open_file(fname, group) + return window.find_open_file(fname, group) if ST_VERSION >= 4136 else window.find_open_file(fname) def open_file(
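The essence of the fix above is a version gate: the two-argument `Window.find_open_file(fname, group)` form is only accepted from Sublime Text build 4136 on, while the reporter runs build 4121, hence the `TypeError`. A rough, self-contained sketch of that compatibility shim (the `find_open_file_compat` wrapper name is ours; the plugin itself keeps `ST_VERSION` in `plugin/core/constants.py`, as the `from .constants import ST_VERSION` line in the diff shows):

```python
import sublime

ST_VERSION = int(sublime.version())  # e.g. 4121 on the reporter's setup

def find_open_file_compat(window: sublime.Window, fname: str, group: int = -1):
    # Builds older than 4136 only expose the single-argument form;
    # passing a group there raises the reported TypeError.
    if ST_VERSION >= 4136:
        return window.find_open_file(fname, group)
    return window.find_open_file(fname)
```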
{"golden_diff": "diff --git a/plugin/core/open.py b/plugin/core/open.py\n--- a/plugin/core/open.py\n+++ b/plugin/core/open.py\n@@ -1,3 +1,4 @@\n+from .constants import ST_VERSION\n from .logging import exception_log\n from .promise import Promise\n from .promise import ResolveFunc\n@@ -79,7 +80,7 @@\n view = window.active_view_in_group(_group)\n if view and fname == view.file_name():\n return view\n- return window.find_open_file(fname, group)\n+ return window.find_open_file(fname, group) if ST_VERSION >= 4136 else window.find_open_file(fname)\n \n \n def open_file(\n", "issue": "Goto Definition throws\n**Describe the bug**\r\nUsing `LSP-pyright`, click `Definition`. Nothing will happen visually.\r\n\r\n**To Reproduce**\r\n1. `LSP-pyright`\r\n2. hover over something\r\n3. click `Definition` (or `Type Definition` or `Declaration`)\r\n4. see error in console: \r\n```py\r\nTraceback (most recent call last):\r\n File \"D:\\Programs\\Sublime Text 4\\Data\\Installed Packages\\LSP.sublime-package\\plugin/core/sessions.py\", line 1681, in <lambda>\r\n File \"D:\\Programs\\Sublime Text 4\\Data\\Installed Packages\\LSP.sublime-package\\plugin/core/open.py\", line 96, in open_file\r\n File \"D:\\Programs\\Sublime Text 4\\Data\\Installed Packages\\LSP.sublime-package\\plugin/core/open.py\", line 82, in _find_open_file\r\nTypeError: find_open_file() takes 2 positional arguments but 3 were given\r\n```\r\n\r\n**Expected behavior**\r\nno error, I go to definition\r\n\r\n**Environment (please complete the following information):**\r\n- OS: windows\r\n- Sublime Text version: 4121\r\n- LSP version: 1.28.0\r\n- Language servers used: pyright-1.2.42\r\n\n", "before_files": [{"content": "from .logging import exception_log\nfrom .promise import Promise\nfrom .promise import ResolveFunc\nfrom .protocol import DocumentUri\nfrom .protocol import Range\nfrom .protocol import UINT_MAX\nfrom .typing import Dict, Tuple, Optional\nfrom .typing import cast\nfrom .url import parse_uri\nfrom .views import range_to_region\nfrom urllib.parse import unquote, urlparse\nimport os\nimport re\nimport sublime\nimport sublime_plugin\nimport subprocess\nimport webbrowser\n\n\nopening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]]\nFRAGMENT_PATTERN = re.compile(r'^L?(\\d+)(?:,(\\d+))?(?:-L?(\\d+)(?:,(\\d+))?)?')\n\n\ndef lsp_range_from_uri_fragment(fragment: str) -> Optional[Range]:\n match = FRAGMENT_PATTERN.match(fragment)\n if match:\n selection = {'start': {'line': 0, 'character': 0}, 'end': {'line': 0, 'character': 0}} # type: Range\n # Line and column numbers in the fragment are assumed to be 1-based and need to be converted to 0-based\n # numbers for the LSP Position structure.\n start_line, start_column, end_line, end_column = [max(0, int(g) - 1) if g else None for g in match.groups()]\n if start_line:\n selection['start']['line'] = start_line\n selection['end']['line'] = start_line\n if start_column:\n selection['start']['character'] = start_column\n selection['end']['character'] = start_column\n if end_line:\n selection['end']['line'] = end_line\n selection['end']['character'] = UINT_MAX\n if end_column is not None:\n selection['end']['character'] = end_column\n return selection\n return None\n\n\ndef open_file_uri(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n\n decoded_uri = unquote(uri) # decode percent-encoded characters\n parsed = urlparse(decoded_uri)\n open_promise = open_file(window, 
decoded_uri, flags, group)\n if parsed.fragment:\n selection = lsp_range_from_uri_fragment(parsed.fragment)\n if selection:\n return open_promise.then(lambda view: _select_and_center(view, cast(Range, selection)))\n return open_promise\n\n\ndef _select_and_center(view: Optional[sublime.View], r: Range) -> Optional[sublime.View]:\n if view:\n return center_selection(view, r)\n return None\n\n\ndef _return_existing_view(flags: int, existing_view_group: int, active_group: int, specified_group: int) -> bool:\n if specified_group > -1:\n return existing_view_group == specified_group\n if bool(flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)):\n return False\n if existing_view_group == active_group:\n return True\n return not bool(flags & sublime.FORCE_GROUP)\n\n\ndef _find_open_file(window: sublime.Window, fname: str, group: int = -1) -> Optional[sublime.View]:\n \"\"\"A replacement for Window.find_open_file that prefers the active view instead of the leftmost one.\"\"\"\n _group = window.active_group() if group == -1 else group\n view = window.active_view_in_group(_group)\n if view and fname == view.file_name():\n return view\n return window.find_open_file(fname, group)\n\n\ndef open_file(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n \"\"\"\n Open a file asynchronously.\n It is only safe to call this function from the UI thread.\n The provided uri MUST be a file URI\n \"\"\"\n file = parse_uri(uri)[1]\n # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed\n # to open as a separate view).\n view = _find_open_file(window, file)\n if view and _return_existing_view(flags, window.get_view_index(view)[0], window.active_group(), group):\n return Promise.resolve(view)\n\n was_already_open = view is not None\n view = window.open_file(file, flags, group)\n if not view.is_loading():\n if was_already_open and (flags & sublime.SEMI_TRANSIENT):\n # workaround bug https://github.com/sublimehq/sublime_text/issues/2411 where transient view might not get\n # its view listeners initialized.\n sublime_plugin.check_view_event_listeners(view) # type: ignore\n # It's already loaded. Possibly already open in a tab.\n return Promise.resolve(view)\n\n # Is the view opening right now? Then return the associated unresolved promise\n for fn, value in opening_files.items():\n if fn == file or os.path.samefile(fn, file):\n # Return the unresolved promise. 
A future on_load event will resolve the promise.\n return value[0]\n\n # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py)\n def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None:\n global opening_files\n # Save the promise in the first element of the tuple -- except we cannot yet do that here\n opening_files[file] = (None, resolve) # type: ignore\n\n promise = Promise(fullfill)\n tup = opening_files[file]\n # Save the promise in the first element of the tuple so that the for-loop above can return it\n opening_files[file] = (promise, tup[1])\n return promise\n\n\ndef center_selection(v: sublime.View, r: Range) -> sublime.View:\n selection = range_to_region(r, v)\n v.run_command(\"lsp_selection_set\", {\"regions\": [(selection.a, selection.a)]})\n window = v.window()\n if window:\n window.focus_view(v)\n if int(sublime.version()) >= 4124:\n v.show_at_center(selection.begin(), animate=False)\n else:\n # TODO: remove later when a stable build lands\n v.show_at_center(selection.begin()) # type: ignore\n return v\n\n\ndef open_in_browser(uri: str) -> None:\n # NOTE: Remove this check when on py3.8.\n if not uri.lower().startswith((\"http://\", \"https://\")):\n uri = \"https://\" + uri\n if not webbrowser.open(uri):\n sublime.status_message(\"failed to open: \" + uri)\n\n\ndef open_externally(uri: str, take_focus: bool) -> bool:\n \"\"\"\n A blocking function that invokes the OS's \"open with default extension\"\n \"\"\"\n try:\n # TODO: handle take_focus\n if sublime.platform() == \"windows\":\n os.startfile(uri) # type: ignore\n elif sublime.platform() == \"osx\":\n subprocess.check_call((\"/usr/bin/open\", uri))\n else: # linux\n subprocess.check_call((\"xdg-open\", uri))\n return True\n except Exception as ex:\n exception_log(\"Failed to open {}\".format(uri), ex)\n return False\n", "path": "plugin/core/open.py"}], "after_files": [{"content": "from .constants import ST_VERSION\nfrom .logging import exception_log\nfrom .promise import Promise\nfrom .promise import ResolveFunc\nfrom .protocol import DocumentUri\nfrom .protocol import Range\nfrom .protocol import UINT_MAX\nfrom .typing import Dict, Tuple, Optional\nfrom .typing import cast\nfrom .url import parse_uri\nfrom .views import range_to_region\nfrom urllib.parse import unquote, urlparse\nimport os\nimport re\nimport sublime\nimport sublime_plugin\nimport subprocess\nimport webbrowser\n\n\nopening_files = {} # type: Dict[str, Tuple[Promise[Optional[sublime.View]], ResolveFunc[Optional[sublime.View]]]]\nFRAGMENT_PATTERN = re.compile(r'^L?(\\d+)(?:,(\\d+))?(?:-L?(\\d+)(?:,(\\d+))?)?')\n\n\ndef lsp_range_from_uri_fragment(fragment: str) -> Optional[Range]:\n match = FRAGMENT_PATTERN.match(fragment)\n if match:\n selection = {'start': {'line': 0, 'character': 0}, 'end': {'line': 0, 'character': 0}} # type: Range\n # Line and column numbers in the fragment are assumed to be 1-based and need to be converted to 0-based\n # numbers for the LSP Position structure.\n start_line, start_column, end_line, end_column = [max(0, int(g) - 1) if g else None for g in match.groups()]\n if start_line:\n selection['start']['line'] = start_line\n selection['end']['line'] = start_line\n if start_column:\n selection['start']['character'] = start_column\n selection['end']['character'] = start_column\n if end_line:\n selection['end']['line'] = end_line\n selection['end']['character'] = UINT_MAX\n if end_column is not None:\n selection['end']['character'] = end_column\n return selection\n return 
None\n\n\ndef open_file_uri(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n\n decoded_uri = unquote(uri) # decode percent-encoded characters\n parsed = urlparse(decoded_uri)\n open_promise = open_file(window, decoded_uri, flags, group)\n if parsed.fragment:\n selection = lsp_range_from_uri_fragment(parsed.fragment)\n if selection:\n return open_promise.then(lambda view: _select_and_center(view, cast(Range, selection)))\n return open_promise\n\n\ndef _select_and_center(view: Optional[sublime.View], r: Range) -> Optional[sublime.View]:\n if view:\n return center_selection(view, r)\n return None\n\n\ndef _return_existing_view(flags: int, existing_view_group: int, active_group: int, specified_group: int) -> bool:\n if specified_group > -1:\n return existing_view_group == specified_group\n if bool(flags & (sublime.ADD_TO_SELECTION | sublime.REPLACE_MRU)):\n return False\n if existing_view_group == active_group:\n return True\n return not bool(flags & sublime.FORCE_GROUP)\n\n\ndef _find_open_file(window: sublime.Window, fname: str, group: int = -1) -> Optional[sublime.View]:\n \"\"\"A replacement for Window.find_open_file that prefers the active view instead of the leftmost one.\"\"\"\n _group = window.active_group() if group == -1 else group\n view = window.active_view_in_group(_group)\n if view and fname == view.file_name():\n return view\n return window.find_open_file(fname, group) if ST_VERSION >= 4136 else window.find_open_file(fname)\n\n\ndef open_file(\n window: sublime.Window, uri: DocumentUri, flags: int = 0, group: int = -1\n) -> Promise[Optional[sublime.View]]:\n \"\"\"\n Open a file asynchronously.\n It is only safe to call this function from the UI thread.\n The provided uri MUST be a file URI\n \"\"\"\n file = parse_uri(uri)[1]\n # window.open_file brings the file to focus if it's already opened, which we don't want (unless it's supposed\n # to open as a separate view).\n view = _find_open_file(window, file)\n if view and _return_existing_view(flags, window.get_view_index(view)[0], window.active_group(), group):\n return Promise.resolve(view)\n\n was_already_open = view is not None\n view = window.open_file(file, flags, group)\n if not view.is_loading():\n if was_already_open and (flags & sublime.SEMI_TRANSIENT):\n # workaround bug https://github.com/sublimehq/sublime_text/issues/2411 where transient view might not get\n # its view listeners initialized.\n sublime_plugin.check_view_event_listeners(view) # type: ignore\n # It's already loaded. Possibly already open in a tab.\n return Promise.resolve(view)\n\n # Is the view opening right now? Then return the associated unresolved promise\n for fn, value in opening_files.items():\n if fn == file or os.path.samefile(fn, file):\n # Return the unresolved promise. 
A future on_load event will resolve the promise.\n return value[0]\n\n # Prepare a new promise to be resolved by a future on_load event (see the event listener in main.py)\n def fullfill(resolve: ResolveFunc[Optional[sublime.View]]) -> None:\n global opening_files\n # Save the promise in the first element of the tuple -- except we cannot yet do that here\n opening_files[file] = (None, resolve) # type: ignore\n\n promise = Promise(fullfill)\n tup = opening_files[file]\n # Save the promise in the first element of the tuple so that the for-loop above can return it\n opening_files[file] = (promise, tup[1])\n return promise\n\n\ndef center_selection(v: sublime.View, r: Range) -> sublime.View:\n selection = range_to_region(r, v)\n v.run_command(\"lsp_selection_set\", {\"regions\": [(selection.a, selection.a)]})\n window = v.window()\n if window:\n window.focus_view(v)\n if int(sublime.version()) >= 4124:\n v.show_at_center(selection.begin(), animate=False)\n else:\n # TODO: remove later when a stable build lands\n v.show_at_center(selection.begin()) # type: ignore\n return v\n\n\ndef open_in_browser(uri: str) -> None:\n # NOTE: Remove this check when on py3.8.\n if not uri.lower().startswith((\"http://\", \"https://\")):\n uri = \"https://\" + uri\n if not webbrowser.open(uri):\n sublime.status_message(\"failed to open: \" + uri)\n\n\ndef open_externally(uri: str, take_focus: bool) -> bool:\n \"\"\"\n A blocking function that invokes the OS's \"open with default extension\"\n \"\"\"\n try:\n # TODO: handle take_focus\n if sublime.platform() == \"windows\":\n os.startfile(uri) # type: ignore\n elif sublime.platform() == \"osx\":\n subprocess.check_call((\"/usr/bin/open\", uri))\n else: # linux\n subprocess.check_call((\"xdg-open\", uri))\n return True\n except Exception as ex:\n exception_log(\"Failed to open {}\".format(uri), ex)\n return False\n", "path": "plugin/core/open.py"}]}
num_tokens: 2,528
num_tokens_diff: 146
problem_id: gh_patches_debug_3017
source: rasdani/github-patches
task_type: git_diff
in_source_id: secdev__scapy-2255
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- tcpdump check error in centos #### Brief description > I have installed tcpdump in PATH, but it reports: scapy.error.Scapy_Exception: tcpdump is not available. Cannot use filter ! I found the code which check tcudmp in /opt/rh/rh-python36/root/usr/lib/python3.6/site-packages/scapy/arch/common.py: ``` def _check_tcpdump(): """ Return True if the tcpdump command can be started """ with open(os.devnull, 'wb') as devnull: try: proc = subprocess.Popen([conf.prog.tcpdump, "--version"], stdout=devnull, stderr=subprocess.STDOUT) except OSError: return False return proc.wait() == 0 ``` the error is that tcpdump --version return 1 instead of 0 eg: ``` [root@localhost proxy]# tcpdump --version tcpdump version 4.1-PRE-CVS_2017_03_21 libpcap version 1.4.0 Usage: tcpdump [-aAdDefhIJKlLnNOpqRStuUvxX] [ -B size ] [ -c count ] [ -C file_size ] [ -E algo:secret ] [ -F file ] [ -G seconds ] [ -i interface ] [ -j tstamptype ] [ -M secret ] [ -Q|-P in|out|inout ] [ -r file ] [ -s snaplen ] [ -T type ] [ -w file ] [ -W filecount ] [ -y datalinktype ] [ -z command ] [ -Z user ] [ expression ] [root@localhost proxy]# echo $? 1 ``` #### Environment ``` [root@localhost proxy]# python3.6 --version Python 3.6.3 [root@localhost proxy]# pip3.6 freeze certifi==2018.11.29 chardet==3.0.4 idna==2.8 protobuf==3.6.1 psutil==5.4.8 PyMySQL==0.9.3 redis==3.0.1 requests==2.21.0 s8-protocol==1.0 scapy==2.4.2 six==1.11.0 snakeMQ==1.6 urllib3==1.24.1 virtualenv==15.1.0 xlrd==1.2.0 You are using pip version 9.0.1, however version 19.0.2 is available. You should consider upgrading via the 'pip install --upgrade pip' command. [root@localhost proxy]# uname -a Linux localhost.localdomain 2.6.32-431.el6.x86_64 #1 SMP Fri Nov 22 03:15:09 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux [root@localhost proxy]# cat /etc/issue CentOS release 6.5 (Final) Kernel \r on an \m ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `scapy/arch/common.py` Content: ``` 1 # This file is part of Scapy 2 # See http://www.secdev.org/projects/scapy for more information 3 # Copyright (C) Philippe Biondi <[email protected]> 4 # This program is published under a GPLv2 license 5 6 """ 7 Functions common to different architectures 8 """ 9 10 import ctypes 11 import os 12 import socket 13 import struct 14 import subprocess 15 import time 16 from ctypes import POINTER, Structure 17 from ctypes import c_uint, c_uint32, c_ushort, c_ubyte 18 from scapy.consts import WINDOWS 19 from scapy.config import conf 20 from scapy.data import MTU 21 from scapy.error import Scapy_Exception 22 import scapy.modules.six as six 23 24 if not WINDOWS: 25 from fcntl import ioctl 26 27 # BOOT 28 29 30 def _check_tcpdump(): 31 """ 32 Return True if the tcpdump command can be started 33 """ 34 try: 35 proc = subprocess.Popen( 36 [conf.prog.tcpdump, "--version"], 37 stdout=subprocess.PIPE, 38 stderr=subprocess.STDOUT 39 ) 40 output = proc.communicate()[0] 41 except OSError: 42 return False 43 44 # On some systems, --version does not exist on tcpdump 45 return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ') 46 47 48 # This won't be used on Windows 49 TCPDUMP = WINDOWS or _check_tcpdump() 50 51 # UTILS 52 53 54 def get_if(iff, cmd): 55 """Ease SIOCGIF* ioctl calls""" 56 57 sck = socket.socket() 58 ifreq = ioctl(sck, cmd, struct.pack("16s16x", iff.encode("utf8"))) 59 sck.close() 60 return ifreq 61 62 63 def get_if_raw_hwaddr(iff): 64 """Get the raw MAC address of a local interface. 65 66 This function uses SIOCGIFHWADDR calls, therefore only works 67 on some distros. 68 69 :param iff: the network interface name as a string 70 :returns: the corresponding raw MAC address 71 """ 72 from scapy.arch import SIOCGIFHWADDR 73 return struct.unpack("16xh6s8x", get_if(iff, SIOCGIFHWADDR)) 74 75 # SOCKET UTILS 76 77 78 def _select_nonblock(sockets, remain=None): 79 """This function is called during sendrecv() routine to select 80 the available sockets. 81 """ 82 # pcap sockets aren't selectable, so we return all of them 83 # and ask the selecting functions to use nonblock_recv instead of recv 84 def _sleep_nonblock_recv(self): 85 res = self.nonblock_recv() 86 if res is None: 87 time.sleep(conf.recv_poll_rate) 88 return res 89 # we enforce remain=None: don't wait. 90 return sockets, _sleep_nonblock_recv 91 92 # BPF HANDLERS 93 94 95 class bpf_insn(Structure): 96 """"The BPF instruction data structure""" 97 _fields_ = [("code", c_ushort), 98 ("jt", c_ubyte), 99 ("jf", c_ubyte), 100 ("k", c_uint32)] 101 102 103 class bpf_program(Structure): 104 """"Structure for BIOCSETF""" 105 _fields_ = [("bf_len", c_uint), 106 ("bf_insns", POINTER(bpf_insn))] 107 108 109 def _legacy_bpf_pointer(tcpdump_lines): 110 """Get old-format BPF Pointer. Deprecated""" 111 X86_64 = os.uname()[4] in ['x86_64', 'aarch64'] 112 size = int(tcpdump_lines[0]) 113 bpf = b"" 114 for l in tcpdump_lines[1:]: 115 if six.PY2: 116 int_type = long # noqa: F821 117 else: 118 int_type = int 119 bpf += struct.pack("HBBI", *map(int_type, l.split())) 120 121 # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501 122 if conf.use_pypy: 123 str_buffer = ctypes.create_string_buffer(bpf) 124 return struct.pack('HL', size, ctypes.addressof(str_buffer)) 125 else: 126 # XXX. Argl! We need to give the kernel a pointer on the BPF, 127 # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. 
# noqa: E501 128 if X86_64: 129 return struct.pack("HL", size, id(bpf) + 36) 130 else: 131 return struct.pack("HI", size, id(bpf) + 20) 132 133 134 def get_bpf_pointer(tcpdump_lines): 135 """Create a BPF Pointer for TCPDump filter""" 136 if conf.use_pypy: 137 return _legacy_bpf_pointer(tcpdump_lines) 138 139 # Allocate BPF instructions 140 size = int(tcpdump_lines[0]) 141 bpf_insn_a = bpf_insn * size 142 bip = bpf_insn_a() 143 144 # Fill the BPF instruction structures with the byte code 145 tcpdump_lines = tcpdump_lines[1:] 146 i = 0 147 for line in tcpdump_lines: 148 values = [int(v) for v in line.split()] 149 bip[i].code = c_ushort(values[0]) 150 bip[i].jt = c_ubyte(values[1]) 151 bip[i].jf = c_ubyte(values[2]) 152 bip[i].k = c_uint(values[3]) 153 i += 1 154 155 # Create the BPF program 156 return bpf_program(size, bip) 157 158 159 def compile_filter(bpf_filter, iface=None): 160 """Asks Tcpdump to parse the filter, then build the matching 161 BPF bytecode using get_bpf_pointer. 162 """ 163 if not TCPDUMP: 164 raise Scapy_Exception("tcpdump is not available. Cannot use filter !") 165 try: 166 process = subprocess.Popen([ 167 conf.prog.tcpdump, 168 "-p", 169 "-i", (conf.iface if iface is None else iface), 170 "-ddd", 171 "-s", str(MTU), 172 bpf_filter], 173 stdout=subprocess.PIPE, 174 stderr=subprocess.PIPE 175 ) 176 except OSError as ex: 177 raise Scapy_Exception("Failed to attach filter: %s" % ex) 178 lines, err = process.communicate() 179 ret = process.returncode 180 if ret: 181 raise Scapy_Exception( 182 "Failed to attach filter: tcpdump returned: %s" % err 183 ) 184 lines = lines.strip().split(b"\n") 185 return get_bpf_pointer(lines) 186 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scapy/arch/common.py b/scapy/arch/common.py --- a/scapy/arch/common.py +++ b/scapy/arch/common.py @@ -42,7 +42,9 @@ return False # On some systems, --version does not exist on tcpdump - return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ') + return proc.returncode == 0 \ + or output.startswith(b'Usage: tcpdump ') \ + or output.startswith(b'tcpdump: unrecognized option') # This won't be used on Windows
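Put another way, the patch stops trusting the exit status alone, because the CentOS 6 tcpdump build exits with 1 on `--version` and prints a usage banner instead. A minimal standalone sketch of the relaxed check (the `tcpdump_is_available` name and the bare `tcpdump` default are ours for illustration; Scapy itself resolves the binary through `conf.prog.tcpdump`):

```python
import subprocess

def tcpdump_is_available(tcpdump_path="tcpdump"):
    """Return True if tcpdump can be started, even when --version exits non-zero."""
    try:
        proc = subprocess.Popen(
            [tcpdump_path, "--version"],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
        output = proc.communicate()[0]
    except OSError:
        return False
    # Old builds print a usage/option banner and exit 1, so accept that too.
    return (
        proc.returncode == 0
        or output.startswith(b"Usage: tcpdump ")
        or output.startswith(b"tcpdump: unrecognized option")
    )
```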
{"golden_diff": "diff --git a/scapy/arch/common.py b/scapy/arch/common.py\n--- a/scapy/arch/common.py\n+++ b/scapy/arch/common.py\n@@ -42,7 +42,9 @@\n return False\n \n # On some systems, --version does not exist on tcpdump\n- return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ')\n+ return proc.returncode == 0 \\\n+ or output.startswith(b'Usage: tcpdump ') \\\n+ or output.startswith(b'tcpdump: unrecognized option')\n \n \n # This won't be used on Windows\n", "issue": "tcpdump check error in centos\n#### Brief description\r\n\r\n> I have installed tcpdump in PATH, but it reports:\r\nscapy.error.Scapy_Exception: tcpdump is not available. Cannot use filter !\r\nI found the code which check tcudmp in /opt/rh/rh-python36/root/usr/lib/python3.6/site-packages/scapy/arch/common.py:\r\n\r\n```\r\ndef _check_tcpdump():\r\n \"\"\"\r\n Return True if the tcpdump command can be started\r\n \"\"\"\r\n with open(os.devnull, 'wb') as devnull:\r\n try:\r\n proc = subprocess.Popen([conf.prog.tcpdump, \"--version\"],\r\n stdout=devnull, stderr=subprocess.STDOUT)\r\n except OSError:\r\n return False\r\n return proc.wait() == 0\r\n```\r\n\r\n\r\nthe error is that tcpdump --version return 1 instead of 0\r\neg:\r\n```\r\n[root@localhost proxy]# tcpdump --version\r\ntcpdump version 4.1-PRE-CVS_2017_03_21\r\nlibpcap version 1.4.0\r\nUsage: tcpdump [-aAdDefhIJKlLnNOpqRStuUvxX] [ -B size ] [ -c count ]\r\n\t\t[ -C file_size ] [ -E algo:secret ] [ -F file ] [ -G seconds ]\r\n\t\t[ -i interface ] [ -j tstamptype ] [ -M secret ]\r\n\t\t[ -Q|-P in|out|inout ]\r\n\t\t[ -r file ] [ -s snaplen ] [ -T type ] [ -w file ]\r\n\t\t[ -W filecount ] [ -y datalinktype ] [ -z command ]\r\n\t\t[ -Z user ] [ expression ]\r\n[root@localhost proxy]# echo $?\r\n1\r\n```\r\n\r\n#### Environment\r\n\r\n```\r\n[root@localhost proxy]# python3.6 --version\r\nPython 3.6.3\r\n[root@localhost proxy]# pip3.6 freeze\r\ncertifi==2018.11.29\r\nchardet==3.0.4\r\nidna==2.8\r\nprotobuf==3.6.1\r\npsutil==5.4.8\r\nPyMySQL==0.9.3\r\nredis==3.0.1\r\nrequests==2.21.0\r\ns8-protocol==1.0\r\nscapy==2.4.2\r\nsix==1.11.0\r\nsnakeMQ==1.6\r\nurllib3==1.24.1\r\nvirtualenv==15.1.0\r\nxlrd==1.2.0\r\nYou are using pip version 9.0.1, however version 19.0.2 is available.\r\nYou should consider upgrading via the 'pip install --upgrade pip' command.\r\n[root@localhost proxy]# uname -a\r\nLinux localhost.localdomain 2.6.32-431.el6.x86_64 #1 SMP Fri Nov 22 03:15:09 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux\r\n[root@localhost proxy]# cat /etc/issue\r\nCentOS release 6.5 (Final)\r\nKernel \\r on an \\m\r\n```\r\n\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nFunctions common to different architectures\n\"\"\"\n\nimport ctypes\nimport os\nimport socket\nimport struct\nimport subprocess\nimport time\nfrom ctypes import POINTER, Structure\nfrom ctypes import c_uint, c_uint32, c_ushort, c_ubyte\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.data import MTU\nfrom scapy.error import Scapy_Exception\nimport scapy.modules.six as six\n\nif not WINDOWS:\n from fcntl import ioctl\n\n# BOOT\n\n\ndef _check_tcpdump():\n \"\"\"\n Return True if the tcpdump command can be started\n \"\"\"\n try:\n proc = subprocess.Popen(\n [conf.prog.tcpdump, \"--version\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output = proc.communicate()[0]\n 
except OSError:\n return False\n\n # On some systems, --version does not exist on tcpdump\n return proc.returncode == 0 or output.startswith(b'Usage: tcpdump ')\n\n\n# This won't be used on Windows\nTCPDUMP = WINDOWS or _check_tcpdump()\n\n# UTILS\n\n\ndef get_if(iff, cmd):\n \"\"\"Ease SIOCGIF* ioctl calls\"\"\"\n\n sck = socket.socket()\n ifreq = ioctl(sck, cmd, struct.pack(\"16s16x\", iff.encode(\"utf8\")))\n sck.close()\n return ifreq\n\n\ndef get_if_raw_hwaddr(iff):\n \"\"\"Get the raw MAC address of a local interface.\n\n This function uses SIOCGIFHWADDR calls, therefore only works\n on some distros.\n\n :param iff: the network interface name as a string\n :returns: the corresponding raw MAC address\n \"\"\"\n from scapy.arch import SIOCGIFHWADDR\n return struct.unpack(\"16xh6s8x\", get_if(iff, SIOCGIFHWADDR))\n\n# SOCKET UTILS\n\n\ndef _select_nonblock(sockets, remain=None):\n \"\"\"This function is called during sendrecv() routine to select\n the available sockets.\n \"\"\"\n # pcap sockets aren't selectable, so we return all of them\n # and ask the selecting functions to use nonblock_recv instead of recv\n def _sleep_nonblock_recv(self):\n res = self.nonblock_recv()\n if res is None:\n time.sleep(conf.recv_poll_rate)\n return res\n # we enforce remain=None: don't wait.\n return sockets, _sleep_nonblock_recv\n\n# BPF HANDLERS\n\n\nclass bpf_insn(Structure):\n \"\"\"\"The BPF instruction data structure\"\"\"\n _fields_ = [(\"code\", c_ushort),\n (\"jt\", c_ubyte),\n (\"jf\", c_ubyte),\n (\"k\", c_uint32)]\n\n\nclass bpf_program(Structure):\n \"\"\"\"Structure for BIOCSETF\"\"\"\n _fields_ = [(\"bf_len\", c_uint),\n (\"bf_insns\", POINTER(bpf_insn))]\n\n\ndef _legacy_bpf_pointer(tcpdump_lines):\n \"\"\"Get old-format BPF Pointer. Deprecated\"\"\"\n X86_64 = os.uname()[4] in ['x86_64', 'aarch64']\n size = int(tcpdump_lines[0])\n bpf = b\"\"\n for l in tcpdump_lines[1:]:\n if six.PY2:\n int_type = long # noqa: F821\n else:\n int_type = int\n bpf += struct.pack(\"HBBI\", *map(int_type, l.split()))\n\n # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501\n if conf.use_pypy:\n str_buffer = ctypes.create_string_buffer(bpf)\n return struct.pack('HL', size, ctypes.addressof(str_buffer))\n else:\n # XXX. Argl! We need to give the kernel a pointer on the BPF,\n # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501\n if X86_64:\n return struct.pack(\"HL\", size, id(bpf) + 36)\n else:\n return struct.pack(\"HI\", size, id(bpf) + 20)\n\n\ndef get_bpf_pointer(tcpdump_lines):\n \"\"\"Create a BPF Pointer for TCPDump filter\"\"\"\n if conf.use_pypy:\n return _legacy_bpf_pointer(tcpdump_lines)\n\n # Allocate BPF instructions\n size = int(tcpdump_lines[0])\n bpf_insn_a = bpf_insn * size\n bip = bpf_insn_a()\n\n # Fill the BPF instruction structures with the byte code\n tcpdump_lines = tcpdump_lines[1:]\n i = 0\n for line in tcpdump_lines:\n values = [int(v) for v in line.split()]\n bip[i].code = c_ushort(values[0])\n bip[i].jt = c_ubyte(values[1])\n bip[i].jf = c_ubyte(values[2])\n bip[i].k = c_uint(values[3])\n i += 1\n\n # Create the BPF program\n return bpf_program(size, bip)\n\n\ndef compile_filter(bpf_filter, iface=None):\n \"\"\"Asks Tcpdump to parse the filter, then build the matching\n BPF bytecode using get_bpf_pointer.\n \"\"\"\n if not TCPDUMP:\n raise Scapy_Exception(\"tcpdump is not available. 
Cannot use filter !\")\n try:\n process = subprocess.Popen([\n conf.prog.tcpdump,\n \"-p\",\n \"-i\", (conf.iface if iface is None else iface),\n \"-ddd\",\n \"-s\", str(MTU),\n bpf_filter],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n except OSError as ex:\n raise Scapy_Exception(\"Failed to attach filter: %s\" % ex)\n lines, err = process.communicate()\n ret = process.returncode\n if ret:\n raise Scapy_Exception(\n \"Failed to attach filter: tcpdump returned: %s\" % err\n )\n lines = lines.strip().split(b\"\\n\")\n return get_bpf_pointer(lines)\n", "path": "scapy/arch/common.py"}], "after_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nFunctions common to different architectures\n\"\"\"\n\nimport ctypes\nimport os\nimport socket\nimport struct\nimport subprocess\nimport time\nfrom ctypes import POINTER, Structure\nfrom ctypes import c_uint, c_uint32, c_ushort, c_ubyte\nfrom scapy.consts import WINDOWS\nfrom scapy.config import conf\nfrom scapy.data import MTU\nfrom scapy.error import Scapy_Exception\nimport scapy.modules.six as six\n\nif not WINDOWS:\n from fcntl import ioctl\n\n# BOOT\n\n\ndef _check_tcpdump():\n \"\"\"\n Return True if the tcpdump command can be started\n \"\"\"\n try:\n proc = subprocess.Popen(\n [conf.prog.tcpdump, \"--version\"],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n output = proc.communicate()[0]\n except OSError:\n return False\n\n # On some systems, --version does not exist on tcpdump\n return proc.returncode == 0 \\\n or output.startswith(b'Usage: tcpdump ') \\\n or output.startswith(b'tcpdump: unrecognized option')\n\n\n# This won't be used on Windows\nTCPDUMP = WINDOWS or _check_tcpdump()\n\n# UTILS\n\n\ndef get_if(iff, cmd):\n \"\"\"Ease SIOCGIF* ioctl calls\"\"\"\n\n sck = socket.socket()\n ifreq = ioctl(sck, cmd, struct.pack(\"16s16x\", iff.encode(\"utf8\")))\n sck.close()\n return ifreq\n\n\ndef get_if_raw_hwaddr(iff):\n \"\"\"Get the raw MAC address of a local interface.\n\n This function uses SIOCGIFHWADDR calls, therefore only works\n on some distros.\n\n :param iff: the network interface name as a string\n :returns: the corresponding raw MAC address\n \"\"\"\n from scapy.arch import SIOCGIFHWADDR\n return struct.unpack(\"16xh6s8x\", get_if(iff, SIOCGIFHWADDR))\n\n# SOCKET UTILS\n\n\ndef _select_nonblock(sockets, remain=None):\n \"\"\"This function is called during sendrecv() routine to select\n the available sockets.\n \"\"\"\n # pcap sockets aren't selectable, so we return all of them\n # and ask the selecting functions to use nonblock_recv instead of recv\n def _sleep_nonblock_recv(self):\n res = self.nonblock_recv()\n if res is None:\n time.sleep(conf.recv_poll_rate)\n return res\n # we enforce remain=None: don't wait.\n return sockets, _sleep_nonblock_recv\n\n# BPF HANDLERS\n\n\nclass bpf_insn(Structure):\n \"\"\"\"The BPF instruction data structure\"\"\"\n _fields_ = [(\"code\", c_ushort),\n (\"jt\", c_ubyte),\n (\"jf\", c_ubyte),\n (\"k\", c_uint32)]\n\n\nclass bpf_program(Structure):\n \"\"\"\"Structure for BIOCSETF\"\"\"\n _fields_ = [(\"bf_len\", c_uint),\n (\"bf_insns\", POINTER(bpf_insn))]\n\n\ndef _legacy_bpf_pointer(tcpdump_lines):\n \"\"\"Get old-format BPF Pointer. 
Deprecated\"\"\"\n X86_64 = os.uname()[4] in ['x86_64', 'aarch64']\n size = int(tcpdump_lines[0])\n bpf = b\"\"\n for l in tcpdump_lines[1:]:\n if six.PY2:\n int_type = long # noqa: F821\n else:\n int_type = int\n bpf += struct.pack(\"HBBI\", *map(int_type, l.split()))\n\n # Thanks to http://www.netprojects.de/scapy-with-pypy-solved/ for the pypy trick # noqa: E501\n if conf.use_pypy:\n str_buffer = ctypes.create_string_buffer(bpf)\n return struct.pack('HL', size, ctypes.addressof(str_buffer))\n else:\n # XXX. Argl! We need to give the kernel a pointer on the BPF,\n # Python object header seems to be 20 bytes. 36 bytes for x86 64bits arch. # noqa: E501\n if X86_64:\n return struct.pack(\"HL\", size, id(bpf) + 36)\n else:\n return struct.pack(\"HI\", size, id(bpf) + 20)\n\n\ndef get_bpf_pointer(tcpdump_lines):\n \"\"\"Create a BPF Pointer for TCPDump filter\"\"\"\n if conf.use_pypy:\n return _legacy_bpf_pointer(tcpdump_lines)\n\n # Allocate BPF instructions\n size = int(tcpdump_lines[0])\n bpf_insn_a = bpf_insn * size\n bip = bpf_insn_a()\n\n # Fill the BPF instruction structures with the byte code\n tcpdump_lines = tcpdump_lines[1:]\n i = 0\n for line in tcpdump_lines:\n values = [int(v) for v in line.split()]\n bip[i].code = c_ushort(values[0])\n bip[i].jt = c_ubyte(values[1])\n bip[i].jf = c_ubyte(values[2])\n bip[i].k = c_uint(values[3])\n i += 1\n\n # Create the BPF program\n return bpf_program(size, bip)\n\n\ndef compile_filter(bpf_filter, iface=None):\n \"\"\"Asks Tcpdump to parse the filter, then build the matching\n BPF bytecode using get_bpf_pointer.\n \"\"\"\n if not TCPDUMP:\n raise Scapy_Exception(\"tcpdump is not available. Cannot use filter !\")\n try:\n process = subprocess.Popen([\n conf.prog.tcpdump,\n \"-p\",\n \"-i\", (conf.iface if iface is None else iface),\n \"-ddd\",\n \"-s\", str(MTU),\n bpf_filter],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE\n )\n except OSError as ex:\n raise Scapy_Exception(\"Failed to attach filter: %s\" % ex)\n lines, err = process.communicate()\n ret = process.returncode\n if ret:\n raise Scapy_Exception(\n \"Failed to attach filter: tcpdump returned: %s\" % err\n )\n lines = lines.strip().split(b\"\\n\")\n return get_bpf_pointer(lines)\n", "path": "scapy/arch/common.py"}]}
num_tokens: 2,847
num_tokens_diff: 128
problem_id: gh_patches_debug_17107
source: rasdani/github-patches
task_type: git_diff
in_source_id: nvaccess__nvda-10947
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Winamp support: NVDA doesn't detect the status of shuffle and repeat modes <!-- Please thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files. Issues may be closed if the required information is not present. https://github.com/nvaccess/nvda/wiki/Github-issue-template-explanation-and-examples --> ### Steps to reproduce: 1. Start winamp 2. Press r or s toggle shuffle or repeat 3. NVDA will say "off" every time you toggle those modes ### Actual behavior: NVDA will say "off" every time you toggle those modes ## ### Expected behavior: NVDA needs to state if setting is on or off. ### System configuration #### NVDA installed/portable/running from source: Installed #### NVDA version: 2019.3.1 #### Windows version: Windows10 latest #### Name and version of other software in use when reproducing the issue: Winamp 5.666 Build 3516 #### Other information about your system: ### Other questions #### Does the issue still occur after restarting your computer? Yes #### Have you tried any other versions of NVDA? If so, please report their behaviors. No #### If addons are disabled, is your problem still occuring? Yes #### Did you try to run the COM registry fixing tool in NVDA menu / tools? No --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `source/appModules/winamp.py` Content: ``` 1 #appModules/winamp.py 2 #A part of NonVisual Desktop Access (NVDA) 3 #Copyright (C) 2006-2012 NVDA Contributors 4 #This file is covered by the GNU General Public License. 5 #See the file COPYING for more details. 6 7 from ctypes import * 8 from ctypes.wintypes import * 9 import winKernel 10 import winUser 11 from scriptHandler import isScriptWaiting 12 from NVDAObjects.IAccessible import IAccessible 13 import appModuleHandler 14 import speech 15 import locale 16 import controlTypes 17 import api 18 import watchdog 19 import braille 20 import ui 21 22 # message used to sent many messages to winamp's main window. 
23 # most all of the IPC_* messages involve sending the message in the form of: 24 # result = SendMessage(hwnd_winamp,WM_WA_IPC,(parameter),IPC_*); 25 26 WM_WA_IPC=winUser.WM_USER 27 28 # winamp window 29 IPC_GET_SHUFFLE=250 30 IPC_GET_REPEAT=251 31 32 # playlist editor 33 IPC_PLAYLIST_GET_NEXT_SELECTED=3029 34 IPC_PE_GETCURINDEX=100 35 IPC_PE_GETINDEXTOTAL=101 36 # in_process ONLY 37 IPC_PE_GETINDEXTITLE=200 # lParam = pointer to fileinfo2 structure 38 39 class fileinfo2(Structure): 40 _fields_=[ 41 ('fileindex',c_int), 42 ('filetitle',c_char*256), 43 ('filelength',c_char*16), 44 ] 45 46 hwndWinamp=0 47 48 def getShuffle(): 49 global hwndWinamp 50 return watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_SHUFFLE) 51 52 def getRepeat(): 53 global hwndWinamp 54 return watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_REPEAT) 55 56 class AppModule(appModuleHandler.AppModule): 57 58 def event_NVDAObject_init(self,obj): 59 global hwndWinamp 60 hwndWinamp=windll.user32.FindWindowA("Winamp v1.x",None) 61 62 def chooseNVDAObjectOverlayClasses(self, obj, clsList): 63 windowClass = obj.windowClassName 64 if windowClass == "Winamp PE": 65 clsList.insert(0, winampPlaylistEditor) 66 elif windowClass == "Winamp v1.x": 67 clsList.insert(0, winampMainWindow) 68 69 class winampMainWindow(IAccessible): 70 71 def event_nameChange(self): 72 pass 73 74 def script_shuffleToggle(self,gesture): 75 gesture.send() 76 if not isScriptWaiting(): 77 api.processPendingEvents() 78 if getShuffle(): 79 # Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now on. 80 onOff=pgettext("shuffle", "on") 81 else: 82 # Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now off. 83 onOff=pgettext("shuffle", "off") 84 ui.message(onOff) 85 86 def script_repeatToggle(self,gesture): 87 gesture.send() 88 if not isScriptWaiting(): 89 api.processPendingEvents() 90 if getRepeat(): 91 # Translators: the user has pressed the repeat track toggle in winamp, repeat is now on. 92 onOff=pgettext("repeat", "on") 93 else: 94 # Translators: the user has pressed the repeat track toggle in winamp, repeat is now off. 95 onOff=pgettext("repeat", "off") 96 ui.message(onOff) 97 98 __gestures = { 99 "kb:s": "shuffleToggle", 100 "kb:r": "repeatToggle", 101 } 102 103 class winampPlaylistEditor(winampMainWindow): 104 105 def _get_name(self): 106 curIndex=watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,-1,IPC_PLAYLIST_GET_NEXT_SELECTED) 107 if curIndex <0: 108 return None 109 info=fileinfo2() 110 info.fileindex=curIndex 111 internalInfo=winKernel.virtualAllocEx(self.processHandle,None,sizeof(info),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE) 112 try: 113 winKernel.writeProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None) 114 watchdog.cancellableSendMessage(self.windowHandle,WM_WA_IPC,IPC_PE_GETINDEXTITLE,internalInfo) 115 winKernel.readProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None) 116 finally: 117 winKernel.virtualFreeEx(self.processHandle,internalInfo,0,winKernel.MEM_RELEASE) 118 # file title is fetched in the current locale encoding. 119 # We need to decode it to unicode first. 
120 encoding=locale.getlocale()[1] 121 fileTitle=info.filetitle.decode(encoding,errors="replace") 122 return "%d.\t%s\t%s"%(curIndex+1,fileTitle,info.filelength) 123 124 def _get_role(self): 125 return controlTypes.ROLE_LISTITEM 126 127 def script_changeItem(self,gesture): 128 gesture.send() 129 if not isScriptWaiting(): 130 api.processPendingEvents() 131 speech.speakObject(self,reason=controlTypes.REASON_FOCUS) 132 braille.handler.handleGainFocus(self) 133 134 def event_nameChange(self): 135 return super(winampMainWindow,self).event_nameChange() 136 137 __changeItemGestures = ( 138 "kb:upArrow", 139 "kb:downArrow", 140 "kb:pageUp", 141 "kb:pageDown", 142 ) 143 144 def initOverlayClass(self): 145 for gesture in self.__changeItemGestures: 146 self.bindGesture(gesture, "changeItem") 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/source/appModules/winamp.py b/source/appModules/winamp.py --- a/source/appModules/winamp.py +++ b/source/appModules/winamp.py @@ -1,8 +1,7 @@ -#appModules/winamp.py -#A part of NonVisual Desktop Access (NVDA) -#Copyright (C) 2006-2012 NVDA Contributors -#This file is covered by the GNU General Public License. -#See the file COPYING for more details. +# A part of NonVisual Desktop Access (NVDA) +# Copyright (C) 2006-2020 NV Access Limited +# This file is covered by the GNU General Public License. +# See the file COPYING for more details. from ctypes import * from ctypes.wintypes import * @@ -57,7 +56,7 @@ def event_NVDAObject_init(self,obj): global hwndWinamp - hwndWinamp=windll.user32.FindWindowA("Winamp v1.x",None) + hwndWinamp = winUser.FindWindow("Winamp v1.x", None) def chooseNVDAObjectOverlayClasses(self, obj, clsList): windowClass = obj.windowClassName
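The shuffle/repeat status reads as "off" because the window-handle lookup itself appears to fail: under Python 3, ctypes passes a `str` to the ANSI `FindWindowA` as a wide-character buffer, so the "Winamp v1.x" class never matches, `hwndWinamp` stays 0, and every later `SendMessage` query returns 0. A small illustrative sketch of the wide-character lookup the patch switches to (NVDA routes it through its own `winUser.FindWindow` wrapper; the direct `FindWindowW` call here is just to show the idea):

```python
from ctypes import windll

def find_winamp_window():
    # Use the wide-character API so a Python 3 str is marshalled correctly;
    # a return value of 0 means the "Winamp v1.x" window was not found.
    return windll.user32.FindWindowW("Winamp v1.x", None)
```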
{"golden_diff": "diff --git a/source/appModules/winamp.py b/source/appModules/winamp.py\n--- a/source/appModules/winamp.py\n+++ b/source/appModules/winamp.py\n@@ -1,8 +1,7 @@\n-#appModules/winamp.py\r\n-#A part of NonVisual Desktop Access (NVDA)\r\n-#Copyright (C) 2006-2012 NVDA Contributors\r\n-#This file is covered by the GNU General Public License.\r\n-#See the file COPYING for more details.\r\n+# A part of NonVisual Desktop Access (NVDA)\r\n+# Copyright (C) 2006-2020 NV Access Limited\r\n+# This file is covered by the GNU General Public License.\r\n+# See the file COPYING for more details.\r\n \r\n from ctypes import *\r\n from ctypes.wintypes import *\r\n@@ -57,7 +56,7 @@\n \r\n \tdef event_NVDAObject_init(self,obj):\r\n \t\tglobal hwndWinamp\r\n-\t\thwndWinamp=windll.user32.FindWindowA(\"Winamp v1.x\",None)\r\n+\t\thwndWinamp = winUser.FindWindow(\"Winamp v1.x\", None)\r\n \r\n \tdef chooseNVDAObjectOverlayClasses(self, obj, clsList):\r\n \t\twindowClass = obj.windowClassName\n", "issue": "Winamp support: NVDA doesn't detect the status of shuffle and repeat modes\n<!--\r\nPlease thoroughly read NVDA's wiki article on how to fill in this template, including how to provide the required files.\r\nIssues may be closed if the required information is not present.\r\nhttps://github.com/nvaccess/nvda/wiki/Github-issue-template-explanation-and-examples\r\n-->\r\n\r\n### Steps to reproduce:\r\n1. Start winamp\r\n2. Press r or s toggle shuffle or repeat\r\n3. NVDA will say \"off\" every time you toggle those modes\r\n### Actual behavior:\r\nNVDA will say \"off\" every time you toggle those modes\r\n##\r\n### Expected behavior:\r\nNVDA needs to state if setting is on or off.\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n#### NVDA version:\r\n2019.3.1\r\n#### Windows version:\r\nWindows10 latest\r\n\r\n#### Name and version of other software in use when reproducing the issue:\r\nWinamp 5.666 Build 3516\r\n#### Other information about your system:\r\n\r\n### Other questions\r\n#### Does the issue still occur after restarting your computer?\r\nYes\r\n#### Have you tried any other versions of NVDA? If so, please report their behaviors.\r\nNo\r\n#### If addons are disabled, is your problem still occuring?\r\nYes\r\n#### Did you try to run the COM registry fixing tool in NVDA menu / tools?\r\nNo\n", "before_files": [{"content": "#appModules/winamp.py\r\n#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2006-2012 NVDA Contributors\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nimport winKernel\r\nimport winUser\r\nfrom scriptHandler import isScriptWaiting\r\nfrom NVDAObjects.IAccessible import IAccessible \r\nimport appModuleHandler\r\nimport speech\r\nimport locale\r\nimport controlTypes\r\nimport api\r\nimport watchdog\r\nimport braille\r\nimport ui\r\n\r\n# message used to sent many messages to winamp's main window. 
\r\n# most all of the IPC_* messages involve sending the message in the form of:\r\n# result = SendMessage(hwnd_winamp,WM_WA_IPC,(parameter),IPC_*);\r\n\r\nWM_WA_IPC=winUser.WM_USER\r\n\r\n# winamp window\r\nIPC_GET_SHUFFLE=250\r\nIPC_GET_REPEAT=251\r\n\r\n# playlist editor\r\nIPC_PLAYLIST_GET_NEXT_SELECTED=3029\r\nIPC_PE_GETCURINDEX=100\r\nIPC_PE_GETINDEXTOTAL=101\r\n# in_process ONLY\r\nIPC_PE_GETINDEXTITLE=200 # lParam = pointer to fileinfo2 structure\r\n\r\nclass fileinfo2(Structure):\r\n\t_fields_=[\r\n\t\t('fileindex',c_int),\r\n\t\t('filetitle',c_char*256),\r\n\t\t('filelength',c_char*16),\r\n\t]\r\n\r\nhwndWinamp=0\r\n\r\ndef getShuffle():\r\n\tglobal hwndWinamp\r\n\treturn watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_SHUFFLE)\r\n\r\ndef getRepeat():\r\n\tglobal hwndWinamp\r\n\treturn watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_REPEAT)\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef event_NVDAObject_init(self,obj):\r\n\t\tglobal hwndWinamp\r\n\t\thwndWinamp=windll.user32.FindWindowA(\"Winamp v1.x\",None)\r\n\r\n\tdef chooseNVDAObjectOverlayClasses(self, obj, clsList):\r\n\t\twindowClass = obj.windowClassName\r\n\t\tif windowClass == \"Winamp PE\":\r\n\t\t\tclsList.insert(0, winampPlaylistEditor)\r\n\t\telif windowClass == \"Winamp v1.x\":\r\n\t\t\tclsList.insert(0, winampMainWindow)\r\n\r\nclass winampMainWindow(IAccessible):\r\n\r\n\tdef event_nameChange(self):\r\n\t\tpass\r\n\r\n\tdef script_shuffleToggle(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tif getShuffle():\r\n\t\t\t\t# Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now on.\r\n\t\t\t\tonOff=pgettext(\"shuffle\", \"on\")\r\n\t\t\telse:\r\n\t\t\t\t# Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now off.\r\n\t\t\t\tonOff=pgettext(\"shuffle\", \"off\")\r\n\t\t\tui.message(onOff)\r\n\r\n\tdef script_repeatToggle(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tif getRepeat():\r\n\t\t\t\t# Translators: the user has pressed the repeat track toggle in winamp, repeat is now on.\r\n\t\t\t\tonOff=pgettext(\"repeat\", \"on\")\r\n\t\t\telse:\r\n\t\t\t\t# Translators: the user has pressed the repeat track toggle in winamp, repeat is now off.\r\n\t\t\t\tonOff=pgettext(\"repeat\", \"off\")\r\n\t\t\tui.message(onOff)\r\n\r\n\t__gestures = {\r\n\t\t\"kb:s\": \"shuffleToggle\",\r\n\t\t\"kb:r\": \"repeatToggle\",\r\n\t}\r\n\r\nclass winampPlaylistEditor(winampMainWindow):\r\n\r\n\tdef _get_name(self):\r\n\t\tcurIndex=watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,-1,IPC_PLAYLIST_GET_NEXT_SELECTED)\r\n\t\tif curIndex <0:\r\n\t\t\treturn None\r\n\t\tinfo=fileinfo2()\r\n\t\tinfo.fileindex=curIndex\r\n\t\tinternalInfo=winKernel.virtualAllocEx(self.processHandle,None,sizeof(info),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)\r\n\t\ttry:\r\n\t\t\twinKernel.writeProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)\r\n\t\t\twatchdog.cancellableSendMessage(self.windowHandle,WM_WA_IPC,IPC_PE_GETINDEXTITLE,internalInfo)\r\n\t\t\twinKernel.readProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)\r\n\t\tfinally:\r\n\t\t\twinKernel.virtualFreeEx(self.processHandle,internalInfo,0,winKernel.MEM_RELEASE)\r\n\t\t# file title is fetched in the current locale encoding.\r\n\t\t# We need to decode it to unicode first. 
\r\n\t\tencoding=locale.getlocale()[1]\r\n\t\tfileTitle=info.filetitle.decode(encoding,errors=\"replace\")\r\n\t\treturn \"%d.\\t%s\\t%s\"%(curIndex+1,fileTitle,info.filelength)\r\n\r\n\tdef _get_role(self):\r\n\t\treturn controlTypes.ROLE_LISTITEM\r\n\r\n\tdef script_changeItem(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tspeech.speakObject(self,reason=controlTypes.REASON_FOCUS)\r\n\t\t\tbraille.handler.handleGainFocus(self)\r\n\r\n\tdef event_nameChange(self):\r\n\t\treturn super(winampMainWindow,self).event_nameChange()\r\n\r\n\t__changeItemGestures = (\r\n\t\t\"kb:upArrow\",\r\n\t\t\"kb:downArrow\",\r\n\t\t\"kb:pageUp\",\r\n\t\t\"kb:pageDown\",\r\n\t)\r\n\r\n\tdef initOverlayClass(self):\r\n\t\tfor gesture in self.__changeItemGestures:\r\n\t\t\tself.bindGesture(gesture, \"changeItem\")\r\n", "path": "source/appModules/winamp.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2020 NV Access Limited\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n\r\nfrom ctypes import *\r\nfrom ctypes.wintypes import *\r\nimport winKernel\r\nimport winUser\r\nfrom scriptHandler import isScriptWaiting\r\nfrom NVDAObjects.IAccessible import IAccessible \r\nimport appModuleHandler\r\nimport speech\r\nimport locale\r\nimport controlTypes\r\nimport api\r\nimport watchdog\r\nimport braille\r\nimport ui\r\n\r\n# message used to sent many messages to winamp's main window. \r\n# most all of the IPC_* messages involve sending the message in the form of:\r\n# result = SendMessage(hwnd_winamp,WM_WA_IPC,(parameter),IPC_*);\r\n\r\nWM_WA_IPC=winUser.WM_USER\r\n\r\n# winamp window\r\nIPC_GET_SHUFFLE=250\r\nIPC_GET_REPEAT=251\r\n\r\n# playlist editor\r\nIPC_PLAYLIST_GET_NEXT_SELECTED=3029\r\nIPC_PE_GETCURINDEX=100\r\nIPC_PE_GETINDEXTOTAL=101\r\n# in_process ONLY\r\nIPC_PE_GETINDEXTITLE=200 # lParam = pointer to fileinfo2 structure\r\n\r\nclass fileinfo2(Structure):\r\n\t_fields_=[\r\n\t\t('fileindex',c_int),\r\n\t\t('filetitle',c_char*256),\r\n\t\t('filelength',c_char*16),\r\n\t]\r\n\r\nhwndWinamp=0\r\n\r\ndef getShuffle():\r\n\tglobal hwndWinamp\r\n\treturn watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_SHUFFLE)\r\n\r\ndef getRepeat():\r\n\tglobal hwndWinamp\r\n\treturn watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,0,IPC_GET_REPEAT)\r\n\r\nclass AppModule(appModuleHandler.AppModule):\r\n\r\n\tdef event_NVDAObject_init(self,obj):\r\n\t\tglobal hwndWinamp\r\n\t\thwndWinamp = winUser.FindWindow(\"Winamp v1.x\", None)\r\n\r\n\tdef chooseNVDAObjectOverlayClasses(self, obj, clsList):\r\n\t\twindowClass = obj.windowClassName\r\n\t\tif windowClass == \"Winamp PE\":\r\n\t\t\tclsList.insert(0, winampPlaylistEditor)\r\n\t\telif windowClass == \"Winamp v1.x\":\r\n\t\t\tclsList.insert(0, winampMainWindow)\r\n\r\nclass winampMainWindow(IAccessible):\r\n\r\n\tdef event_nameChange(self):\r\n\t\tpass\r\n\r\n\tdef script_shuffleToggle(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tif getShuffle():\r\n\t\t\t\t# Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now on.\r\n\t\t\t\tonOff=pgettext(\"shuffle\", \"on\")\r\n\t\t\telse:\r\n\t\t\t\t# Translators: the user has pressed the shuffle tracks toggle in winamp, shuffle is now off.\r\n\t\t\t\tonOff=pgettext(\"shuffle\", \"off\")\r\n\t\t\tui.message(onOff)\r\n\r\n\tdef 
script_repeatToggle(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tif getRepeat():\r\n\t\t\t\t# Translators: the user has pressed the repeat track toggle in winamp, repeat is now on.\r\n\t\t\t\tonOff=pgettext(\"repeat\", \"on\")\r\n\t\t\telse:\r\n\t\t\t\t# Translators: the user has pressed the repeat track toggle in winamp, repeat is now off.\r\n\t\t\t\tonOff=pgettext(\"repeat\", \"off\")\r\n\t\t\tui.message(onOff)\r\n\r\n\t__gestures = {\r\n\t\t\"kb:s\": \"shuffleToggle\",\r\n\t\t\"kb:r\": \"repeatToggle\",\r\n\t}\r\n\r\nclass winampPlaylistEditor(winampMainWindow):\r\n\r\n\tdef _get_name(self):\r\n\t\tcurIndex=watchdog.cancellableSendMessage(hwndWinamp,WM_WA_IPC,-1,IPC_PLAYLIST_GET_NEXT_SELECTED)\r\n\t\tif curIndex <0:\r\n\t\t\treturn None\r\n\t\tinfo=fileinfo2()\r\n\t\tinfo.fileindex=curIndex\r\n\t\tinternalInfo=winKernel.virtualAllocEx(self.processHandle,None,sizeof(info),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)\r\n\t\ttry:\r\n\t\t\twinKernel.writeProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)\r\n\t\t\twatchdog.cancellableSendMessage(self.windowHandle,WM_WA_IPC,IPC_PE_GETINDEXTITLE,internalInfo)\r\n\t\t\twinKernel.readProcessMemory(self.processHandle,internalInfo,byref(info),sizeof(info),None)\r\n\t\tfinally:\r\n\t\t\twinKernel.virtualFreeEx(self.processHandle,internalInfo,0,winKernel.MEM_RELEASE)\r\n\t\t# file title is fetched in the current locale encoding.\r\n\t\t# We need to decode it to unicode first. \r\n\t\tencoding=locale.getlocale()[1]\r\n\t\tfileTitle=info.filetitle.decode(encoding,errors=\"replace\")\r\n\t\treturn \"%d.\\t%s\\t%s\"%(curIndex+1,fileTitle,info.filelength)\r\n\r\n\tdef _get_role(self):\r\n\t\treturn controlTypes.ROLE_LISTITEM\r\n\r\n\tdef script_changeItem(self,gesture):\r\n\t\tgesture.send()\r\n\t\tif not isScriptWaiting():\r\n\t\t\tapi.processPendingEvents()\r\n\t\t\tspeech.speakObject(self,reason=controlTypes.REASON_FOCUS)\r\n\t\t\tbraille.handler.handleGainFocus(self)\r\n\r\n\tdef event_nameChange(self):\r\n\t\treturn super(winampMainWindow,self).event_nameChange()\r\n\r\n\t__changeItemGestures = (\r\n\t\t\"kb:upArrow\",\r\n\t\t\"kb:downArrow\",\r\n\t\t\"kb:pageUp\",\r\n\t\t\"kb:pageDown\",\r\n\t)\r\n\r\n\tdef initOverlayClass(self):\r\n\t\tfor gesture in self.__changeItemGestures:\r\n\t\t\tself.bindGesture(gesture, \"changeItem\")\r\n", "path": "source/appModules/winamp.py"}]}
2,137
271
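The winamp record above replaces the raw `windll.user32.FindWindowA` call with NVDA's `winUser.FindWindow` wrapper. A likely reason the old call always reported shuffle and repeat as "off" is that, with no argtypes declared, ctypes marshals a Python 3 `str` as a wide-character buffer, so the ANSI entry point never matches the "Winamp v1.x" window class and every status query goes to a null handle. A minimal, Windows-only sketch of that pitfall (it assumes a running Winamp instance; the routing of `winUser.FindWindow` to the wide entry point is an assumption):

```python
# Hypothetical check, Windows only: compare the ANSI and wide FindWindow calls.
import ctypes

user32 = ctypes.windll.user32

# No argtypes declared, so the Python 3 str is marshalled as UTF-16 data;
# the ANSI entry point reads it as a truncated class name and finds nothing.
ansi_hwnd = user32.FindWindowA("Winamp v1.x", None)

# Wide-character entry point; NVDA's winUser.FindWindow is assumed to route here.
wide_hwnd = user32.FindWindowW("Winamp v1.x", None)

print(ansi_hwnd, wide_hwnd)  # typically 0 vs. a real HWND while Winamp is running
```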
gh_patches_debug_19139
rasdani/github-patches
git_diff
mesonbuild__meson-127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- shutil.ReadError: Unknown archive format (tar.xz) ``` Dowloading enet from http://enet.bespin.org/download/enet-1.3.13.tar.gz Downloading patch from https://github.com/ignatenkobrain/meson_wrap_example/raw/master/subprojects/enet-meson.tar.xz Traceback (most recent call last): File "/home/brain/git/meson/meson.py", line 188, in run app.generate() File "/home/brain/git/meson/meson.py", line 129, in generate intr.run() File "/home/brain/git/meson/interpreter.py", line 871, in run self.evaluate_codeblock(self.ast) File "/home/brain/git/meson/interpreter.py", line 893, in evaluate_codeblock raise e File "/home/brain/git/meson/interpreter.py", line 887, in evaluate_codeblock self.evaluate_statement(cur) File "/home/brain/git/meson/interpreter.py", line 951, in evaluate_statement return self.evaluate_if(cur) File "/home/brain/git/meson/interpreter.py", line 1757, in evaluate_if self.evaluate_codeblock(node.elseblock) File "/home/brain/git/meson/interpreter.py", line 893, in evaluate_codeblock raise e File "/home/brain/git/meson/interpreter.py", line 887, in evaluate_codeblock self.evaluate_statement(cur) File "/home/brain/git/meson/interpreter.py", line 943, in evaluate_statement return self.assignment(cur) File "/home/brain/git/meson/interpreter.py", line 1637, in assignment value = self.evaluate_statement(node.value) File "/home/brain/git/meson/interpreter.py", line 941, in evaluate_statement return self.function_call(cur) File "/home/brain/git/meson/interpreter.py", line 1618, in function_call return self.funcs[func_name](node, self.flatten(posargs), kwargs) File "/home/brain/git/meson/interpreter.py", line 60, in wrapped return f(self, node, args, kwargs) File "/home/brain/git/meson/interpreter.py", line 52, in wrapped return f(self, node, args, kwargs) File "/home/brain/git/meson/interpreter.py", line 1071, in func_subproject resolved = r.resolve(dirname) File "/home/brain/git/meson/wrap.py", line 64, in resolve self.extract_package(p) File "/home/brain/git/meson/wrap.py", line 128, in extract_package shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root) File "/usr/lib64/python3.4/shutil.py", line 953, in unpack_archive raise ReadError("Unknown archive format '{0}'".format(filename)) shutil.ReadError: Unknown archive format '/home/brain/meson_wrap/subprojects/packagecache/enet-meson.tar.xz' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wrap.py` Content: ``` 1 # Copyright 2015 The Meson development team 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import mlog 16 import urllib.request, os, hashlib, shutil 17 import subprocess 18 19 class PackageDefinition: 20 def __init__(self, fname): 21 self.values = {} 22 ifile = open(fname) 23 first = ifile.readline().strip() 24 25 if first == '[wrap-file]': 26 self.type = 'file' 27 elif first == '[wrap-git]': 28 self.type = 'git' 29 else: 30 raise RuntimeError('Invalid format of package file') 31 for line in ifile: 32 line = line.strip() 33 if line == '': 34 continue 35 (k, v) = line.split('=', 1) 36 k = k.strip() 37 v = v.strip() 38 self.values[k] = v 39 40 def get(self, key): 41 return self.values[key] 42 43 def has_patch(self): 44 return 'patch_url' in self.values 45 46 class Resolver: 47 def __init__(self, subdir_root): 48 self.subdir_root = subdir_root 49 self.cachedir = os.path.join(self.subdir_root, 'packagecache') 50 51 def resolve(self, packagename): 52 fname = os.path.join(self.subdir_root, packagename + '.wrap') 53 dirname = os.path.join(self.subdir_root, packagename) 54 if not os.path.isfile(fname): 55 if os.path.isdir(dirname): 56 # No wrap file but dir exists -> user put it there manually. 57 return packagename 58 return None 59 p = PackageDefinition(fname) 60 if p.type == 'file': 61 if not os.path.isdir(self.cachedir): 62 os.mkdir(self.cachedir) 63 self.download(p, packagename) 64 self.extract_package(p) 65 elif p.type == 'git': 66 self.get_git(p) 67 else: 68 raise RuntimeError('Unreachable code.') 69 return p.get('directory') 70 71 def get_git(self, p): 72 checkoutdir = os.path.join(self.subdir_root, p.get('directory')) 73 revno = p.get('revision') 74 is_there = os.path.isdir(checkoutdir) 75 if is_there: 76 if revno.lower() == 'head': 77 subprocess.check_call(['git', 'pull'], cwd=checkoutdir) 78 else: 79 if subprocess.call(['git', 'checkout', revno], cwd=checkoutdir) != 0: 80 subprocess.check_call(['git', 'fetch'], cwd=checkoutdir) 81 subprocess.check_call(['git', 'checkout', revno], 82 cwd=checkoutdir) 83 else: 84 subprocess.check_call(['git', 'clone', p.get('url'), 85 p.get('directory')], cwd=self.subdir_root) 86 if revno.lower() != 'head': 87 subprocess.check_call(['git', 'checkout', revno], 88 cwd=checkoutdir) 89 90 91 def get_data(self, url): 92 u = urllib.request.urlopen(url) 93 data = u.read() 94 u.close() 95 h = hashlib.sha256() 96 h.update(data) 97 hashvalue = h.hexdigest() 98 return (data, hashvalue) 99 100 def download(self, p, packagename): 101 ofname = os.path.join(self.cachedir, p.get('source_filename')) 102 if os.path.exists(ofname): 103 mlog.log('Using', mlog.bold(packagename), 'from cache.') 104 return 105 srcurl = p.get('source_url') 106 mlog.log('Dowloading', mlog.bold(packagename), 'from', srcurl) 107 (srcdata, dhash) = self.get_data(srcurl) 108 expected = p.get('source_hash') 109 if dhash != expected: 110 raise RuntimeError('Incorrect hash for source %s:\n %s expected\n %s actual.' 
% (packagename, expected, dhash)) 111 if p.has_patch(): 112 purl = p.get('patch_url') 113 mlog.log('Downloading patch from', mlog.bold(purl)) 114 (pdata, phash) = self.get_data(purl) 115 expected = p.get('patch_hash') 116 if phash != expected: 117 raise RuntimeError('Incorrect hash for patch %s:\n %s expected\n %s actual' % (packagename, expected, phash)) 118 open(os.path.join(self.cachedir, p.get('patch_filename')), 'wb').write(pdata) 119 else: 120 mlog.log('Package does not require patch.') 121 open(ofname, 'wb').write(srcdata) 122 123 def extract_package(self, package): 124 if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))): 125 return 126 shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root) 127 if package.has_patch(): 128 shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root) 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wrap.py b/wrap.py --- a/wrap.py +++ b/wrap.py @@ -15,6 +15,7 @@ import mlog import urllib.request, os, hashlib, shutil import subprocess +import sys class PackageDefinition: def __init__(self, fname): @@ -121,6 +122,13 @@ open(ofname, 'wb').write(srcdata) def extract_package(self, package): + if sys.version_info < (3, 5): + try: + import lzma + del lzma + shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'], shutil._unpack_tarfile, [], "xz'ed tar-file") + except ImportError: + pass if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))): return shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)
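The wrap.py patch above works around Python 3.4's shutil, which only knows gz/bz2 tarballs and zip files, by registering an 'xztar' unpacker before `unpack_archive` runs (3.5 and later register it automatically when the lzma module is available). A self-contained sketch of the same idea, using a public tarfile-based callback instead of the private `shutil._unpack_tarfile` helper that the patch reuses:

```python
# Sketch: make shutil.unpack_archive understand .tar.xz on old interpreters.
import shutil
import sys
import tarfile

def _unpack_xztar(filename, extract_dir):
    # Public-API stand-in for the shutil._unpack_tarfile helper used by the patch.
    with tarfile.open(filename, "r:xz") as tar:
        tar.extractall(extract_dir)

registered = [name for name, _, _ in shutil.get_unpack_formats()]
if sys.version_info < (3, 5) and "xztar" not in registered:
    shutil.register_unpack_format("xztar", [".tar.xz", ".txz"], _unpack_xztar)

# shutil.unpack_archive("enet-meson.tar.xz", "subprojects")  # no more ReadError
```

Using a public tarfile callback avoids depending on a private shutil helper, at the cost of re-implementing what `_unpack_tarfile` already does.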
{"golden_diff": "diff --git a/wrap.py b/wrap.py\n--- a/wrap.py\n+++ b/wrap.py\n@@ -15,6 +15,7 @@\n import mlog\n import urllib.request, os, hashlib, shutil\n import subprocess\n+import sys\n \n class PackageDefinition:\n def __init__(self, fname):\n@@ -121,6 +122,13 @@\n open(ofname, 'wb').write(srcdata)\n \n def extract_package(self, package):\n+ if sys.version_info < (3, 5):\n+ try:\n+ import lzma\n+ del lzma\n+ shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'], shutil._unpack_tarfile, [], \"xz'ed tar-file\")\n+ except ImportError:\n+ pass\n if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))):\n return\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)\n", "issue": "shutil.ReadError: Unknown archive format (tar.xz)\n```\nDowloading enet from http://enet.bespin.org/download/enet-1.3.13.tar.gz\nDownloading patch from https://github.com/ignatenkobrain/meson_wrap_example/raw/master/subprojects/enet-meson.tar.xz\nTraceback (most recent call last):\n File \"/home/brain/git/meson/meson.py\", line 188, in run\n app.generate()\n File \"/home/brain/git/meson/meson.py\", line 129, in generate\n intr.run()\n File \"/home/brain/git/meson/interpreter.py\", line 871, in run\n self.evaluate_codeblock(self.ast)\n File \"/home/brain/git/meson/interpreter.py\", line 893, in evaluate_codeblock\n raise e\n File \"/home/brain/git/meson/interpreter.py\", line 887, in evaluate_codeblock\n self.evaluate_statement(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 951, in evaluate_statement\n return self.evaluate_if(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 1757, in evaluate_if\n self.evaluate_codeblock(node.elseblock)\n File \"/home/brain/git/meson/interpreter.py\", line 893, in evaluate_codeblock\n raise e\n File \"/home/brain/git/meson/interpreter.py\", line 887, in evaluate_codeblock\n self.evaluate_statement(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 943, in evaluate_statement\n return self.assignment(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 1637, in assignment\n value = self.evaluate_statement(node.value)\n File \"/home/brain/git/meson/interpreter.py\", line 941, in evaluate_statement\n return self.function_call(cur)\n File \"/home/brain/git/meson/interpreter.py\", line 1618, in function_call\n return self.funcs[func_name](node, self.flatten(posargs), kwargs)\n File \"/home/brain/git/meson/interpreter.py\", line 60, in wrapped\n return f(self, node, args, kwargs)\n File \"/home/brain/git/meson/interpreter.py\", line 52, in wrapped\n return f(self, node, args, kwargs)\n File \"/home/brain/git/meson/interpreter.py\", line 1071, in func_subproject\n resolved = r.resolve(dirname)\n File \"/home/brain/git/meson/wrap.py\", line 64, in resolve\n self.extract_package(p)\n File \"/home/brain/git/meson/wrap.py\", line 128, in extract_package\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)\n File \"/usr/lib64/python3.4/shutil.py\", line 953, in unpack_archive\n raise ReadError(\"Unknown archive format '{0}'\".format(filename))\nshutil.ReadError: Unknown archive format '/home/brain/meson_wrap/subprojects/packagecache/enet-meson.tar.xz'\n```\n\n", "before_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# 
http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mlog\nimport urllib.request, os, hashlib, shutil\nimport subprocess\n\nclass PackageDefinition:\n def __init__(self, fname):\n self.values = {}\n ifile = open(fname)\n first = ifile.readline().strip()\n\n if first == '[wrap-file]':\n self.type = 'file'\n elif first == '[wrap-git]':\n self.type = 'git'\n else:\n raise RuntimeError('Invalid format of package file')\n for line in ifile:\n line = line.strip()\n if line == '':\n continue\n (k, v) = line.split('=', 1)\n k = k.strip()\n v = v.strip()\n self.values[k] = v\n\n def get(self, key):\n return self.values[key]\n\n def has_patch(self):\n return 'patch_url' in self.values\n\nclass Resolver:\n def __init__(self, subdir_root):\n self.subdir_root = subdir_root\n self.cachedir = os.path.join(self.subdir_root, 'packagecache')\n\n def resolve(self, packagename):\n fname = os.path.join(self.subdir_root, packagename + '.wrap')\n dirname = os.path.join(self.subdir_root, packagename)\n if not os.path.isfile(fname):\n if os.path.isdir(dirname):\n # No wrap file but dir exists -> user put it there manually.\n return packagename \n return None\n p = PackageDefinition(fname)\n if p.type == 'file':\n if not os.path.isdir(self.cachedir):\n os.mkdir(self.cachedir)\n self.download(p, packagename)\n self.extract_package(p)\n elif p.type == 'git':\n self.get_git(p)\n else:\n raise RuntimeError('Unreachable code.')\n return p.get('directory')\n\n def get_git(self, p):\n checkoutdir = os.path.join(self.subdir_root, p.get('directory'))\n revno = p.get('revision')\n is_there = os.path.isdir(checkoutdir)\n if is_there:\n if revno.lower() == 'head':\n subprocess.check_call(['git', 'pull'], cwd=checkoutdir)\n else:\n if subprocess.call(['git', 'checkout', revno], cwd=checkoutdir) != 0:\n subprocess.check_call(['git', 'fetch'], cwd=checkoutdir)\n subprocess.check_call(['git', 'checkout', revno],\n cwd=checkoutdir)\n else:\n subprocess.check_call(['git', 'clone', p.get('url'),\n p.get('directory')], cwd=self.subdir_root)\n if revno.lower() != 'head':\n subprocess.check_call(['git', 'checkout', revno],\n cwd=checkoutdir)\n\n\n def get_data(self, url):\n u = urllib.request.urlopen(url)\n data = u.read()\n u.close()\n h = hashlib.sha256()\n h.update(data)\n hashvalue = h.hexdigest()\n return (data, hashvalue)\n\n def download(self, p, packagename):\n ofname = os.path.join(self.cachedir, p.get('source_filename'))\n if os.path.exists(ofname):\n mlog.log('Using', mlog.bold(packagename), 'from cache.')\n return\n srcurl = p.get('source_url')\n mlog.log('Dowloading', mlog.bold(packagename), 'from', srcurl)\n (srcdata, dhash) = self.get_data(srcurl)\n expected = p.get('source_hash')\n if dhash != expected:\n raise RuntimeError('Incorrect hash for source %s:\\n %s expected\\n %s actual.' 
% (packagename, expected, dhash))\n if p.has_patch():\n purl = p.get('patch_url')\n mlog.log('Downloading patch from', mlog.bold(purl))\n (pdata, phash) = self.get_data(purl)\n expected = p.get('patch_hash')\n if phash != expected:\n raise RuntimeError('Incorrect hash for patch %s:\\n %s expected\\n %s actual' % (packagename, expected, phash))\n open(os.path.join(self.cachedir, p.get('patch_filename')), 'wb').write(pdata)\n else:\n mlog.log('Package does not require patch.')\n open(ofname, 'wb').write(srcdata)\n\n def extract_package(self, package):\n if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))):\n return\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)\n if package.has_patch():\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)\n", "path": "wrap.py"}], "after_files": [{"content": "# Copyright 2015 The Meson development team\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport mlog\nimport urllib.request, os, hashlib, shutil\nimport subprocess\nimport sys\n\nclass PackageDefinition:\n def __init__(self, fname):\n self.values = {}\n ifile = open(fname)\n first = ifile.readline().strip()\n\n if first == '[wrap-file]':\n self.type = 'file'\n elif first == '[wrap-git]':\n self.type = 'git'\n else:\n raise RuntimeError('Invalid format of package file')\n for line in ifile:\n line = line.strip()\n if line == '':\n continue\n (k, v) = line.split('=', 1)\n k = k.strip()\n v = v.strip()\n self.values[k] = v\n\n def get(self, key):\n return self.values[key]\n\n def has_patch(self):\n return 'patch_url' in self.values\n\nclass Resolver:\n def __init__(self, subdir_root):\n self.subdir_root = subdir_root\n self.cachedir = os.path.join(self.subdir_root, 'packagecache')\n\n def resolve(self, packagename):\n fname = os.path.join(self.subdir_root, packagename + '.wrap')\n dirname = os.path.join(self.subdir_root, packagename)\n if not os.path.isfile(fname):\n if os.path.isdir(dirname):\n # No wrap file but dir exists -> user put it there manually.\n return packagename \n return None\n p = PackageDefinition(fname)\n if p.type == 'file':\n if not os.path.isdir(self.cachedir):\n os.mkdir(self.cachedir)\n self.download(p, packagename)\n self.extract_package(p)\n elif p.type == 'git':\n self.get_git(p)\n else:\n raise RuntimeError('Unreachable code.')\n return p.get('directory')\n\n def get_git(self, p):\n checkoutdir = os.path.join(self.subdir_root, p.get('directory'))\n revno = p.get('revision')\n is_there = os.path.isdir(checkoutdir)\n if is_there:\n if revno.lower() == 'head':\n subprocess.check_call(['git', 'pull'], cwd=checkoutdir)\n else:\n if subprocess.call(['git', 'checkout', revno], cwd=checkoutdir) != 0:\n subprocess.check_call(['git', 'fetch'], cwd=checkoutdir)\n subprocess.check_call(['git', 'checkout', revno],\n cwd=checkoutdir)\n else:\n subprocess.check_call(['git', 'clone', p.get('url'),\n p.get('directory')], cwd=self.subdir_root)\n if revno.lower() != 
'head':\n subprocess.check_call(['git', 'checkout', revno],\n cwd=checkoutdir)\n\n\n def get_data(self, url):\n u = urllib.request.urlopen(url)\n data = u.read()\n u.close()\n h = hashlib.sha256()\n h.update(data)\n hashvalue = h.hexdigest()\n return (data, hashvalue)\n\n def download(self, p, packagename):\n ofname = os.path.join(self.cachedir, p.get('source_filename'))\n if os.path.exists(ofname):\n mlog.log('Using', mlog.bold(packagename), 'from cache.')\n return\n srcurl = p.get('source_url')\n mlog.log('Dowloading', mlog.bold(packagename), 'from', srcurl)\n (srcdata, dhash) = self.get_data(srcurl)\n expected = p.get('source_hash')\n if dhash != expected:\n raise RuntimeError('Incorrect hash for source %s:\\n %s expected\\n %s actual.' % (packagename, expected, dhash))\n if p.has_patch():\n purl = p.get('patch_url')\n mlog.log('Downloading patch from', mlog.bold(purl))\n (pdata, phash) = self.get_data(purl)\n expected = p.get('patch_hash')\n if phash != expected:\n raise RuntimeError('Incorrect hash for patch %s:\\n %s expected\\n %s actual' % (packagename, expected, phash))\n open(os.path.join(self.cachedir, p.get('patch_filename')), 'wb').write(pdata)\n else:\n mlog.log('Package does not require patch.')\n open(ofname, 'wb').write(srcdata)\n\n def extract_package(self, package):\n if sys.version_info < (3, 5):\n try:\n import lzma\n del lzma\n shutil.register_unpack_format('xztar', ['.tar.xz', '.txz'], shutil._unpack_tarfile, [], \"xz'ed tar-file\")\n except ImportError:\n pass\n if os.path.isdir(os.path.join(self.subdir_root, package.get('directory'))):\n return\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('source_filename')), self.subdir_root)\n if package.has_patch():\n shutil.unpack_archive(os.path.join(self.cachedir, package.get('patch_filename')), self.subdir_root)\n", "path": "wrap.py"}]}
2,400
223
gh_patches_debug_16944
rasdani/github-patches
git_diff
Mailu__Mailu-876
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Missing wildcard option in alias flask command Hi guys, First of all, thank you for making this awesome Mailu project :+1: :100: I'm using command line to mass import some alias from an old server. There doesn't seem to have an option to toggle wildcard support: ``` Usage: flask mailu alias [OPTIONS] LOCALPART DOMAIN_NAME DESTINATION Create an alias Options: --help Show this message and exit. ``` Would be great if it can be done through CLI. Thanks again! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/admin/mailu/manage.py` Content: ``` 1 from mailu import models 2 3 from flask import current_app as app 4 from flask import cli as flask_cli 5 6 import flask 7 import os 8 import socket 9 import uuid 10 import click 11 12 13 db = models.db 14 15 16 @click.group() 17 def mailu(cls=flask_cli.FlaskGroup): 18 """ Mailu command line 19 """ 20 21 22 @mailu.command() 23 @flask_cli.with_appcontext 24 def advertise(): 25 """ Advertise this server against statistic services. 26 """ 27 if os.path.isfile(app.config["INSTANCE_ID_PATH"]): 28 with open(app.config["INSTANCE_ID_PATH"], "r") as handle: 29 instance_id = handle.read() 30 else: 31 instance_id = str(uuid.uuid4()) 32 with open(app.config["INSTANCE_ID_PATH"], "w") as handle: 33 handle.write(instance_id) 34 if not app.config["DISABLE_STATISTICS"]: 35 try: 36 socket.gethostbyname(app.config["STATS_ENDPOINT"].format(instance_id)) 37 except: 38 pass 39 40 41 @mailu.command() 42 @click.argument('localpart') 43 @click.argument('domain_name') 44 @click.argument('password') 45 @flask_cli.with_appcontext 46 def admin(localpart, domain_name, password): 47 """ Create an admin user 48 """ 49 domain = models.Domain.query.get(domain_name) 50 if not domain: 51 domain = models.Domain(name=domain_name) 52 db.session.add(domain) 53 user = models.User( 54 localpart=localpart, 55 domain=domain, 56 global_admin=True 57 ) 58 user.set_password(password) 59 db.session.add(user) 60 db.session.commit() 61 62 63 @mailu.command() 64 @click.argument('localpart') 65 @click.argument('domain_name') 66 @click.argument('password') 67 @click.argument('hash_scheme') 68 @flask_cli.with_appcontext 69 def user(localpart, domain_name, password, hash_scheme=None): 70 """ Create a user 71 """ 72 if hash_scheme is None: 73 hash_scheme = app.config['PASSWORD_SCHEME'] 74 domain = models.Domain.query.get(domain_name) 75 if not domain: 76 domain = models.Domain(name=domain_name) 77 db.session.add(domain) 78 user = models.User( 79 localpart=localpart, 80 domain=domain, 81 global_admin=False 82 ) 83 user.set_password(password, hash_scheme=hash_scheme) 84 db.session.add(user) 85 db.session.commit() 86 87 88 @mailu.command() 89 @click.option('-n', '--domain_name') 90 @click.option('-u', '--max_users') 91 @click.option('-a', '--max_aliases') 92 @click.option('-q', '--max_quota_bytes') 93 @flask_cli.with_appcontext 94 def domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0): 95 domain = models.Domain.query.get(domain_name) 96 if not domain: 97 domain = models.Domain(name=domain_name) 98 db.session.add(domain) 99 db.session.commit() 100 101 102 @mailu.command() 103 @click.argument('localpart') 104 @click.argument('domain_name') 105 @click.argument('password_hash') 106 @click.argument('hash_scheme') 107 @flask_cli.with_appcontext 108 def user_import(localpart, domain_name, 
password_hash, hash_scheme = None): 109 """ Import a user along with password hash. 110 """ 111 if hash_scheme is None: 112 hash_scheme = app.config['PASSWORD_SCHEME'] 113 domain = models.Domain.query.get(domain_name) 114 if not domain: 115 domain = models.Domain(name=domain_name) 116 db.session.add(domain) 117 user = models.User( 118 localpart=localpart, 119 domain=domain, 120 global_admin=False 121 ) 122 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True) 123 db.session.add(user) 124 db.session.commit() 125 126 127 @mailu.command() 128 @click.option('-v', '--verbose') 129 @click.option('-d', '--delete_objects') 130 @flask_cli.with_appcontext 131 def config_update(verbose=False, delete_objects=False): 132 """sync configuration with data from YAML-formatted stdin""" 133 import yaml 134 import sys 135 new_config = yaml.load(sys.stdin) 136 # print new_config 137 domains = new_config.get('domains', []) 138 tracked_domains = set() 139 for domain_config in domains: 140 if verbose: 141 print(str(domain_config)) 142 domain_name = domain_config['name'] 143 max_users = domain_config.get('max_users', -1) 144 max_aliases = domain_config.get('max_aliases', -1) 145 max_quota_bytes = domain_config.get('max_quota_bytes', 0) 146 tracked_domains.add(domain_name) 147 domain = models.Domain.query.get(domain_name) 148 if not domain: 149 domain = models.Domain(name=domain_name, 150 max_users=max_users, 151 max_aliases=max_aliases, 152 max_quota_bytes=max_quota_bytes) 153 db.session.add(domain) 154 print("Added " + str(domain_config)) 155 else: 156 domain.max_users = max_users 157 domain.max_aliases = max_aliases 158 domain.max_quota_bytes = max_quota_bytes 159 db.session.add(domain) 160 print("Updated " + str(domain_config)) 161 162 users = new_config.get('users', []) 163 tracked_users = set() 164 user_optional_params = ('comment', 'quota_bytes', 'global_admin', 165 'enable_imap', 'enable_pop', 'forward_enabled', 166 'forward_destination', 'reply_enabled', 167 'reply_subject', 'reply_body', 'displayed_name', 168 'spam_enabled', 'email', 'spam_threshold') 169 for user_config in users: 170 if verbose: 171 print(str(user_config)) 172 localpart = user_config['localpart'] 173 domain_name = user_config['domain'] 174 password_hash = user_config.get('password_hash', None) 175 hash_scheme = user_config.get('hash_scheme', None) 176 domain = models.Domain.query.get(domain_name) 177 email = '{0}@{1}'.format(localpart, domain_name) 178 optional_params = {} 179 for k in user_optional_params: 180 if k in user_config: 181 optional_params[k] = user_config[k] 182 if not domain: 183 domain = models.Domain(name=domain_name) 184 db.session.add(domain) 185 user = models.User.query.get(email) 186 tracked_users.add(email) 187 tracked_domains.add(domain_name) 188 if not user: 189 user = models.User( 190 localpart=localpart, 191 domain=domain, 192 **optional_params 193 ) 194 else: 195 for k in optional_params: 196 setattr(user, k, optional_params[k]) 197 user.set_password(password_hash, hash_scheme=hash_scheme, raw=True) 198 db.session.add(user) 199 200 aliases = new_config.get('aliases', []) 201 tracked_aliases = set() 202 for alias_config in aliases: 203 if verbose: 204 print(str(alias_config)) 205 localpart = alias_config['localpart'] 206 domain_name = alias_config['domain'] 207 if type(alias_config['destination']) is str: 208 destination = alias_config['destination'].split(',') 209 else: 210 destination = alias_config['destination'] 211 wildcard = alias_config.get('wildcard', False) 212 domain = 
models.Domain.query.get(domain_name) 213 email = '{0}@{1}'.format(localpart, domain_name) 214 if not domain: 215 domain = models.Domain(name=domain_name) 216 db.session.add(domain) 217 alias = models.Alias.query.get(email) 218 tracked_aliases.add(email) 219 tracked_domains.add(domain_name) 220 if not alias: 221 alias = models.Alias( 222 localpart=localpart, 223 domain=domain, 224 wildcard=wildcard, 225 destination=destination, 226 email=email 227 ) 228 else: 229 alias.destination = destination 230 alias.wildcard = wildcard 231 db.session.add(alias) 232 233 db.session.commit() 234 235 managers = new_config.get('managers', []) 236 # tracked_managers=set() 237 for manager_config in managers: 238 if verbose: 239 print(str(manager_config)) 240 domain_name = manager_config['domain'] 241 user_name = manager_config['user'] 242 domain = models.Domain.query.get(domain_name) 243 manageruser = models.User.query.get(user_name + '@' + domain_name) 244 if manageruser not in domain.managers: 245 domain.managers.append(manageruser) 246 db.session.add(domain) 247 248 db.session.commit() 249 250 if delete_objects: 251 for user in db.session.query(models.User).all(): 252 if not (user.email in tracked_users): 253 if verbose: 254 print("Deleting user: " + str(user.email)) 255 db.session.delete(user) 256 for alias in db.session.query(models.Alias).all(): 257 if not (alias.email in tracked_aliases): 258 if verbose: 259 print("Deleting alias: " + str(alias.email)) 260 db.session.delete(alias) 261 for domain in db.session.query(models.Domain).all(): 262 if not (domain.name in tracked_domains): 263 if verbose: 264 print("Deleting domain: " + str(domain.name)) 265 db.session.delete(domain) 266 db.session.commit() 267 268 269 @mailu.command() 270 @click.argument('email') 271 @flask_cli.with_appcontext 272 def user_delete(email): 273 """delete user""" 274 user = models.User.query.get(email) 275 if user: 276 db.session.delete(user) 277 db.session.commit() 278 279 280 @mailu.command() 281 @click.argument('email') 282 @flask_cli.with_appcontext 283 def alias_delete(email): 284 """delete alias""" 285 alias = models.Alias.query.get(email) 286 if alias: 287 db.session.delete(alias) 288 db.session.commit() 289 290 291 @mailu.command() 292 @click.argument('localpart') 293 @click.argument('domain_name') 294 @click.argument('destination') 295 @flask_cli.with_appcontext 296 def alias(localpart, domain_name, destination): 297 """ Create an alias 298 """ 299 domain = models.Domain.query.get(domain_name) 300 if not domain: 301 domain = models.Domain(name=domain_name) 302 db.session.add(domain) 303 alias = models.Alias( 304 localpart=localpart, 305 domain=domain, 306 destination=destination.split(','), 307 email="%s@%s" % (localpart, domain_name) 308 ) 309 db.session.add(alias) 310 db.session.commit() 311 312 313 @mailu.command() 314 @click.argument('domain_name') 315 @click.argument('max_users') 316 @click.argument('max_aliases') 317 @click.argument('max_quota_bytes') 318 @flask_cli.with_appcontext 319 def setlimits(domain_name, max_users, max_aliases, max_quota_bytes): 320 """ Set domain limits 321 """ 322 domain = models.Domain.query.get(domain_name) 323 domain.max_users = max_users 324 domain.max_aliases = max_aliases 325 domain.max_quota_bytes = max_quota_bytes 326 db.session.add(domain) 327 db.session.commit() 328 329 330 @mailu.command() 331 @click.argument('domain_name') 332 @click.argument('user_name') 333 @flask_cli.with_appcontext 334 def setmanager(domain_name, user_name='manager'): 335 """ Make a user manager of a domain 
336 """ 337 domain = models.Domain.query.get(domain_name) 338 manageruser = models.User.query.get(user_name + '@' + domain_name) 339 domain.managers.append(manageruser) 340 db.session.add(domain) 341 db.session.commit() 342 343 344 if __name__ == '__main__': 345 cli() 346 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py --- a/core/admin/mailu/manage.py +++ b/core/admin/mailu/manage.py @@ -292,8 +292,9 @@ @click.argument('localpart') @click.argument('domain_name') @click.argument('destination') +@click.option('-w', '--wildcard', is_flag=True) @flask_cli.with_appcontext -def alias(localpart, domain_name, destination): +def alias(localpart, domain_name, destination, wildcard=False): """ Create an alias """ domain = models.Domain.query.get(domain_name) @@ -303,6 +304,7 @@ alias = models.Alias( localpart=localpart, domain=domain, + wildcard=wildcard, destination=destination.split(','), email="%s@%s" % (localpart, domain_name) )
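The fix above adds a click boolean flag to the existing `alias` command. A stripped-down sketch of the same pattern outside Mailu, which only echoes its arguments instead of touching the models/db layer:

```python
# Standalone illustration of click's is_flag=True, mirroring the patched command.
import click

@click.command()
@click.argument("localpart")
@click.argument("domain_name")
@click.argument("destination")
@click.option("-w", "--wildcard", is_flag=True)
def alias(localpart, domain_name, destination, wildcard):
    """Create an alias (demo stub: prints instead of writing to the database)."""
    click.echo(f"{localpart}@{domain_name} -> {destination.split(',')} (wildcard={wildcard})")

if __name__ == "__main__":
    alias()
```

Invoked as `alias.py catchall example.com postmaster@example.com -w`, the flag arrives as a plain boolean, which matches the `wildcard=wildcard` keyword the patched `models.Alias(...)` call passes through.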
{"golden_diff": "diff --git a/core/admin/mailu/manage.py b/core/admin/mailu/manage.py\n--- a/core/admin/mailu/manage.py\n+++ b/core/admin/mailu/manage.py\n@@ -292,8 +292,9 @@\n @click.argument('localpart')\n @click.argument('domain_name')\n @click.argument('destination')\[email protected]('-w', '--wildcard', is_flag=True)\n @flask_cli.with_appcontext\n-def alias(localpart, domain_name, destination):\n+def alias(localpart, domain_name, destination, wildcard=False):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n@@ -303,6 +304,7 @@\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n+ wildcard=wildcard,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n", "issue": "Missing wildcard option in alias flask command\nHi guys,\r\n\r\nFirst of all, thank you for making this awesome Mailu project :+1: :100: \r\n\r\nI'm using command line to mass import some alias from an old server. There doesn't seem to have an option to toggle wildcard support:\r\n\r\n```\r\nUsage: flask mailu alias [OPTIONS] LOCALPART DOMAIN_NAME DESTINATION\r\n\r\n Create an alias\r\n\r\nOptions:\r\n --help Show this message and exit.\r\n```\r\n\r\nWould be great if it can be done through CLI. Thanks again!\n", "before_files": [{"content": "from mailu import models\n\nfrom flask import current_app as app\nfrom flask import cli as flask_cli\n\nimport flask\nimport os\nimport socket\nimport uuid\nimport click\n\n\ndb = models.db\n\n\[email protected]()\ndef mailu(cls=flask_cli.FlaskGroup):\n \"\"\" Mailu command line\n \"\"\"\n\n\[email protected]()\n@flask_cli.with_appcontext\ndef advertise():\n \"\"\" Advertise this server against statistic services.\n \"\"\"\n if os.path.isfile(app.config[\"INSTANCE_ID_PATH\"]):\n with open(app.config[\"INSTANCE_ID_PATH\"], \"r\") as handle:\n instance_id = handle.read()\n else:\n instance_id = str(uuid.uuid4())\n with open(app.config[\"INSTANCE_ID_PATH\"], \"w\") as handle:\n handle.write(instance_id)\n if not app.config[\"DISABLE_STATISTICS\"]:\n try:\n socket.gethostbyname(app.config[\"STATS_ENDPOINT\"].format(instance_id))\n except:\n pass\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\n@flask_cli.with_appcontext\ndef admin(localpart, domain_name, password):\n \"\"\" Create an admin user\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=True\n )\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password, hash_scheme=hash_scheme)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-n', '--domain_name')\[email protected]('-u', '--max_users')\[email protected]('-a', '--max_aliases')\[email protected]('-q', 
'--max_quota_bytes')\n@flask_cli.with_appcontext\ndef domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password_hash')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user_import(localpart, domain_name, password_hash, hash_scheme = None):\n \"\"\" Import a user along with password hash.\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-v', '--verbose')\[email protected]('-d', '--delete_objects')\n@flask_cli.with_appcontext\ndef config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n import yaml\n import sys\n new_config = yaml.load(sys.stdin)\n # print new_config\n domains = new_config.get('domains', [])\n tracked_domains = set()\n for domain_config in domains:\n if verbose:\n print(str(domain_config))\n domain_name = domain_config['name']\n max_users = domain_config.get('max_users', -1)\n max_aliases = domain_config.get('max_aliases', -1)\n max_quota_bytes = domain_config.get('max_quota_bytes', 0)\n tracked_domains.add(domain_name)\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name,\n max_users=max_users,\n max_aliases=max_aliases,\n max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n print(\"Added \" + str(domain_config))\n else:\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n print(\"Updated \" + str(domain_config))\n\n users = new_config.get('users', [])\n tracked_users = set()\n user_optional_params = ('comment', 'quota_bytes', 'global_admin',\n 'enable_imap', 'enable_pop', 'forward_enabled',\n 'forward_destination', 'reply_enabled',\n 'reply_subject', 'reply_body', 'displayed_name',\n 'spam_enabled', 'email', 'spam_threshold')\n for user_config in users:\n if verbose:\n print(str(user_config))\n localpart = user_config['localpart']\n domain_name = user_config['domain']\n password_hash = user_config.get('password_hash', None)\n hash_scheme = user_config.get('hash_scheme', None)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n optional_params = {}\n for k in user_optional_params:\n if k in user_config:\n optional_params[k] = user_config[k]\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User.query.get(email)\n tracked_users.add(email)\n tracked_domains.add(domain_name)\n if not user:\n user = models.User(\n localpart=localpart,\n domain=domain,\n **optional_params\n )\n else:\n for k in optional_params:\n setattr(user, k, optional_params[k])\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n\n aliases = new_config.get('aliases', [])\n tracked_aliases = set()\n for alias_config in aliases:\n if verbose:\n 
print(str(alias_config))\n localpart = alias_config['localpart']\n domain_name = alias_config['domain']\n if type(alias_config['destination']) is str:\n destination = alias_config['destination'].split(',')\n else:\n destination = alias_config['destination']\n wildcard = alias_config.get('wildcard', False)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias.query.get(email)\n tracked_aliases.add(email)\n tracked_domains.add(domain_name)\n if not alias:\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination,\n email=email\n )\n else:\n alias.destination = destination\n alias.wildcard = wildcard\n db.session.add(alias)\n\n db.session.commit()\n\n managers = new_config.get('managers', [])\n # tracked_managers=set()\n for manager_config in managers:\n if verbose:\n print(str(manager_config))\n domain_name = manager_config['domain']\n user_name = manager_config['user']\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n if manageruser not in domain.managers:\n domain.managers.append(manageruser)\n db.session.add(domain)\n\n db.session.commit()\n\n if delete_objects:\n for user in db.session.query(models.User).all():\n if not (user.email in tracked_users):\n if verbose:\n print(\"Deleting user: \" + str(user.email))\n db.session.delete(user)\n for alias in db.session.query(models.Alias).all():\n if not (alias.email in tracked_aliases):\n if verbose:\n print(\"Deleting alias: \" + str(alias.email))\n db.session.delete(alias)\n for domain in db.session.query(models.Domain).all():\n if not (domain.name in tracked_domains):\n if verbose:\n print(\"Deleting domain: \" + str(domain.name))\n db.session.delete(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef user_delete(email):\n \"\"\"delete user\"\"\"\n user = models.User.query.get(email)\n if user:\n db.session.delete(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef alias_delete(email):\n \"\"\"delete alias\"\"\"\n alias = models.Alias.query.get(email)\n if alias:\n db.session.delete(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('destination')\n@flask_cli.with_appcontext\ndef alias(localpart, domain_name, destination):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n db.session.add(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('max_users')\[email protected]('max_aliases')\[email protected]('max_quota_bytes')\n@flask_cli.with_appcontext\ndef setlimits(domain_name, max_users, max_aliases, max_quota_bytes):\n \"\"\" Set domain limits\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email 
protected]('user_name')\n@flask_cli.with_appcontext\ndef setmanager(domain_name, user_name='manager'):\n \"\"\" Make a user manager of a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n domain.managers.append(manageruser)\n db.session.add(domain)\n db.session.commit()\n\n\nif __name__ == '__main__':\n cli()\n", "path": "core/admin/mailu/manage.py"}], "after_files": [{"content": "from mailu import models\n\nfrom flask import current_app as app\nfrom flask import cli as flask_cli\n\nimport flask\nimport os\nimport socket\nimport uuid\nimport click\n\n\ndb = models.db\n\n\[email protected]()\ndef mailu(cls=flask_cli.FlaskGroup):\n \"\"\" Mailu command line\n \"\"\"\n\n\[email protected]()\n@flask_cli.with_appcontext\ndef advertise():\n \"\"\" Advertise this server against statistic services.\n \"\"\"\n if os.path.isfile(app.config[\"INSTANCE_ID_PATH\"]):\n with open(app.config[\"INSTANCE_ID_PATH\"], \"r\") as handle:\n instance_id = handle.read()\n else:\n instance_id = str(uuid.uuid4())\n with open(app.config[\"INSTANCE_ID_PATH\"], \"w\") as handle:\n handle.write(instance_id)\n if not app.config[\"DISABLE_STATISTICS\"]:\n try:\n socket.gethostbyname(app.config[\"STATS_ENDPOINT\"].format(instance_id))\n except:\n pass\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\n@flask_cli.with_appcontext\ndef admin(localpart, domain_name, password):\n \"\"\" Create an admin user\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=True\n )\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user(localpart, domain_name, password, hash_scheme=None):\n \"\"\" Create a user\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n global_admin=False\n )\n user.set_password(password, hash_scheme=hash_scheme)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-n', '--domain_name')\[email protected]('-u', '--max_users')\[email protected]('-a', '--max_aliases')\[email protected]('-q', '--max_quota_bytes')\n@flask_cli.with_appcontext\ndef domain(domain_name, max_users=-1, max_aliases=-1, max_quota_bytes=0):\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('password_hash')\[email protected]('hash_scheme')\n@flask_cli.with_appcontext\ndef user_import(localpart, domain_name, password_hash, hash_scheme = None):\n \"\"\" Import a user along with password hash.\n \"\"\"\n if hash_scheme is None:\n hash_scheme = app.config['PASSWORD_SCHEME']\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User(\n localpart=localpart,\n domain=domain,\n 
global_admin=False\n )\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('-v', '--verbose')\[email protected]('-d', '--delete_objects')\n@flask_cli.with_appcontext\ndef config_update(verbose=False, delete_objects=False):\n \"\"\"sync configuration with data from YAML-formatted stdin\"\"\"\n import yaml\n import sys\n new_config = yaml.load(sys.stdin)\n # print new_config\n domains = new_config.get('domains', [])\n tracked_domains = set()\n for domain_config in domains:\n if verbose:\n print(str(domain_config))\n domain_name = domain_config['name']\n max_users = domain_config.get('max_users', -1)\n max_aliases = domain_config.get('max_aliases', -1)\n max_quota_bytes = domain_config.get('max_quota_bytes', 0)\n tracked_domains.add(domain_name)\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name,\n max_users=max_users,\n max_aliases=max_aliases,\n max_quota_bytes=max_quota_bytes)\n db.session.add(domain)\n print(\"Added \" + str(domain_config))\n else:\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n print(\"Updated \" + str(domain_config))\n\n users = new_config.get('users', [])\n tracked_users = set()\n user_optional_params = ('comment', 'quota_bytes', 'global_admin',\n 'enable_imap', 'enable_pop', 'forward_enabled',\n 'forward_destination', 'reply_enabled',\n 'reply_subject', 'reply_body', 'displayed_name',\n 'spam_enabled', 'email', 'spam_threshold')\n for user_config in users:\n if verbose:\n print(str(user_config))\n localpart = user_config['localpart']\n domain_name = user_config['domain']\n password_hash = user_config.get('password_hash', None)\n hash_scheme = user_config.get('hash_scheme', None)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n optional_params = {}\n for k in user_optional_params:\n if k in user_config:\n optional_params[k] = user_config[k]\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n user = models.User.query.get(email)\n tracked_users.add(email)\n tracked_domains.add(domain_name)\n if not user:\n user = models.User(\n localpart=localpart,\n domain=domain,\n **optional_params\n )\n else:\n for k in optional_params:\n setattr(user, k, optional_params[k])\n user.set_password(password_hash, hash_scheme=hash_scheme, raw=True)\n db.session.add(user)\n\n aliases = new_config.get('aliases', [])\n tracked_aliases = set()\n for alias_config in aliases:\n if verbose:\n print(str(alias_config))\n localpart = alias_config['localpart']\n domain_name = alias_config['domain']\n if type(alias_config['destination']) is str:\n destination = alias_config['destination'].split(',')\n else:\n destination = alias_config['destination']\n wildcard = alias_config.get('wildcard', False)\n domain = models.Domain.query.get(domain_name)\n email = '{0}@{1}'.format(localpart, domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias.query.get(email)\n tracked_aliases.add(email)\n tracked_domains.add(domain_name)\n if not alias:\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination,\n email=email\n )\n else:\n alias.destination = destination\n alias.wildcard = wildcard\n db.session.add(alias)\n\n db.session.commit()\n\n managers = 
new_config.get('managers', [])\n # tracked_managers=set()\n for manager_config in managers:\n if verbose:\n print(str(manager_config))\n domain_name = manager_config['domain']\n user_name = manager_config['user']\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n if manageruser not in domain.managers:\n domain.managers.append(manageruser)\n db.session.add(domain)\n\n db.session.commit()\n\n if delete_objects:\n for user in db.session.query(models.User).all():\n if not (user.email in tracked_users):\n if verbose:\n print(\"Deleting user: \" + str(user.email))\n db.session.delete(user)\n for alias in db.session.query(models.Alias).all():\n if not (alias.email in tracked_aliases):\n if verbose:\n print(\"Deleting alias: \" + str(alias.email))\n db.session.delete(alias)\n for domain in db.session.query(models.Domain).all():\n if not (domain.name in tracked_domains):\n if verbose:\n print(\"Deleting domain: \" + str(domain.name))\n db.session.delete(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef user_delete(email):\n \"\"\"delete user\"\"\"\n user = models.User.query.get(email)\n if user:\n db.session.delete(user)\n db.session.commit()\n\n\[email protected]()\[email protected]('email')\n@flask_cli.with_appcontext\ndef alias_delete(email):\n \"\"\"delete alias\"\"\"\n alias = models.Alias.query.get(email)\n if alias:\n db.session.delete(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('localpart')\[email protected]('domain_name')\[email protected]('destination')\[email protected]('-w', '--wildcard', is_flag=True)\n@flask_cli.with_appcontext\ndef alias(localpart, domain_name, destination, wildcard=False):\n \"\"\" Create an alias\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n if not domain:\n domain = models.Domain(name=domain_name)\n db.session.add(domain)\n alias = models.Alias(\n localpart=localpart,\n domain=domain,\n wildcard=wildcard,\n destination=destination.split(','),\n email=\"%s@%s\" % (localpart, domain_name)\n )\n db.session.add(alias)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('max_users')\[email protected]('max_aliases')\[email protected]('max_quota_bytes')\n@flask_cli.with_appcontext\ndef setlimits(domain_name, max_users, max_aliases, max_quota_bytes):\n \"\"\" Set domain limits\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n domain.max_users = max_users\n domain.max_aliases = max_aliases\n domain.max_quota_bytes = max_quota_bytes\n db.session.add(domain)\n db.session.commit()\n\n\[email protected]()\[email protected]('domain_name')\[email protected]('user_name')\n@flask_cli.with_appcontext\ndef setmanager(domain_name, user_name='manager'):\n \"\"\" Make a user manager of a domain\n \"\"\"\n domain = models.Domain.query.get(domain_name)\n manageruser = models.User.query.get(user_name + '@' + domain_name)\n domain.managers.append(manageruser)\n db.session.add(domain)\n db.session.commit()\n\n\nif __name__ == '__main__':\n cli()\n", "path": "core/admin/mailu/manage.py"}]}
3,646
200
gh_patches_debug_4798
rasdani/github-patches
git_diff
pytorch__vision-2793
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- The ASPP layer has a hard-coded value that only works if the atrous_rates param is of length 3 ## 🐛 Bug The ASPP layer has a hard-coded value that only works if the list of atrous rates provided is of length 3. The hard-coded value in question is the `5` on line 83 here: https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py#L82-L86 ## To Reproduce ```python3 aspp = tv.models.segmentation.deeplabv3.ASPP(1, [12, 24]) # DeepLab uses [12, 24, 36], which works aspp.eval() with torch.no_grad(): out = aspp(torch.empty(1, 1, 256, 256)) ``` Results in ``` RuntimeError: Given groups=1, weight of size [256, 1280, 1, 1], expected input[1, 1024, 256, 256] to have 1280 channels, but got 1024 channels instead ``` ## Expected behavior It should work with different number of atrous rates. This can be fixed simply by replacing ```python3 nn.Conv2d(5 * out_channels, out_channels, 1, bias=False) ``` with ```python3 nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False) ``` ## Environment ``` PyTorch version: 1.6.0 Is debug build: False CUDA used to build PyTorch: 10.2 ROCM used to build PyTorch: N/A OS: Ubuntu 20.04.1 LTS (x86_64) GCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0 Clang version: Could not collect CMake version: version 3.18.2 Python version: 3.7 (64-bit runtime) Is CUDA available: True CUDA runtime version: Could not collect GPU models and configuration: GPU 0: GeForce GTX 1060 with Max-Q Design Nvidia driver version: 450.66 cuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.1 HIP runtime version: N/A MIOpen runtime version: N/A Versions of relevant libraries: [pip3] numpy==1.19.1 [pip3] torch==1.6.0 [pip3] torchvision==0.7.0 [conda] blas 1.0 mkl [conda] cudatoolkit 10.2.89 hfd86e86_1 [conda] mkl 2020.2 256 [conda] mkl-service 2.3.0 py37he904b0f_0 [conda] mkl_fft 1.1.0 py37h23d657b_0 [conda] mkl_random 1.1.1 py37h0573a6f_0 [conda] numpy 1.19.1 py37hbc911f0_0 [conda] numpy-base 1.19.1 py37hfa32c7d_0 [conda] torch 1.6.0 pypi_0 pypi [conda] torchvision 0.7.0 pypi_0 pypi ``` cc @vfdev-5 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchvision/models/segmentation/deeplabv3.py` Content: ``` 1 import torch 2 from torch import nn 3 from torch.nn import functional as F 4 5 from ._utils import _SimpleSegmentationModel 6 7 8 __all__ = ["DeepLabV3"] 9 10 11 class DeepLabV3(_SimpleSegmentationModel): 12 """ 13 Implements DeepLabV3 model from 14 `"Rethinking Atrous Convolution for Semantic Image Segmentation" 15 <https://arxiv.org/abs/1706.05587>`_. 16 17 Arguments: 18 backbone (nn.Module): the network used to compute the features for the model. 19 The backbone should return an OrderedDict[Tensor], with the key being 20 "out" for the last feature map used, and "aux" if an auxiliary classifier 21 is used. 22 classifier (nn.Module): module that takes the "out" element returned from 23 the backbone and returns a dense prediction. 
24 aux_classifier (nn.Module, optional): auxiliary classifier used during training 25 """ 26 pass 27 28 29 class DeepLabHead(nn.Sequential): 30 def __init__(self, in_channels, num_classes): 31 super(DeepLabHead, self).__init__( 32 ASPP(in_channels, [12, 24, 36]), 33 nn.Conv2d(256, 256, 3, padding=1, bias=False), 34 nn.BatchNorm2d(256), 35 nn.ReLU(), 36 nn.Conv2d(256, num_classes, 1) 37 ) 38 39 40 class ASPPConv(nn.Sequential): 41 def __init__(self, in_channels, out_channels, dilation): 42 modules = [ 43 nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False), 44 nn.BatchNorm2d(out_channels), 45 nn.ReLU() 46 ] 47 super(ASPPConv, self).__init__(*modules) 48 49 50 class ASPPPooling(nn.Sequential): 51 def __init__(self, in_channels, out_channels): 52 super(ASPPPooling, self).__init__( 53 nn.AdaptiveAvgPool2d(1), 54 nn.Conv2d(in_channels, out_channels, 1, bias=False), 55 nn.BatchNorm2d(out_channels), 56 nn.ReLU()) 57 58 def forward(self, x): 59 size = x.shape[-2:] 60 for mod in self: 61 x = mod(x) 62 return F.interpolate(x, size=size, mode='bilinear', align_corners=False) 63 64 65 class ASPP(nn.Module): 66 def __init__(self, in_channels, atrous_rates, out_channels=256): 67 super(ASPP, self).__init__() 68 modules = [] 69 modules.append(nn.Sequential( 70 nn.Conv2d(in_channels, out_channels, 1, bias=False), 71 nn.BatchNorm2d(out_channels), 72 nn.ReLU())) 73 74 rates = tuple(atrous_rates) 75 for rate in rates: 76 modules.append(ASPPConv(in_channels, out_channels, rate)) 77 78 modules.append(ASPPPooling(in_channels, out_channels)) 79 80 self.convs = nn.ModuleList(modules) 81 82 self.project = nn.Sequential( 83 nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), 84 nn.BatchNorm2d(out_channels), 85 nn.ReLU(), 86 nn.Dropout(0.5)) 87 88 def forward(self, x): 89 res = [] 90 for conv in self.convs: 91 res.append(conv(x)) 92 res = torch.cat(res, dim=1) 93 return self.project(res) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py --- a/torchvision/models/segmentation/deeplabv3.py +++ b/torchvision/models/segmentation/deeplabv3.py @@ -80,7 +80,7 @@ self.convs = nn.ModuleList(modules) self.project = nn.Sequential( - nn.Conv2d(5 * out_channels, out_channels, 1, bias=False), + nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False), nn.BatchNorm2d(out_channels), nn.ReLU(), nn.Dropout(0.5))
{"golden_diff": "diff --git a/torchvision/models/segmentation/deeplabv3.py b/torchvision/models/segmentation/deeplabv3.py\n--- a/torchvision/models/segmentation/deeplabv3.py\n+++ b/torchvision/models/segmentation/deeplabv3.py\n@@ -80,7 +80,7 @@\n self.convs = nn.ModuleList(modules)\n \n self.project = nn.Sequential(\n- nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),\n+ nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Dropout(0.5))\n", "issue": "The ASPP layer has a hard-coded value that only works if the atrous_rates param is of length 3\n## \ud83d\udc1b Bug\r\n\r\nThe ASPP layer has a hard-coded value that only works if the list of atrous rates provided is of length 3. The hard-coded value in question is the `5` on line 83 here:\r\nhttps://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py#L82-L86\r\n\r\n## To Reproduce\r\n\r\n```python3\r\naspp = tv.models.segmentation.deeplabv3.ASPP(1, [12, 24]) # DeepLab uses [12, 24, 36], which works\r\n\r\naspp.eval()\r\nwith torch.no_grad():\r\n out = aspp(torch.empty(1, 1, 256, 256))\r\n```\r\nResults in\r\n```\r\nRuntimeError: Given groups=1, weight of size [256, 1280, 1, 1], expected input[1, 1024, 256, 256] to have 1280 channels, but got 1024 channels instead\r\n```\r\n\r\n## Expected behavior\r\n\r\nIt should work with different number of atrous rates. This can be fixed simply by replacing\r\n```python3\r\nnn.Conv2d(5 * out_channels, out_channels, 1, bias=False)\r\n```\r\nwith\r\n\r\n```python3\r\nnn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False)\r\n```\r\n\r\n## Environment\r\n\r\n```\r\nPyTorch version: 1.6.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: 10.2\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 20.04.1 LTS (x86_64)\r\nGCC version: (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\nClang version: Could not collect\r\nCMake version: version 3.18.2\r\n\r\nPython version: 3.7 (64-bit runtime)\r\nIs CUDA available: True\r\nCUDA runtime version: Could not collect\r\nGPU models and configuration: GPU 0: GeForce GTX 1060 with Max-Q Design\r\nNvidia driver version: 450.66\r\ncuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.1\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.19.1\r\n[pip3] torch==1.6.0\r\n[pip3] torchvision==0.7.0\r\n[conda] blas 1.0 mkl \r\n[conda] cudatoolkit 10.2.89 hfd86e86_1 \r\n[conda] mkl 2020.2 256 \r\n[conda] mkl-service 2.3.0 py37he904b0f_0 \r\n[conda] mkl_fft 1.1.0 py37h23d657b_0 \r\n[conda] mkl_random 1.1.1 py37h0573a6f_0 \r\n[conda] numpy 1.19.1 py37hbc911f0_0 \r\n[conda] numpy-base 1.19.1 py37hfa32c7d_0 \r\n[conda] torch 1.6.0 pypi_0 pypi\r\n[conda] torchvision 0.7.0 pypi_0 pypi\r\n```\r\n\n\ncc @vfdev-5\n", "before_files": [{"content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ._utils import _SimpleSegmentationModel\n\n\n__all__ = [\"DeepLabV3\"]\n\n\nclass DeepLabV3(_SimpleSegmentationModel):\n \"\"\"\n Implements DeepLabV3 model from\n `\"Rethinking Atrous Convolution for Semantic Image Segmentation\"\n <https://arxiv.org/abs/1706.05587>`_.\n\n Arguments:\n backbone (nn.Module): the network used to compute the features for the model.\n The backbone should return an OrderedDict[Tensor], with the key being\n \"out\" for the last feature map used, and \"aux\" if an auxiliary classifier\n is used.\n classifier (nn.Module): module that takes the \"out\" element 
returned from\n the backbone and returns a dense prediction.\n aux_classifier (nn.Module, optional): auxiliary classifier used during training\n \"\"\"\n pass\n\n\nclass DeepLabHead(nn.Sequential):\n def __init__(self, in_channels, num_classes):\n super(DeepLabHead, self).__init__(\n ASPP(in_channels, [12, 24, 36]),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(256, num_classes, 1)\n )\n\n\nclass ASPPConv(nn.Sequential):\n def __init__(self, in_channels, out_channels, dilation):\n modules = [\n nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n ]\n super(ASPPConv, self).__init__(*modules)\n\n\nclass ASPPPooling(nn.Sequential):\n def __init__(self, in_channels, out_channels):\n super(ASPPPooling, self).__init__(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU())\n\n def forward(self, x):\n size = x.shape[-2:]\n for mod in self:\n x = mod(x)\n return F.interpolate(x, size=size, mode='bilinear', align_corners=False)\n\n\nclass ASPP(nn.Module):\n def __init__(self, in_channels, atrous_rates, out_channels=256):\n super(ASPP, self).__init__()\n modules = []\n modules.append(nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()))\n\n rates = tuple(atrous_rates)\n for rate in rates:\n modules.append(ASPPConv(in_channels, out_channels, rate))\n\n modules.append(ASPPPooling(in_channels, out_channels))\n\n self.convs = nn.ModuleList(modules)\n\n self.project = nn.Sequential(\n nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Dropout(0.5))\n\n def forward(self, x):\n res = []\n for conv in self.convs:\n res.append(conv(x))\n res = torch.cat(res, dim=1)\n return self.project(res)\n", "path": "torchvision/models/segmentation/deeplabv3.py"}], "after_files": [{"content": "import torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ._utils import _SimpleSegmentationModel\n\n\n__all__ = [\"DeepLabV3\"]\n\n\nclass DeepLabV3(_SimpleSegmentationModel):\n \"\"\"\n Implements DeepLabV3 model from\n `\"Rethinking Atrous Convolution for Semantic Image Segmentation\"\n <https://arxiv.org/abs/1706.05587>`_.\n\n Arguments:\n backbone (nn.Module): the network used to compute the features for the model.\n The backbone should return an OrderedDict[Tensor], with the key being\n \"out\" for the last feature map used, and \"aux\" if an auxiliary classifier\n is used.\n classifier (nn.Module): module that takes the \"out\" element returned from\n the backbone and returns a dense prediction.\n aux_classifier (nn.Module, optional): auxiliary classifier used during training\n \"\"\"\n pass\n\n\nclass DeepLabHead(nn.Sequential):\n def __init__(self, in_channels, num_classes):\n super(DeepLabHead, self).__init__(\n ASPP(in_channels, [12, 24, 36]),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(),\n nn.Conv2d(256, num_classes, 1)\n )\n\n\nclass ASPPConv(nn.Sequential):\n def __init__(self, in_channels, out_channels, dilation):\n modules = [\n nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()\n ]\n super(ASPPConv, self).__init__(*modules)\n\n\nclass ASPPPooling(nn.Sequential):\n def __init__(self, in_channels, out_channels):\n super(ASPPPooling, self).__init__(\n 
nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU())\n\n def forward(self, x):\n size = x.shape[-2:]\n for mod in self:\n x = mod(x)\n return F.interpolate(x, size=size, mode='bilinear', align_corners=False)\n\n\nclass ASPP(nn.Module):\n def __init__(self, in_channels, atrous_rates, out_channels=256):\n super(ASPP, self).__init__()\n modules = []\n modules.append(nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU()))\n\n rates = tuple(atrous_rates)\n for rate in rates:\n modules.append(ASPPConv(in_channels, out_channels, rate))\n\n modules.append(ASPPPooling(in_channels, out_channels))\n\n self.convs = nn.ModuleList(modules)\n\n self.project = nn.Sequential(\n nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(),\n nn.Dropout(0.5))\n\n def forward(self, x):\n res = []\n for conv in self.convs:\n res.append(conv(x))\n res = torch.cat(res, dim=1)\n return self.project(res)\n", "path": "torchvision/models/segmentation/deeplabv3.py"}]}
2,046
164
gh_patches_debug_14203
rasdani/github-patches
git_diff
getsentry__sentry-python-2476
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- FastApiIntegration hides request handler function name ### How do you use Sentry? Sentry Saas (sentry.io) ### Version 1.31.0 ### Steps to Reproduce Use the python sentry sdk in conjunction with FastAPI and the NewRelic python agent. View transactions in the NewRelic UI. ### Expected Result The NewRelic transaction name should show the function name handling the FastAPI route. ### Actual Result New Relic services representing our new fastapi services are seeing significant time spent in a transaction named `sentry_sdk.integrations.fastapi:patch_get_request_handler.<locals>._sentry_get_request_handler.<locals>._sentry_call`. ![image](https://github.com/getsentry/sentry-python/assets/66433626/22bb7222-fb35-45fa-8b19-cba6432b8752) The source code for `fastspi:patch_get_request_handler` wraps non-async request handlers in a local function called `_sentry_call`. This local function shares the same `__qualname__` responsible for the significant transaction time. The result is every route handler has the same name and it appears in New Relic that the service has one transaction type. Sentry sdk integrations wrap functions all the time to create sentry scope and yet we never have this problem. It appears the issue comes from a simple oversight when writing the fastapi integration. Other integrations use `sentry_sdk._functools:wraps` to wrap a function with sentry specific behavior, yet ensure the name, qualname, etc. are retrained from the wrapped function. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sentry_sdk/integrations/fastapi.py` Content: ``` 1 import asyncio 2 from copy import deepcopy 3 4 from sentry_sdk._types import TYPE_CHECKING 5 from sentry_sdk.hub import Hub, _should_send_default_pii 6 from sentry_sdk.integrations import DidNotEnable 7 from sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE 8 from sentry_sdk.utils import transaction_from_function, logger 9 10 if TYPE_CHECKING: 11 from typing import Any, Callable, Dict 12 from sentry_sdk.scope import Scope 13 14 try: 15 from sentry_sdk.integrations.starlette import ( 16 StarletteIntegration, 17 StarletteRequestExtractor, 18 ) 19 except DidNotEnable: 20 raise DidNotEnable("Starlette is not installed") 21 22 try: 23 import fastapi # type: ignore 24 except ImportError: 25 raise DidNotEnable("FastAPI is not installed") 26 27 28 _DEFAULT_TRANSACTION_NAME = "generic FastAPI request" 29 30 31 class FastApiIntegration(StarletteIntegration): 32 identifier = "fastapi" 33 34 @staticmethod 35 def setup_once(): 36 # type: () -> None 37 patch_get_request_handler() 38 39 40 def _set_transaction_name_and_source(scope, transaction_style, request): 41 # type: (Scope, str, Any) -> None 42 name = "" 43 44 if transaction_style == "endpoint": 45 endpoint = request.scope.get("endpoint") 46 if endpoint: 47 name = transaction_from_function(endpoint) or "" 48 49 elif transaction_style == "url": 50 route = request.scope.get("route") 51 if route: 52 path = getattr(route, "path", None) 53 if path is not None: 54 name = path 55 56 if not name: 57 name = _DEFAULT_TRANSACTION_NAME 58 source = TRANSACTION_SOURCE_ROUTE 59 else: 60 source = SOURCE_FOR_STYLE[transaction_style] 61 62 scope.set_transaction_name(name, source=source) 63 logger.debug( 64 "[FastAPI] Set transaction name and source on scope: %s / %s", name, source 65 ) 66 67 68 def 
patch_get_request_handler(): 69 # type: () -> None 70 old_get_request_handler = fastapi.routing.get_request_handler 71 72 def _sentry_get_request_handler(*args, **kwargs): 73 # type: (*Any, **Any) -> Any 74 dependant = kwargs.get("dependant") 75 if ( 76 dependant 77 and dependant.call is not None 78 and not asyncio.iscoroutinefunction(dependant.call) 79 ): 80 old_call = dependant.call 81 82 def _sentry_call(*args, **kwargs): 83 # type: (*Any, **Any) -> Any 84 hub = Hub.current 85 with hub.configure_scope() as sentry_scope: 86 if sentry_scope.profile is not None: 87 sentry_scope.profile.update_active_thread_id() 88 return old_call(*args, **kwargs) 89 90 dependant.call = _sentry_call 91 92 old_app = old_get_request_handler(*args, **kwargs) 93 94 async def _sentry_app(*args, **kwargs): 95 # type: (*Any, **Any) -> Any 96 hub = Hub.current 97 integration = hub.get_integration(FastApiIntegration) 98 if integration is None: 99 return await old_app(*args, **kwargs) 100 101 with hub.configure_scope() as sentry_scope: 102 request = args[0] 103 104 _set_transaction_name_and_source( 105 sentry_scope, integration.transaction_style, request 106 ) 107 108 extractor = StarletteRequestExtractor(request) 109 info = await extractor.extract_request_info() 110 111 def _make_request_event_processor(req, integration): 112 # type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]] 113 def event_processor(event, hint): 114 # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any] 115 116 # Extract information from request 117 request_info = event.get("request", {}) 118 if info: 119 if "cookies" in info and _should_send_default_pii(): 120 request_info["cookies"] = info["cookies"] 121 if "data" in info: 122 request_info["data"] = info["data"] 123 event["request"] = deepcopy(request_info) 124 125 return event 126 127 return event_processor 128 129 sentry_scope._name = FastApiIntegration.identifier 130 sentry_scope.add_event_processor( 131 _make_request_event_processor(request, integration) 132 ) 133 134 return await old_app(*args, **kwargs) 135 136 return _sentry_app 137 138 fastapi.routing.get_request_handler = _sentry_get_request_handler 139 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/integrations/fastapi.py b/sentry_sdk/integrations/fastapi.py --- a/sentry_sdk/integrations/fastapi.py +++ b/sentry_sdk/integrations/fastapi.py @@ -1,6 +1,7 @@ import asyncio from copy import deepcopy +from sentry_sdk._functools import wraps from sentry_sdk._types import TYPE_CHECKING from sentry_sdk.hub import Hub, _should_send_default_pii from sentry_sdk.integrations import DidNotEnable @@ -79,6 +80,7 @@ ): old_call = dependant.call + @wraps(old_call) def _sentry_call(*args, **kwargs): # type: (*Any, **Any) -> Any hub = Hub.current
{"golden_diff": "diff --git a/sentry_sdk/integrations/fastapi.py b/sentry_sdk/integrations/fastapi.py\n--- a/sentry_sdk/integrations/fastapi.py\n+++ b/sentry_sdk/integrations/fastapi.py\n@@ -1,6 +1,7 @@\n import asyncio\n from copy import deepcopy\n \n+from sentry_sdk._functools import wraps\n from sentry_sdk._types import TYPE_CHECKING\n from sentry_sdk.hub import Hub, _should_send_default_pii\n from sentry_sdk.integrations import DidNotEnable\n@@ -79,6 +80,7 @@\n ):\n old_call = dependant.call\n \n+ @wraps(old_call)\n def _sentry_call(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n", "issue": "FastApiIntegration hides request handler function name\n### How do you use Sentry?\n\nSentry Saas (sentry.io)\n\n### Version\n\n1.31.0\n\n### Steps to Reproduce\n\nUse the python sentry sdk in conjunction with FastAPI and the NewRelic python agent. \r\nView transactions in the NewRelic UI. \n\n### Expected Result\n\nThe NewRelic transaction name should show the function name handling the FastAPI route. \n\n### Actual Result\n\nNew Relic services representing our new fastapi services are seeing significant time spent in a transaction named `sentry_sdk.integrations.fastapi:patch_get_request_handler.<locals>._sentry_get_request_handler.<locals>._sentry_call`.\r\n\r\n![image](https://github.com/getsentry/sentry-python/assets/66433626/22bb7222-fb35-45fa-8b19-cba6432b8752)\r\n\r\nThe source code for `fastspi:patch_get_request_handler` wraps non-async request handlers in a local function called `_sentry_call`. This local function shares the same `__qualname__` responsible for the significant transaction time. The result is every route handler has the same name and it appears in New Relic that the service has one transaction type. \r\n\r\nSentry sdk integrations wrap functions all the time to create sentry scope and yet we never have this problem. It appears the issue comes from a simple oversight when writing the fastapi integration. Other integrations use `sentry_sdk._functools:wraps` to wrap a function with sentry specific behavior, yet ensure the name, qualname, etc. are retrained from the wrapped function. 
\n", "before_files": [{"content": "import asyncio\nfrom copy import deepcopy\n\nfrom sentry_sdk._types import TYPE_CHECKING\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations import DidNotEnable\nfrom sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE\nfrom sentry_sdk.utils import transaction_from_function, logger\n\nif TYPE_CHECKING:\n from typing import Any, Callable, Dict\n from sentry_sdk.scope import Scope\n\ntry:\n from sentry_sdk.integrations.starlette import (\n StarletteIntegration,\n StarletteRequestExtractor,\n )\nexcept DidNotEnable:\n raise DidNotEnable(\"Starlette is not installed\")\n\ntry:\n import fastapi # type: ignore\nexcept ImportError:\n raise DidNotEnable(\"FastAPI is not installed\")\n\n\n_DEFAULT_TRANSACTION_NAME = \"generic FastAPI request\"\n\n\nclass FastApiIntegration(StarletteIntegration):\n identifier = \"fastapi\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n patch_get_request_handler()\n\n\ndef _set_transaction_name_and_source(scope, transaction_style, request):\n # type: (Scope, str, Any) -> None\n name = \"\"\n\n if transaction_style == \"endpoint\":\n endpoint = request.scope.get(\"endpoint\")\n if endpoint:\n name = transaction_from_function(endpoint) or \"\"\n\n elif transaction_style == \"url\":\n route = request.scope.get(\"route\")\n if route:\n path = getattr(route, \"path\", None)\n if path is not None:\n name = path\n\n if not name:\n name = _DEFAULT_TRANSACTION_NAME\n source = TRANSACTION_SOURCE_ROUTE\n else:\n source = SOURCE_FOR_STYLE[transaction_style]\n\n scope.set_transaction_name(name, source=source)\n logger.debug(\n \"[FastAPI] Set transaction name and source on scope: %s / %s\", name, source\n )\n\n\ndef patch_get_request_handler():\n # type: () -> None\n old_get_request_handler = fastapi.routing.get_request_handler\n\n def _sentry_get_request_handler(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n dependant = kwargs.get(\"dependant\")\n if (\n dependant\n and dependant.call is not None\n and not asyncio.iscoroutinefunction(dependant.call)\n ):\n old_call = dependant.call\n\n def _sentry_call(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n with hub.configure_scope() as sentry_scope:\n if sentry_scope.profile is not None:\n sentry_scope.profile.update_active_thread_id()\n return old_call(*args, **kwargs)\n\n dependant.call = _sentry_call\n\n old_app = old_get_request_handler(*args, **kwargs)\n\n async def _sentry_app(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FastApiIntegration)\n if integration is None:\n return await old_app(*args, **kwargs)\n\n with hub.configure_scope() as sentry_scope:\n request = args[0]\n\n _set_transaction_name_and_source(\n sentry_scope, integration.transaction_style, request\n )\n\n extractor = StarletteRequestExtractor(request)\n info = await extractor.extract_request_info()\n\n def _make_request_event_processor(req, integration):\n # type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]\n def event_processor(event, hint):\n # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n\n # Extract information from request\n request_info = event.get(\"request\", {})\n if info:\n if \"cookies\" in info and _should_send_default_pii():\n request_info[\"cookies\"] = info[\"cookies\"]\n if \"data\" in info:\n request_info[\"data\"] = info[\"data\"]\n event[\"request\"] = deepcopy(request_info)\n\n return event\n\n return 
event_processor\n\n sentry_scope._name = FastApiIntegration.identifier\n sentry_scope.add_event_processor(\n _make_request_event_processor(request, integration)\n )\n\n return await old_app(*args, **kwargs)\n\n return _sentry_app\n\n fastapi.routing.get_request_handler = _sentry_get_request_handler\n", "path": "sentry_sdk/integrations/fastapi.py"}], "after_files": [{"content": "import asyncio\nfrom copy import deepcopy\n\nfrom sentry_sdk._functools import wraps\nfrom sentry_sdk._types import TYPE_CHECKING\nfrom sentry_sdk.hub import Hub, _should_send_default_pii\nfrom sentry_sdk.integrations import DidNotEnable\nfrom sentry_sdk.tracing import SOURCE_FOR_STYLE, TRANSACTION_SOURCE_ROUTE\nfrom sentry_sdk.utils import transaction_from_function, logger\n\nif TYPE_CHECKING:\n from typing import Any, Callable, Dict\n from sentry_sdk.scope import Scope\n\ntry:\n from sentry_sdk.integrations.starlette import (\n StarletteIntegration,\n StarletteRequestExtractor,\n )\nexcept DidNotEnable:\n raise DidNotEnable(\"Starlette is not installed\")\n\ntry:\n import fastapi # type: ignore\nexcept ImportError:\n raise DidNotEnable(\"FastAPI is not installed\")\n\n\n_DEFAULT_TRANSACTION_NAME = \"generic FastAPI request\"\n\n\nclass FastApiIntegration(StarletteIntegration):\n identifier = \"fastapi\"\n\n @staticmethod\n def setup_once():\n # type: () -> None\n patch_get_request_handler()\n\n\ndef _set_transaction_name_and_source(scope, transaction_style, request):\n # type: (Scope, str, Any) -> None\n name = \"\"\n\n if transaction_style == \"endpoint\":\n endpoint = request.scope.get(\"endpoint\")\n if endpoint:\n name = transaction_from_function(endpoint) or \"\"\n\n elif transaction_style == \"url\":\n route = request.scope.get(\"route\")\n if route:\n path = getattr(route, \"path\", None)\n if path is not None:\n name = path\n\n if not name:\n name = _DEFAULT_TRANSACTION_NAME\n source = TRANSACTION_SOURCE_ROUTE\n else:\n source = SOURCE_FOR_STYLE[transaction_style]\n\n scope.set_transaction_name(name, source=source)\n logger.debug(\n \"[FastAPI] Set transaction name and source on scope: %s / %s\", name, source\n )\n\n\ndef patch_get_request_handler():\n # type: () -> None\n old_get_request_handler = fastapi.routing.get_request_handler\n\n def _sentry_get_request_handler(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n dependant = kwargs.get(\"dependant\")\n if (\n dependant\n and dependant.call is not None\n and not asyncio.iscoroutinefunction(dependant.call)\n ):\n old_call = dependant.call\n\n @wraps(old_call)\n def _sentry_call(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n with hub.configure_scope() as sentry_scope:\n if sentry_scope.profile is not None:\n sentry_scope.profile.update_active_thread_id()\n return old_call(*args, **kwargs)\n\n dependant.call = _sentry_call\n\n old_app = old_get_request_handler(*args, **kwargs)\n\n async def _sentry_app(*args, **kwargs):\n # type: (*Any, **Any) -> Any\n hub = Hub.current\n integration = hub.get_integration(FastApiIntegration)\n if integration is None:\n return await old_app(*args, **kwargs)\n\n with hub.configure_scope() as sentry_scope:\n request = args[0]\n\n _set_transaction_name_and_source(\n sentry_scope, integration.transaction_style, request\n )\n\n extractor = StarletteRequestExtractor(request)\n info = await extractor.extract_request_info()\n\n def _make_request_event_processor(req, integration):\n # type: (Any, Any) -> Callable[[Dict[str, Any], Dict[str, Any]], Dict[str, Any]]\n def event_processor(event, hint):\n # type: 
(Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]\n\n # Extract information from request\n request_info = event.get(\"request\", {})\n if info:\n if \"cookies\" in info and _should_send_default_pii():\n request_info[\"cookies\"] = info[\"cookies\"]\n if \"data\" in info:\n request_info[\"data\"] = info[\"data\"]\n event[\"request\"] = deepcopy(request_info)\n\n return event\n\n return event_processor\n\n sentry_scope._name = FastApiIntegration.identifier\n sentry_scope.add_event_processor(\n _make_request_event_processor(request, integration)\n )\n\n return await old_app(*args, **kwargs)\n\n return _sentry_app\n\n fastapi.routing.get_request_handler = _sentry_get_request_handler\n", "path": "sentry_sdk/integrations/fastapi.py"}]}
1,898
178
gh_patches_debug_18754
rasdani/github-patches
git_diff
streamlink__streamlink-4467
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove `streamlink.plugin.api.utils.itertags` [`streamlink.plugin.api.utils.itertags`](https://github.com/streamlink/streamlink/blob/3.2.0/src/streamlink/plugin/api/utils.py#L16-L28) has become obsolete ever since `lxml` was added as a dependency to Streamlink for parsing HTML. `itertags` is a hacky implementation via regexes, which is not only slow, but it's also impossible to correctly parse HTML nodes with regular expressions, so it shouldn't be used when better and much faster solutions are available. It also always requires unescaping tag values, which is annoying. We've already updated and replaced lots of plugins which were previously using it, but there are still some left: ``` $ GIT_PAGER=cat git grep -F 'from streamlink.plugin.api.utils import' a1ce471f a1ce471f:src/streamlink/plugins/cdnbg.py:from streamlink.plugin.api.utils import itertags a1ce471f:src/streamlink/plugins/facebook.py:from streamlink.plugin.api.utils import itertags a1ce471f:src/streamlink/plugins/funimationnow.py:from streamlink.plugin.api.utils import itertags a1ce471f:src/streamlink/plugins/senategov.py:from streamlink.plugin.api.utils import itertags a1ce471f:src/streamlink/plugins/vrtbe.py:from streamlink.plugin.api.utils import itertags a1ce471f:tests/test_plugin_utils.py:from streamlink.plugin.api.utils import itertags ``` - [x] cdnbg - [x] facebook - [x] funimationnow - [x] senategov - [x] vrtbe Once every last plugin has been updated, the entire `streamlink.plugin.api.utils` module can be removed, as it only contains the `itertags` function and some other useless export aliases which are not even used anymore in Streamlink's codebase. If we care about plugin-API stability (something which has never been discussed), removing this would be considered a breaking change. Since we've just dropped py36, that's something which could be included in the 4.0.0 release. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugin/api/utils.py` Content: ``` 1 """Useful wrappers and other tools.""" 2 import re 3 from collections import namedtuple 4 5 from streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml 6 7 __all__ = ["parse_json", "parse_xml", "parse_query"] 8 9 10 tag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\s*(?P=tag)\s*>)?)''', 11 re.MULTILINE | re.DOTALL) 12 attr_re = re.compile(r'''\s*(?P<key>[\w-]+)\s*(?:=\s*(?P<quote>["']?)(?P<value>.*?)(?P=quote)\s*)?''') 13 Tag = namedtuple("Tag", "tag attributes text") 14 15 16 def itertags(html, tag): 17 """ 18 Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when 19 standards compliance is not required. Will find tags that are commented out, or inside script tag etc. 20 21 :param html: HTML page 22 :param tag: tag name to find 23 :return: generator with Tags 24 """ 25 for match in tag_re.finditer(html): 26 if match.group("tag") == tag: 27 attrs = {a.group("key").lower(): a.group("value") for a in attr_re.finditer(match.group("attr"))} 28 yield Tag(match.group("tag"), attrs, match.group("inner")) 29 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugin/api/utils.py b/src/streamlink/plugin/api/utils.py deleted file mode 100644 --- a/src/streamlink/plugin/api/utils.py +++ /dev/null @@ -1,28 +0,0 @@ -"""Useful wrappers and other tools.""" -import re -from collections import namedtuple - -from streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml - -__all__ = ["parse_json", "parse_xml", "parse_query"] - - -tag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\s*(?P=tag)\s*>)?)''', - re.MULTILINE | re.DOTALL) -attr_re = re.compile(r'''\s*(?P<key>[\w-]+)\s*(?:=\s*(?P<quote>["']?)(?P<value>.*?)(?P=quote)\s*)?''') -Tag = namedtuple("Tag", "tag attributes text") - - -def itertags(html, tag): - """ - Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when - standards compliance is not required. Will find tags that are commented out, or inside script tag etc. - - :param html: HTML page - :param tag: tag name to find - :return: generator with Tags - """ - for match in tag_re.finditer(html): - if match.group("tag") == tag: - attrs = {a.group("key").lower(): a.group("value") for a in attr_re.finditer(match.group("attr"))} - yield Tag(match.group("tag"), attrs, match.group("inner"))
{"golden_diff": "diff --git a/src/streamlink/plugin/api/utils.py b/src/streamlink/plugin/api/utils.py\ndeleted file mode 100644\n--- a/src/streamlink/plugin/api/utils.py\n+++ /dev/null\n@@ -1,28 +0,0 @@\n-\"\"\"Useful wrappers and other tools.\"\"\"\n-import re\n-from collections import namedtuple\n-\n-from streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml\n-\n-__all__ = [\"parse_json\", \"parse_xml\", \"parse_query\"]\n-\n-\n-tag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\\s*(?P=tag)\\s*>)?)''',\n- re.MULTILINE | re.DOTALL)\n-attr_re = re.compile(r'''\\s*(?P<key>[\\w-]+)\\s*(?:=\\s*(?P<quote>[\"']?)(?P<value>.*?)(?P=quote)\\s*)?''')\n-Tag = namedtuple(\"Tag\", \"tag attributes text\")\n-\n-\n-def itertags(html, tag):\n- \"\"\"\n- Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when\n- standards compliance is not required. Will find tags that are commented out, or inside script tag etc.\n-\n- :param html: HTML page\n- :param tag: tag name to find\n- :return: generator with Tags\n- \"\"\"\n- for match in tag_re.finditer(html):\n- if match.group(\"tag\") == tag:\n- attrs = {a.group(\"key\").lower(): a.group(\"value\") for a in attr_re.finditer(match.group(\"attr\"))}\n- yield Tag(match.group(\"tag\"), attrs, match.group(\"inner\"))\n", "issue": "Remove `streamlink.plugin.api.utils.itertags`\n[`streamlink.plugin.api.utils.itertags`](https://github.com/streamlink/streamlink/blob/3.2.0/src/streamlink/plugin/api/utils.py#L16-L28) has become obsolete ever since `lxml` was added as a dependency to Streamlink for parsing HTML.\r\n\r\n`itertags` is a hacky implementation via regexes, which is not only slow, but it's also impossible to correctly parse HTML nodes with regular expressions, so it shouldn't be used when better and much faster solutions are available. It also always requires unescaping tag values, which is annoying.\r\n\r\nWe've already updated and replaced lots of plugins which were previously using it, but there are still some left:\r\n```\r\n$ GIT_PAGER=cat git grep -F 'from streamlink.plugin.api.utils import' a1ce471f\r\na1ce471f:src/streamlink/plugins/cdnbg.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/facebook.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/funimationnow.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/senategov.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:src/streamlink/plugins/vrtbe.py:from streamlink.plugin.api.utils import itertags\r\na1ce471f:tests/test_plugin_utils.py:from streamlink.plugin.api.utils import itertags\r\n```\r\n\r\n- [x] cdnbg\r\n- [x] facebook\r\n- [x] funimationnow\r\n- [x] senategov\r\n- [x] vrtbe\r\n\r\nOnce every last plugin has been updated, the entire `streamlink.plugin.api.utils` module can be removed, as it only contains the `itertags` function and some other useless export aliases which are not even used anymore in Streamlink's codebase.\r\n\r\nIf we care about plugin-API stability (something which has never been discussed), removing this would be considered a breaking change. 
Since we've just dropped py36, that's something which could be included in the 4.0.0 release.\n", "before_files": [{"content": "\"\"\"Useful wrappers and other tools.\"\"\"\nimport re\nfrom collections import namedtuple\n\nfrom streamlink.utils.parse import parse_json, parse_qsd as parse_query, parse_xml\n\n__all__ = [\"parse_json\", \"parse_xml\", \"parse_query\"]\n\n\ntag_re = re.compile(r'''(?=<(?P<tag>[a-zA-Z]+)(?P<attr>.*?)(?P<end>/)?>(?:(?P<inner>.*?)</\\s*(?P=tag)\\s*>)?)''',\n re.MULTILINE | re.DOTALL)\nattr_re = re.compile(r'''\\s*(?P<key>[\\w-]+)\\s*(?:=\\s*(?P<quote>[\"']?)(?P<value>.*?)(?P=quote)\\s*)?''')\nTag = namedtuple(\"Tag\", \"tag attributes text\")\n\n\ndef itertags(html, tag):\n \"\"\"\n Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when\n standards compliance is not required. Will find tags that are commented out, or inside script tag etc.\n\n :param html: HTML page\n :param tag: tag name to find\n :return: generator with Tags\n \"\"\"\n for match in tag_re.finditer(html):\n if match.group(\"tag\") == tag:\n attrs = {a.group(\"key\").lower(): a.group(\"value\") for a in attr_re.finditer(match.group(\"attr\"))}\n yield Tag(match.group(\"tag\"), attrs, match.group(\"inner\"))\n", "path": "src/streamlink/plugin/api/utils.py"}], "after_files": [{"content": null, "path": "src/streamlink/plugin/api/utils.py"}]}
1,136
407
gh_patches_debug_3218
rasdani/github-patches
git_diff
translate__pootle-6745
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Init from templates doesnt create directories correctly ### Steps to reproduce: - create a new project which has subdirectories - load templates from fs - init a new tp from admin ui ### Results (Expected/Actual): - new dirs/stores are created, but the dirs dont have the correct link to their tp ### Environment (i.e. 'pootle --version', DB, OS, Browser): 2.9 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/apps/pootle_translationproject/models.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import logging 10 import posixpath 11 from pathlib import PurePosixPath 12 13 from django.contrib.contenttypes.fields import GenericRelation 14 from django.db import models 15 from django.urls import reverse 16 from django.utils.functional import cached_property 17 18 from pootle.core.delegate import data_tool 19 from pootle.core.mixins import CachedTreeItem 20 from pootle.core.url_helpers import get_editor_filter, split_pootle_path 21 from pootle_app.models.directory import Directory 22 from pootle_checks.constants import EXCLUDED_FILTERS 23 from pootle_language.models import Language 24 from pootle_project.models import Project 25 from pootle_revision.models import Revision 26 from staticpages.models import StaticPage 27 28 29 logger = logging.getLogger(__name__) 30 31 32 class TranslationProjectManager(models.Manager): 33 34 def get_terminology_project(self, language_id): 35 # FIXME: the code below currently uses the same approach to determine 36 # the 'terminology' kind of a project as 'Project.is_terminology()', 37 # which means it checks the value of 'checkstyle' field 38 # (see pootle_project/models.py:240). 39 # 40 # This should probably be replaced in the future with a dedicated 41 # project property. 42 return self.get(language=language_id, 43 project__checkstyle='terminology') 44 45 def live(self): 46 """Filters translation projects that have non-obsolete directories.""" 47 return self.filter(directory__obsolete=False) 48 49 def for_user(self, user, select_related=None): 50 """Filters translation projects for a specific user. 51 52 - Admins always get all translation projects. 53 - Regular users only get enabled translation projects 54 accessible to them. 55 56 :param user: The user for whom the translation projects need to be 57 retrieved for. 58 :return: A filtered queryset with `TranslationProject`s for `user`. 59 """ 60 qs = self.live() 61 if select_related is not None: 62 qs = qs.select_related(*select_related) 63 64 if user.is_superuser: 65 return qs 66 67 return qs.filter( 68 project__disabled=False, 69 project__code__in=Project.accessible_by_user(user)) 70 71 def get_for_user(self, user, project_code, language_code, 72 select_related=None): 73 """Gets a `language_code`/`project_code` translation project 74 for a specific `user`. 75 76 - Admins can get the translation project even 77 if its project is disabled. 78 - Regular users only get a translation project 79 if its project isn't disabled and it is accessible to them. 80 81 :param user: The user for whom the translation project needs 82 to be retrieved. 
83 :param project_code: The code of a project for the TP to retrieve. 84 :param language_code: The code of the language fro the TP to retrieve. 85 :return: The `TranslationProject` matching the params, raises 86 otherwise. 87 """ 88 return self.for_user( 89 user, select_related).get( 90 project__code=project_code, 91 language__code=language_code) 92 93 94 class TranslationProject(models.Model, CachedTreeItem): 95 96 language = models.ForeignKey( 97 Language, db_index=False, on_delete=models.CASCADE) 98 project = models.ForeignKey( 99 Project, db_index=True, on_delete=models.CASCADE) 100 directory = models.OneToOneField( 101 Directory, db_index=True, editable=False, on_delete=models.CASCADE) 102 pootle_path = models.CharField(max_length=255, null=False, unique=True, 103 db_index=True, editable=False) 104 creation_time = models.DateTimeField(auto_now_add=True, db_index=True, 105 editable=False, null=True) 106 revisions = GenericRelation(Revision) 107 108 objects = TranslationProjectManager() 109 110 class Meta(object): 111 unique_together = ( 112 ('language', 'project'), 113 ('project', 'language')) 114 db_table = 'pootle_app_translationproject' 115 # disabled objects are hidden for related objects too 116 base_manager_name = 'objects' 117 118 @cached_property 119 def code(self): 120 return u'-'.join([self.language.code, self.project.code]) 121 122 @cached_property 123 def data_tool(self): 124 return data_tool.get(self.__class__)(self) 125 126 # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # # 127 128 @property 129 def name(self): 130 # TODO: See if `self.fullname` can be removed 131 return self.fullname 132 133 @property 134 def fullname(self): 135 return "%s [%s]" % (self.project.fullname, self.language.name) 136 137 @property 138 def checker(self): 139 from translate.filters import checks 140 checkerclasses = [ 141 checks.projectcheckers.get( 142 self.project.checkstyle, 143 checks.StandardChecker)] 144 return checks.TeeChecker(checkerclasses=checkerclasses, 145 excludefilters=EXCLUDED_FILTERS, 146 errorhandler=self.filtererrorhandler, 147 languagecode=self.language.code) 148 149 @property 150 def disabled(self): 151 return self.project.disabled 152 153 @cached_property 154 def templates_tp(self): 155 return self.project.get_template_translationproject() 156 157 @property 158 def is_template_project(self): 159 return self == self.templates_tp 160 161 # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # # 162 163 def __unicode__(self): 164 return self.pootle_path 165 166 def __init__(self, *args, **kwargs): 167 super(TranslationProject, self).__init__(*args, **kwargs) 168 169 def save(self, *args, **kwargs): 170 self.directory = ( 171 self.language.directory.get_or_make_subdir(self.project.code)) 172 self.pootle_path = self.directory.pootle_path 173 super(TranslationProject, self).save(*args, **kwargs) 174 if self.directory.tp_id != self.pk: 175 self.directory.tp = self 176 self.directory.save() 177 178 def delete(self, *args, **kwargs): 179 directory = self.directory 180 181 super(TranslationProject, self).delete(*args, **kwargs) 182 directory.delete() 183 184 def get_absolute_url(self): 185 return reverse( 186 'pootle-tp-browse', 187 args=split_pootle_path(self.pootle_path)[:-1]) 188 189 def get_translate_url(self, **kwargs): 190 return u''.join( 191 [reverse("pootle-tp-translate", 192 args=split_pootle_path(self.pootle_path)[:-1]), 193 get_editor_filter(**kwargs)]) 194 195 def get_announcement(self, user=None): 196 """Return the related 
announcement, if any.""" 197 return StaticPage.get_announcement_for(self.pootle_path, user) 198 199 def filtererrorhandler(self, functionname, str1, str2, e): 200 logger.error( 201 u"Error in filter %s: %r, %r, %s", 202 functionname, 203 str1, 204 str2, e) 205 return False 206 207 def is_accessible_by(self, user): 208 """Returns `True` if the current translation project is accessible 209 by `user`. 210 """ 211 if user.is_superuser: 212 return True 213 214 return self.project.code in Project.accessible_by_user(user) 215 216 def can_be_inited_from_templates(self): 217 """Returns `True` if the current translation project hasn't been 218 saved yet and can be initialized from templates. 219 """ 220 221 # This method checks if the current translation project directory 222 # doesn't exist. So it won't work if the translation project is already 223 # saved the database because the translation project directory is 224 # auto-created in `save()` method. 225 return ( 226 not self.is_template_project 227 and self.templates_tp is not None) 228 229 def create_parent_dirs(self, pootle_path): 230 parent = self.directory 231 dirs_to_create = [] 232 for path in PurePosixPath(pootle_path).parents: 233 path = posixpath.join(str(path), "") 234 if path == self.pootle_path: 235 break 236 dirs_to_create.append(path) 237 for path in reversed(dirs_to_create): 238 parent, __ = Directory.objects.get_or_create( 239 pootle_path=path, 240 parent=parent, 241 name=posixpath.basename(path.rstrip("/"))) 242 return parent 243 244 def init_store_from_template(self, template_store): 245 """Initialize a new file for `self` using `template_store`. 246 """ 247 pootle_path = posixpath.join( 248 self.pootle_path.rstrip("/"), 249 template_store.tp_path.lstrip("/")) 250 pootle_path = ".".join( 251 [posixpath.splitext(pootle_path)[0], 252 template_store.filetype.extension.name]) 253 name = posixpath.basename(pootle_path) 254 if name in ["template.po", "templates.po"]: 255 # gnu-style layout 256 # use language code instead of template name 257 name = ".".join( 258 [self.language.code, 259 template_store.filetype.extension.name]) 260 dirname = posixpath.dirname(pootle_path) 261 pootle_path = posixpath.join(dirname, name) 262 if not self.stores.filter(pootle_path=pootle_path).exists(): 263 return self.stores.create( 264 parent=self.create_parent_dirs(pootle_path), 265 pootle_path=pootle_path, 266 name=name) 267 268 def init_from_templates(self): 269 """Initializes the current translation project files using 270 the templates TP ones. 271 """ 272 template_stores = self.templates_tp.stores.live().select_related( 273 "filetype__template_extension", 274 "filetype__extension").order_by("creation_time") 275 for template_store in template_stores.iterator(): 276 new_store = self.init_store_from_template(template_store) 277 if new_store: 278 new_store.update( 279 new_store.deserialize(template_store.serialize())) 280 281 # # # TreeItem 282 def get_children(self): 283 return self.directory.children 284 285 def get_parents(self): 286 return [self.project] 287 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/apps/pootle_translationproject/models.py b/pootle/apps/pootle_translationproject/models.py --- a/pootle/apps/pootle_translationproject/models.py +++ b/pootle/apps/pootle_translationproject/models.py @@ -238,6 +238,7 @@ parent, __ = Directory.objects.get_or_create( pootle_path=path, parent=parent, + tp=self, name=posixpath.basename(path.rstrip("/"))) return parent
{"golden_diff": "diff --git a/pootle/apps/pootle_translationproject/models.py b/pootle/apps/pootle_translationproject/models.py\n--- a/pootle/apps/pootle_translationproject/models.py\n+++ b/pootle/apps/pootle_translationproject/models.py\n@@ -238,6 +238,7 @@\n parent, __ = Directory.objects.get_or_create(\n pootle_path=path,\n parent=parent,\n+ tp=self,\n name=posixpath.basename(path.rstrip(\"/\")))\n return parent\n", "issue": "Init from templates doesnt create directories correctly\n### Steps to reproduce:\r\n\r\n- create a new project which has subdirectories\r\n- load templates from fs\r\n- init a new tp from admin ui\r\n\r\n### Results (Expected/Actual):\r\n\r\n- new dirs/stores are created, but the dirs dont have the correct link to their tp\r\n\r\n### Environment (i.e. 'pootle --version', DB, OS, Browser):\r\n\r\n2.9\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport posixpath\nfrom pathlib import PurePosixPath\n\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import data_tool\nfrom pootle.core.mixins import CachedTreeItem\nfrom pootle.core.url_helpers import get_editor_filter, split_pootle_path\nfrom pootle_app.models.directory import Directory\nfrom pootle_checks.constants import EXCLUDED_FILTERS\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom pootle_revision.models import Revision\nfrom staticpages.models import StaticPage\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TranslationProjectManager(models.Manager):\n\n def get_terminology_project(self, language_id):\n # FIXME: the code below currently uses the same approach to determine\n # the 'terminology' kind of a project as 'Project.is_terminology()',\n # which means it checks the value of 'checkstyle' field\n # (see pootle_project/models.py:240).\n #\n # This should probably be replaced in the future with a dedicated\n # project property.\n return self.get(language=language_id,\n project__checkstyle='terminology')\n\n def live(self):\n \"\"\"Filters translation projects that have non-obsolete directories.\"\"\"\n return self.filter(directory__obsolete=False)\n\n def for_user(self, user, select_related=None):\n \"\"\"Filters translation projects for a specific user.\n\n - Admins always get all translation projects.\n - Regular users only get enabled translation projects\n accessible to them.\n\n :param user: The user for whom the translation projects need to be\n retrieved for.\n :return: A filtered queryset with `TranslationProject`s for `user`.\n \"\"\"\n qs = self.live()\n if select_related is not None:\n qs = qs.select_related(*select_related)\n\n if user.is_superuser:\n return qs\n\n return qs.filter(\n project__disabled=False,\n project__code__in=Project.accessible_by_user(user))\n\n def get_for_user(self, user, project_code, language_code,\n select_related=None):\n \"\"\"Gets a `language_code`/`project_code` translation project\n for a specific `user`.\n\n - Admins can get the translation project even\n if its project is disabled.\n - Regular users only get a translation project\n if its project isn't disabled and it is 
accessible to them.\n\n :param user: The user for whom the translation project needs\n to be retrieved.\n :param project_code: The code of a project for the TP to retrieve.\n :param language_code: The code of the language fro the TP to retrieve.\n :return: The `TranslationProject` matching the params, raises\n otherwise.\n \"\"\"\n return self.for_user(\n user, select_related).get(\n project__code=project_code,\n language__code=language_code)\n\n\nclass TranslationProject(models.Model, CachedTreeItem):\n\n language = models.ForeignKey(\n Language, db_index=False, on_delete=models.CASCADE)\n project = models.ForeignKey(\n Project, db_index=True, on_delete=models.CASCADE)\n directory = models.OneToOneField(\n Directory, db_index=True, editable=False, on_delete=models.CASCADE)\n pootle_path = models.CharField(max_length=255, null=False, unique=True,\n db_index=True, editable=False)\n creation_time = models.DateTimeField(auto_now_add=True, db_index=True,\n editable=False, null=True)\n revisions = GenericRelation(Revision)\n\n objects = TranslationProjectManager()\n\n class Meta(object):\n unique_together = (\n ('language', 'project'),\n ('project', 'language'))\n db_table = 'pootle_app_translationproject'\n # disabled objects are hidden for related objects too\n base_manager_name = 'objects'\n\n @cached_property\n def code(self):\n return u'-'.join([self.language.code, self.project.code])\n\n @cached_property\n def data_tool(self):\n return data_tool.get(self.__class__)(self)\n\n # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #\n\n @property\n def name(self):\n # TODO: See if `self.fullname` can be removed\n return self.fullname\n\n @property\n def fullname(self):\n return \"%s [%s]\" % (self.project.fullname, self.language.name)\n\n @property\n def checker(self):\n from translate.filters import checks\n checkerclasses = [\n checks.projectcheckers.get(\n self.project.checkstyle,\n checks.StandardChecker)]\n return checks.TeeChecker(checkerclasses=checkerclasses,\n excludefilters=EXCLUDED_FILTERS,\n errorhandler=self.filtererrorhandler,\n languagecode=self.language.code)\n\n @property\n def disabled(self):\n return self.project.disabled\n\n @cached_property\n def templates_tp(self):\n return self.project.get_template_translationproject()\n\n @property\n def is_template_project(self):\n return self == self.templates_tp\n\n # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #\n\n def __unicode__(self):\n return self.pootle_path\n\n def __init__(self, *args, **kwargs):\n super(TranslationProject, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.directory = (\n self.language.directory.get_or_make_subdir(self.project.code))\n self.pootle_path = self.directory.pootle_path\n super(TranslationProject, self).save(*args, **kwargs)\n if self.directory.tp_id != self.pk:\n self.directory.tp = self\n self.directory.save()\n\n def delete(self, *args, **kwargs):\n directory = self.directory\n\n super(TranslationProject, self).delete(*args, **kwargs)\n directory.delete()\n\n def get_absolute_url(self):\n return reverse(\n 'pootle-tp-browse',\n args=split_pootle_path(self.pootle_path)[:-1])\n\n def get_translate_url(self, **kwargs):\n return u''.join(\n [reverse(\"pootle-tp-translate\",\n args=split_pootle_path(self.pootle_path)[:-1]),\n get_editor_filter(**kwargs)])\n\n def get_announcement(self, user=None):\n \"\"\"Return the related announcement, if any.\"\"\"\n return StaticPage.get_announcement_for(self.pootle_path, user)\n\n def 
filtererrorhandler(self, functionname, str1, str2, e):\n logger.error(\n u\"Error in filter %s: %r, %r, %s\",\n functionname,\n str1,\n str2, e)\n return False\n\n def is_accessible_by(self, user):\n \"\"\"Returns `True` if the current translation project is accessible\n by `user`.\n \"\"\"\n if user.is_superuser:\n return True\n\n return self.project.code in Project.accessible_by_user(user)\n\n def can_be_inited_from_templates(self):\n \"\"\"Returns `True` if the current translation project hasn't been\n saved yet and can be initialized from templates.\n \"\"\"\n\n # This method checks if the current translation project directory\n # doesn't exist. So it won't work if the translation project is already\n # saved the database because the translation project directory is\n # auto-created in `save()` method.\n return (\n not self.is_template_project\n and self.templates_tp is not None)\n\n def create_parent_dirs(self, pootle_path):\n parent = self.directory\n dirs_to_create = []\n for path in PurePosixPath(pootle_path).parents:\n path = posixpath.join(str(path), \"\")\n if path == self.pootle_path:\n break\n dirs_to_create.append(path)\n for path in reversed(dirs_to_create):\n parent, __ = Directory.objects.get_or_create(\n pootle_path=path,\n parent=parent,\n name=posixpath.basename(path.rstrip(\"/\")))\n return parent\n\n def init_store_from_template(self, template_store):\n \"\"\"Initialize a new file for `self` using `template_store`.\n \"\"\"\n pootle_path = posixpath.join(\n self.pootle_path.rstrip(\"/\"),\n template_store.tp_path.lstrip(\"/\"))\n pootle_path = \".\".join(\n [posixpath.splitext(pootle_path)[0],\n template_store.filetype.extension.name])\n name = posixpath.basename(pootle_path)\n if name in [\"template.po\", \"templates.po\"]:\n # gnu-style layout\n # use language code instead of template name\n name = \".\".join(\n [self.language.code,\n template_store.filetype.extension.name])\n dirname = posixpath.dirname(pootle_path)\n pootle_path = posixpath.join(dirname, name)\n if not self.stores.filter(pootle_path=pootle_path).exists():\n return self.stores.create(\n parent=self.create_parent_dirs(pootle_path),\n pootle_path=pootle_path,\n name=name)\n\n def init_from_templates(self):\n \"\"\"Initializes the current translation project files using\n the templates TP ones.\n \"\"\"\n template_stores = self.templates_tp.stores.live().select_related(\n \"filetype__template_extension\",\n \"filetype__extension\").order_by(\"creation_time\")\n for template_store in template_stores.iterator():\n new_store = self.init_store_from_template(template_store)\n if new_store:\n new_store.update(\n new_store.deserialize(template_store.serialize()))\n\n # # # TreeItem\n def get_children(self):\n return self.directory.children\n\n def get_parents(self):\n return [self.project]\n", "path": "pootle/apps/pootle_translationproject/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\nimport posixpath\nfrom pathlib import PurePosixPath\n\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import data_tool\nfrom pootle.core.mixins import CachedTreeItem\nfrom pootle.core.url_helpers import get_editor_filter, split_pootle_path\nfrom pootle_app.models.directory import Directory\nfrom pootle_checks.constants import EXCLUDED_FILTERS\nfrom pootle_language.models import Language\nfrom pootle_project.models import Project\nfrom pootle_revision.models import Revision\nfrom staticpages.models import StaticPage\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TranslationProjectManager(models.Manager):\n\n def get_terminology_project(self, language_id):\n # FIXME: the code below currently uses the same approach to determine\n # the 'terminology' kind of a project as 'Project.is_terminology()',\n # which means it checks the value of 'checkstyle' field\n # (see pootle_project/models.py:240).\n #\n # This should probably be replaced in the future with a dedicated\n # project property.\n return self.get(language=language_id,\n project__checkstyle='terminology')\n\n def live(self):\n \"\"\"Filters translation projects that have non-obsolete directories.\"\"\"\n return self.filter(directory__obsolete=False)\n\n def for_user(self, user, select_related=None):\n \"\"\"Filters translation projects for a specific user.\n\n - Admins always get all translation projects.\n - Regular users only get enabled translation projects\n accessible to them.\n\n :param user: The user for whom the translation projects need to be\n retrieved for.\n :return: A filtered queryset with `TranslationProject`s for `user`.\n \"\"\"\n qs = self.live()\n if select_related is not None:\n qs = qs.select_related(*select_related)\n\n if user.is_superuser:\n return qs\n\n return qs.filter(\n project__disabled=False,\n project__code__in=Project.accessible_by_user(user))\n\n def get_for_user(self, user, project_code, language_code,\n select_related=None):\n \"\"\"Gets a `language_code`/`project_code` translation project\n for a specific `user`.\n\n - Admins can get the translation project even\n if its project is disabled.\n - Regular users only get a translation project\n if its project isn't disabled and it is accessible to them.\n\n :param user: The user for whom the translation project needs\n to be retrieved.\n :param project_code: The code of a project for the TP to retrieve.\n :param language_code: The code of the language fro the TP to retrieve.\n :return: The `TranslationProject` matching the params, raises\n otherwise.\n \"\"\"\n return self.for_user(\n user, select_related).get(\n project__code=project_code,\n language__code=language_code)\n\n\nclass TranslationProject(models.Model, CachedTreeItem):\n\n language = models.ForeignKey(\n Language, db_index=False, on_delete=models.CASCADE)\n project = models.ForeignKey(\n Project, db_index=True, on_delete=models.CASCADE)\n directory = models.OneToOneField(\n Directory, db_index=True, editable=False, on_delete=models.CASCADE)\n pootle_path = models.CharField(max_length=255, null=False, unique=True,\n db_index=True, editable=False)\n creation_time = models.DateTimeField(auto_now_add=True, db_index=True,\n editable=False, null=True)\n revisions = GenericRelation(Revision)\n\n objects 
= TranslationProjectManager()\n\n class Meta(object):\n unique_together = (\n ('language', 'project'),\n ('project', 'language'))\n db_table = 'pootle_app_translationproject'\n # disabled objects are hidden for related objects too\n base_manager_name = 'objects'\n\n @cached_property\n def code(self):\n return u'-'.join([self.language.code, self.project.code])\n\n @cached_property\n def data_tool(self):\n return data_tool.get(self.__class__)(self)\n\n # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #\n\n @property\n def name(self):\n # TODO: See if `self.fullname` can be removed\n return self.fullname\n\n @property\n def fullname(self):\n return \"%s [%s]\" % (self.project.fullname, self.language.name)\n\n @property\n def checker(self):\n from translate.filters import checks\n checkerclasses = [\n checks.projectcheckers.get(\n self.project.checkstyle,\n checks.StandardChecker)]\n return checks.TeeChecker(checkerclasses=checkerclasses,\n excludefilters=EXCLUDED_FILTERS,\n errorhandler=self.filtererrorhandler,\n languagecode=self.language.code)\n\n @property\n def disabled(self):\n return self.project.disabled\n\n @cached_property\n def templates_tp(self):\n return self.project.get_template_translationproject()\n\n @property\n def is_template_project(self):\n return self == self.templates_tp\n\n # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #\n\n def __unicode__(self):\n return self.pootle_path\n\n def __init__(self, *args, **kwargs):\n super(TranslationProject, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.directory = (\n self.language.directory.get_or_make_subdir(self.project.code))\n self.pootle_path = self.directory.pootle_path\n super(TranslationProject, self).save(*args, **kwargs)\n if self.directory.tp_id != self.pk:\n self.directory.tp = self\n self.directory.save()\n\n def delete(self, *args, **kwargs):\n directory = self.directory\n\n super(TranslationProject, self).delete(*args, **kwargs)\n directory.delete()\n\n def get_absolute_url(self):\n return reverse(\n 'pootle-tp-browse',\n args=split_pootle_path(self.pootle_path)[:-1])\n\n def get_translate_url(self, **kwargs):\n return u''.join(\n [reverse(\"pootle-tp-translate\",\n args=split_pootle_path(self.pootle_path)[:-1]),\n get_editor_filter(**kwargs)])\n\n def get_announcement(self, user=None):\n \"\"\"Return the related announcement, if any.\"\"\"\n return StaticPage.get_announcement_for(self.pootle_path, user)\n\n def filtererrorhandler(self, functionname, str1, str2, e):\n logger.error(\n u\"Error in filter %s: %r, %r, %s\",\n functionname,\n str1,\n str2, e)\n return False\n\n def is_accessible_by(self, user):\n \"\"\"Returns `True` if the current translation project is accessible\n by `user`.\n \"\"\"\n if user.is_superuser:\n return True\n\n return self.project.code in Project.accessible_by_user(user)\n\n def can_be_inited_from_templates(self):\n \"\"\"Returns `True` if the current translation project hasn't been\n saved yet and can be initialized from templates.\n \"\"\"\n\n # This method checks if the current translation project directory\n # doesn't exist. 
So it won't work if the translation project is already\n # saved the database because the translation project directory is\n # auto-created in `save()` method.\n return (\n not self.is_template_project\n and self.templates_tp is not None)\n\n def create_parent_dirs(self, pootle_path):\n parent = self.directory\n dirs_to_create = []\n for path in PurePosixPath(pootle_path).parents:\n path = posixpath.join(str(path), \"\")\n if path == self.pootle_path:\n break\n dirs_to_create.append(path)\n for path in reversed(dirs_to_create):\n parent, __ = Directory.objects.get_or_create(\n pootle_path=path,\n parent=parent,\n tp=self,\n name=posixpath.basename(path.rstrip(\"/\")))\n return parent\n\n def init_store_from_template(self, template_store):\n \"\"\"Initialize a new file for `self` using `template_store`.\n \"\"\"\n pootle_path = posixpath.join(\n self.pootle_path.rstrip(\"/\"),\n template_store.tp_path.lstrip(\"/\"))\n pootle_path = \".\".join(\n [posixpath.splitext(pootle_path)[0],\n template_store.filetype.extension.name])\n name = posixpath.basename(pootle_path)\n if name in [\"template.po\", \"templates.po\"]:\n # gnu-style layout\n # use language code instead of template name\n name = \".\".join(\n [self.language.code,\n template_store.filetype.extension.name])\n dirname = posixpath.dirname(pootle_path)\n pootle_path = posixpath.join(dirname, name)\n if not self.stores.filter(pootle_path=pootle_path).exists():\n return self.stores.create(\n parent=self.create_parent_dirs(pootle_path),\n pootle_path=pootle_path,\n name=name)\n\n def init_from_templates(self):\n \"\"\"Initializes the current translation project files using\n the templates TP ones.\n \"\"\"\n template_stores = self.templates_tp.stores.live().select_related(\n \"filetype__template_extension\",\n \"filetype__extension\").order_by(\"creation_time\")\n for template_store in template_stores.iterator():\n new_store = self.init_store_from_template(template_store)\n if new_store:\n new_store.update(\n new_store.deserialize(template_store.serialize()))\n\n # # # TreeItem\n def get_children(self):\n return self.directory.children\n\n def get_parents(self):\n return [self.project]\n", "path": "pootle/apps/pootle_translationproject/models.py"}]}
num_tokens: 3,327
num_tokens_diff: 112

problem_id: gh_patches_debug_60797
source: rasdani/github-patches
task_type: git_diff
in_source_id: engnadeau__pybotics-751
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create a way to add your own arm model[FEATURE] ## User Story <!-- A clear and concise description of what the problem is. I want to add my own arm configuration to the list of pre-trained models. ## Potential Solutions <!-- A clear and concise description of what you want to happen. --> If there was a comment next to each line of one of the arrays containing the pre-trained model saying what exactly each value was supposed to represent, that would help. <!-- A clear and concise description of any alternative solutions or features you've considered. --> I tried looking at the spec sheets of the arms and matching up values but I couldn't figure much out. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pybotics/predefined_models.py` Content: ``` 1 """Predefined robot models.""" 2 import numpy as np # type: ignore 3 4 5 def kuka_lbr_iiwa_7() -> np.ndarray: # pragma: no cover 6 """Get KUKA LBR iiwa 7 MDH model.""" 7 return np.array( 8 [ 9 [0, 0, 0, 340], 10 [-np.pi / 2, 0, 0, 0], 11 [np.pi / 2, 0, 0, 400], 12 [np.pi / 2, 0, 0, 0], 13 [-np.pi / 2, 0, 0, 400], 14 [-np.pi / 2, 0, 0, 0], 15 [np.pi / 2, 0, 0, 126], 16 ] 17 ) 18 19 20 def mecademic_meca500() -> np.ndarray: # pragma: no cover 21 """Get Meca500 MDH model.""" 22 return np.array( 23 [ 24 [0, 0, 0, 135], 25 [-np.pi / 2, 0, -np.pi / 2, 0], 26 [0, 135, 0, 0], 27 [-np.pi / 2, 38, 0, 120], 28 [np.pi / 2, 0, 0, 0], 29 [-np.pi / 2, 0, np.pi, 72], 30 ] 31 ) 32 33 34 def puma560() -> np.ndarray: # pragma: no cover 35 """Get PUMA560 MDH model.""" 36 return np.array( 37 [ 38 [0, 0, 0, 0], 39 [-np.pi / 2, 0, 0, 0], 40 [0, 612.7, 0, 0], 41 [0, 571.6, 0, 163.9], 42 [-np.pi / 2, 0, 0, 115.7], 43 [np.pi / 2, 0, np.pi, 92.2], 44 ] 45 ) 46 47 48 def ur10() -> np.ndarray: # pragma: no cover 49 """Get UR10 MDH model.""" 50 return np.array( 51 [ 52 [0, 0, 0, 118], 53 [np.pi / 2, 0, np.pi, 0], 54 [0, 612.7, 0, 0], 55 [0, 571.6, 0, 163.9], 56 [-np.pi / 2, 0, 0, 115.7], 57 [np.pi / 2, 0, np.pi, 92.2], 58 ] 59 ) 60 61 62 def abb_irb120() -> np.ndarray: # pragma: no cover 63 """Get ABB irb120 MDH model.""" 64 return np.array( 65 [ 66 [0, 0, 0, 290], 67 [-np.pi / 2, 0, -np.pi / 2, 0], 68 [0, 270, 0, 0], 69 [-np.pi / 2, 70, 0, 302], 70 [np.pi / 2, 0, 0, 0], 71 [-np.pi / 2, 0, np.pi, 72], 72 ] 73 ) 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pybotics/predefined_models.py b/pybotics/predefined_models.py --- a/pybotics/predefined_models.py +++ b/pybotics/predefined_models.py @@ -1,4 +1,8 @@ -"""Predefined robot models.""" +"""Predefined robot models. + +These models correspond to the Modified Denavit–Hartenberg parameters: +https://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters +""" import numpy as np # type: ignore
{"golden_diff": "diff --git a/pybotics/predefined_models.py b/pybotics/predefined_models.py\n--- a/pybotics/predefined_models.py\n+++ b/pybotics/predefined_models.py\n@@ -1,4 +1,8 @@\n-\"\"\"Predefined robot models.\"\"\"\n+\"\"\"Predefined robot models.\n+\n+These models correspond to the Modified Denavit\u2013Hartenberg parameters:\n+https://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters\n+\"\"\"\n import numpy as np # type: ignore\n", "issue": "Create a way to add your own arm model[FEATURE]\n## User Story\r\n\r\n<!-- A clear and concise description of what the problem is. \r\nI want to add my own arm configuration to the list of pre-trained models.\r\n\r\n## Potential Solutions\r\n\r\n<!-- A clear and concise description of what you want to happen. -->\r\nIf there was a comment next to each line of one of the arrays containing the pre-trained model saying what exactly each value was supposed to represent, that would help.\r\n<!-- A clear and concise description of any alternative solutions or features you've considered. -->\r\nI tried looking at the spec sheets of the arms and matching up values but I couldn't figure much out. \r\n\r\n\n", "before_files": [{"content": "\"\"\"Predefined robot models.\"\"\"\nimport numpy as np # type: ignore\n\n\ndef kuka_lbr_iiwa_7() -> np.ndarray: # pragma: no cover\n \"\"\"Get KUKA LBR iiwa 7 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 340],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 400],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, 0, 400],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 126],\n ]\n )\n\n\ndef mecademic_meca500() -> np.ndarray: # pragma: no cover\n \"\"\"Get Meca500 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 135],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 135, 0, 0],\n [-np.pi / 2, 38, 0, 120],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n\n\ndef puma560() -> np.ndarray: # pragma: no cover\n \"\"\"Get PUMA560 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 0],\n [-np.pi / 2, 0, 0, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef ur10() -> np.ndarray: # pragma: no cover\n \"\"\"Get UR10 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 118],\n [np.pi / 2, 0, np.pi, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef abb_irb120() -> np.ndarray: # pragma: no cover\n \"\"\"Get ABB irb120 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 290],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 270, 0, 0],\n [-np.pi / 2, 70, 0, 302],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n", "path": "pybotics/predefined_models.py"}], "after_files": [{"content": "\"\"\"Predefined robot models.\n\nThese models correspond to the Modified Denavit\u2013Hartenberg parameters:\nhttps://en.wikipedia.org/wiki/Denavit%E2%80%93Hartenberg_parameters\n\"\"\"\nimport numpy as np # type: ignore\n\n\ndef kuka_lbr_iiwa_7() -> np.ndarray: # pragma: no cover\n \"\"\"Get KUKA LBR iiwa 7 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 340],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 400],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, 0, 400],\n [-np.pi / 2, 0, 0, 0],\n [np.pi / 2, 0, 0, 126],\n ]\n )\n\n\ndef mecademic_meca500() -> np.ndarray: # pragma: no cover\n \"\"\"Get Meca500 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 135],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 135, 0, 0],\n [-np.pi / 2, 38, 0, 120],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 
72],\n ]\n )\n\n\ndef puma560() -> np.ndarray: # pragma: no cover\n \"\"\"Get PUMA560 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 0],\n [-np.pi / 2, 0, 0, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef ur10() -> np.ndarray: # pragma: no cover\n \"\"\"Get UR10 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 118],\n [np.pi / 2, 0, np.pi, 0],\n [0, 612.7, 0, 0],\n [0, 571.6, 0, 163.9],\n [-np.pi / 2, 0, 0, 115.7],\n [np.pi / 2, 0, np.pi, 92.2],\n ]\n )\n\n\ndef abb_irb120() -> np.ndarray: # pragma: no cover\n \"\"\"Get ABB irb120 MDH model.\"\"\"\n return np.array(\n [\n [0, 0, 0, 290],\n [-np.pi / 2, 0, -np.pi / 2, 0],\n [0, 270, 0, 0],\n [-np.pi / 2, 70, 0, 302],\n [np.pi / 2, 0, 0, 0],\n [-np.pi / 2, 0, np.pi, 72],\n ]\n )\n", "path": "pybotics/predefined_models.py"}]}
num_tokens: 1,335
num_tokens_diff: 115

problem_id: gh_patches_debug_19312
source: rasdani/github-patches
task_type: git_diff
in_source_id: autogluon__autogluon-417
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- About ENAS_Scheduler While using ENAS_Scheduler Maybe: ``` tbar.set_description('avg reward: {}'.format(self.baseline)) ``` is better than ``` tbar.set_description('avg reward: {:.2f}'.format(self.baseline)) ``` in run in enas_scheduler.py, because when self.baseline=None, it will report error --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `autogluon/contrib/enas/enas_scheduler.py` Content: ``` 1 import os 2 import pickle 3 import logging 4 from collections import OrderedDict 5 from multiprocessing.pool import ThreadPool 6 7 import mxnet as mx 8 9 from ...searcher import RLSearcher 10 from ...scheduler.resource import get_gpu_count, get_cpu_count 11 from ...task.image_classification.dataset import get_built_in_dataset 12 from ...task.image_classification.utils import * 13 from ...utils import (mkdir, save, load, update_params, collect_params, DataLoader, tqdm, in_ipynb) 14 from .enas_utils import * 15 16 __all__ = ['ENAS_Scheduler'] 17 18 logger = logging.getLogger(__name__) 19 20 IMAGENET_TRAINING_SAMPLES = 1281167 21 22 class ENAS_Scheduler(object): 23 """ENAS Scheduler, which automatically creates LSTM controller based on the search spaces. 24 """ 25 def __init__(self, supernet, train_set='imagenet', val_set=None, 26 train_fn=default_train_fn, eval_fn=default_val_fn, 27 train_args={}, val_args={}, reward_fn= default_reward_fn, 28 num_gpus=0, num_cpus=4, 29 batch_size=256, epochs=120, warmup_epochs=5, 30 controller_lr=1e-3, controller_type='lstm', 31 controller_batch_size=10, ema_baseline_decay=0.95, 32 update_arch_frequency=20, checkname='./enas/checkpoint.ag', 33 plot_frequency=0, **kwargs): 34 num_cpus = get_cpu_count() if num_cpus > get_cpu_count() else num_cpus 35 num_gpus = get_gpu_count() if num_gpus > get_gpu_count() else num_gpus 36 self.supernet = supernet 37 self.train_fn = train_fn 38 self.eval_fn = eval_fn 39 self.reward_fn = reward_fn 40 self.checkname = checkname 41 self.plot_frequency = plot_frequency 42 self.epochs = epochs 43 self.warmup_epochs = warmup_epochs 44 self.controller_batch_size = controller_batch_size 45 kwspaces = self.supernet.kwspaces 46 47 self.initialize_miscs(train_set, val_set, batch_size, num_cpus, num_gpus, 48 train_args, val_args) 49 50 # create RL searcher/controller 51 self.baseline = None 52 self.ema_decay = ema_baseline_decay 53 self.searcher = RLSearcher(kwspaces, controller_type=controller_type, 54 prefetch=4, num_workers=4) 55 # controller setup 56 self.controller = self.searcher.controller 57 self.controller_optimizer = mx.gluon.Trainer( 58 self.controller.collect_params(), 'adam', 59 optimizer_params={'learning_rate': controller_lr}) 60 self.update_arch_frequency = update_arch_frequency 61 self.val_acc = 0 62 # async controller sample 63 self._worker_pool = ThreadPool(2) 64 self._data_buffer = {} 65 self._rcvd_idx = 0 66 self._sent_idx = 0 67 self._timeout = 20 68 # logging history 69 self.training_history = [] 70 self._prefetch_controller() 71 72 def initialize_miscs(self, train_set, val_set, batch_size, num_cpus, num_gpus, 73 train_args, val_args): 74 """Initialize framework related miscs, such as train/val data and train/val 75 function arguments. 
76 """ 77 ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu(0)] 78 self.supernet.collect_params().reset_ctx(ctx) 79 self.supernet.hybridize() 80 dataset_name = train_set 81 82 if isinstance(train_set, str): 83 train_set = get_built_in_dataset(dataset_name, train=True, batch_size=batch_size, 84 num_workers=num_cpus, shuffle=True) 85 val_set = get_built_in_dataset(dataset_name, train=False, batch_size=batch_size, 86 num_workers=num_cpus, shuffle=True) 87 if isinstance(train_set, gluon.data.Dataset): 88 self.train_data = DataLoader( 89 train_set, batch_size=batch_size, shuffle=True, 90 last_batch="discard", num_workers=num_cpus) 91 # very important, make shuffle for training contoller 92 self.val_data = DataLoader( 93 val_set, batch_size=batch_size, shuffle=True, 94 num_workers=num_cpus, prefetch=0, sample_times=self.controller_batch_size) 95 else: 96 self.train_data = train_set 97 self.val_data = val_set 98 iters_per_epoch = len(self.train_data) if hasattr(self.train_data, '__len__') else \ 99 IMAGENET_TRAINING_SAMPLES // batch_size 100 self.train_args = init_default_train_args(batch_size, self.supernet, self.epochs, iters_per_epoch) \ 101 if len(train_args) == 0 else train_args 102 self.val_args = val_args 103 self.val_args['ctx'] = ctx 104 self.val_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn 105 self.train_args['ctx'] = ctx 106 self.train_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn 107 self.ctx = ctx 108 109 def run(self): 110 tq = tqdm(range(self.epochs)) 111 for epoch in tq: 112 # for recordio data 113 if hasattr(self.train_data, 'reset'): self.train_data.reset() 114 tbar = tqdm(self.train_data) 115 idx = 0 116 for batch in tbar: 117 # sample network configuration 118 config = self.controller.pre_sample()[0] 119 self.supernet.sample(**config) 120 # self.train_fn(self.supernet, batch, **self.train_args) 121 self.train_fn(epoch, self.epochs, self.supernet, batch, **self.train_args) 122 mx.nd.waitall() 123 if epoch >= self.warmup_epochs and (idx % self.update_arch_frequency) == 0: 124 self.train_controller() 125 if self.plot_frequency > 0 and idx % self.plot_frequency == 0 and in_ipynb(): 126 graph = self.supernet.graph 127 graph.attr(rankdir='LR', size='8,3') 128 tbar.set_svg(graph._repr_svg_()) 129 tbar.set_description('avg reward: {:.2f}'.format(self.baseline)) 130 idx += 1 131 self.validation() 132 self.save() 133 tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \ 134 .format(epoch, self.val_acc, self.baseline)) 135 136 def validation(self): 137 if hasattr(self.val_data, 'reset'): self.val_data.reset() 138 # data iter, avoid memory leak 139 it = iter(self.val_data) 140 if hasattr(it, 'reset_sample_times'): it.reset_sample_times() 141 tbar = tqdm(it) 142 # update network arc 143 config = self.controller.inference() 144 self.supernet.sample(**config) 145 metric = mx.metric.Accuracy() 146 for batch in tbar: 147 self.eval_fn(self.supernet, batch, metric=metric, **self.val_args) 148 reward = metric.get()[1] 149 tbar.set_description('Val Acc: {}'.format(reward)) 150 151 self.val_acc = reward 152 self.training_history.append(reward) 153 154 def _sample_controller(self): 155 assert self._rcvd_idx < self._sent_idx, "rcvd_idx must be smaller than sent_idx" 156 try: 157 ret = self._data_buffer.pop(self._rcvd_idx) 158 self._rcvd_idx += 1 159 return ret.get(timeout=self._timeout) 160 except Exception: 161 self._worker_pool.terminate() 162 raise 163 164 def 
_prefetch_controller(self): 165 async_ret = self._worker_pool.apply_async(self._async_sample, ()) 166 self._data_buffer[self._sent_idx] = async_ret 167 self._sent_idx += 1 168 169 def _async_sample(self): 170 with mx.autograd.record(): 171 # sample controller_batch_size number of configurations 172 configs, log_probs, entropies = self.controller.sample(batch_size=self.controller_batch_size, 173 with_details=True) 174 return configs, log_probs, entropies 175 176 def train_controller(self): 177 """Run multiple number of trials 178 """ 179 decay = self.ema_decay 180 if hasattr(self.val_data, 'reset'): self.val_data.reset() 181 # update 182 metric = mx.metric.Accuracy() 183 with mx.autograd.record(): 184 # sample controller_batch_size number of configurations 185 configs, log_probs, entropies = self._sample_controller() 186 for i, batch in enumerate(self.val_data): 187 if i >= self.controller_batch_size: break 188 self.supernet.sample(**configs[i]) 189 # schedule the training tasks and gather the reward 190 metric.reset() 191 self.eval_fn(self.supernet, batch, metric=metric, **self.val_args) 192 reward = metric.get()[1] 193 reward = self.reward_fn(reward, self.supernet) 194 self.baseline = reward if not self.baseline else self.baseline 195 # substract baseline 196 avg_rewards = mx.nd.array([reward - self.baseline], 197 ctx=self.controller.context) 198 # EMA baseline 199 self.baseline = decay * self.baseline + (1 - decay) * reward 200 # negative policy gradient 201 log_prob = log_probs[i] 202 log_prob = log_prob.sum() 203 loss = - log_prob * avg_rewards 204 loss = loss.sum() 205 206 # update 207 loss.backward() 208 self.controller_optimizer.step(self.controller_batch_size) 209 self._prefetch_controller() 210 211 def load(self, checkname=None): 212 checkname = checkname if checkname else self.checkname 213 state_dict = load(checkname) 214 self.load_state_dict(state_dict) 215 216 def save(self, checkname=None): 217 checkname = checkname if checkname else self.checkname 218 mkdir(os.path.dirname(checkname)) 219 save(self.state_dict(), checkname) 220 221 def state_dict(self, destination=None): 222 if destination is None: 223 destination = OrderedDict() 224 destination._metadata = OrderedDict() 225 destination['supernet_params'] = collect_params(self.supernet) 226 destination['controller_params'] = collect_params(self.controller) 227 destination['training_history'] = self.training_history 228 return destination 229 230 def load_state_dict(self, state_dict): 231 update_params(self.supernet, state_dict['supernet_params'], ctx=self.ctx) 232 update_params(self.controller, state_dict['controller_params'], ctx=self.controller.context) 233 self.training_history = state_dict['training_history'] 234 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/autogluon/contrib/enas/enas_scheduler.py b/autogluon/contrib/enas/enas_scheduler.py --- a/autogluon/contrib/enas/enas_scheduler.py +++ b/autogluon/contrib/enas/enas_scheduler.py @@ -126,12 +126,15 @@ graph = self.supernet.graph graph.attr(rankdir='LR', size='8,3') tbar.set_svg(graph._repr_svg_()) - tbar.set_description('avg reward: {:.2f}'.format(self.baseline)) + if self.baseline: + tbar.set_description('avg reward: {:.2f}'.format(self.baseline)) idx += 1 self.validation() self.save() - tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \ - .format(epoch, self.val_acc, self.baseline)) + msg = 'epoch {}, val_acc: {:.2f}'.format(epoch, self.val_acc) + if self.baseline: + msg += ', avg reward: {:.2f}'.format(self.baseline) + tq.set_description(msg) def validation(self): if hasattr(self.val_data, 'reset'): self.val_data.reset()
{"golden_diff": "diff --git a/autogluon/contrib/enas/enas_scheduler.py b/autogluon/contrib/enas/enas_scheduler.py\n--- a/autogluon/contrib/enas/enas_scheduler.py\n+++ b/autogluon/contrib/enas/enas_scheduler.py\n@@ -126,12 +126,15 @@\n graph = self.supernet.graph\n graph.attr(rankdir='LR', size='8,3')\n tbar.set_svg(graph._repr_svg_())\n- tbar.set_description('avg reward: {:.2f}'.format(self.baseline))\n+ if self.baseline:\n+ tbar.set_description('avg reward: {:.2f}'.format(self.baseline))\n idx += 1\n self.validation()\n self.save()\n- tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \\\n- .format(epoch, self.val_acc, self.baseline))\n+ msg = 'epoch {}, val_acc: {:.2f}'.format(epoch, self.val_acc)\n+ if self.baseline:\n+ msg += ', avg reward: {:.2f}'.format(self.baseline)\n+ tq.set_description(msg)\n \n def validation(self):\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n", "issue": "About ENAS_Scheduler\nWhile using ENAS_Scheduler\r\nMaybe\uff1a\r\n```\r\ntbar.set_description('avg reward: {}'.format(self.baseline))\r\n```\r\nis better than \r\n\r\n```\r\ntbar.set_description('avg reward: {:.2f}'.format(self.baseline))\r\n```\r\nin run in enas_scheduler.py, because when self.baseline=None, it will report error\n", "before_files": [{"content": "import os\nimport pickle\nimport logging\nfrom collections import OrderedDict\nfrom multiprocessing.pool import ThreadPool\n\nimport mxnet as mx\n\nfrom ...searcher import RLSearcher\nfrom ...scheduler.resource import get_gpu_count, get_cpu_count\nfrom ...task.image_classification.dataset import get_built_in_dataset\nfrom ...task.image_classification.utils import *\nfrom ...utils import (mkdir, save, load, update_params, collect_params, DataLoader, tqdm, in_ipynb)\nfrom .enas_utils import *\n\n__all__ = ['ENAS_Scheduler']\n\nlogger = logging.getLogger(__name__)\n\nIMAGENET_TRAINING_SAMPLES = 1281167\n\nclass ENAS_Scheduler(object):\n \"\"\"ENAS Scheduler, which automatically creates LSTM controller based on the search spaces.\n \"\"\"\n def __init__(self, supernet, train_set='imagenet', val_set=None,\n train_fn=default_train_fn, eval_fn=default_val_fn,\n train_args={}, val_args={}, reward_fn= default_reward_fn,\n num_gpus=0, num_cpus=4,\n batch_size=256, epochs=120, warmup_epochs=5,\n controller_lr=1e-3, controller_type='lstm',\n controller_batch_size=10, ema_baseline_decay=0.95,\n update_arch_frequency=20, checkname='./enas/checkpoint.ag',\n plot_frequency=0, **kwargs):\n num_cpus = get_cpu_count() if num_cpus > get_cpu_count() else num_cpus\n num_gpus = get_gpu_count() if num_gpus > get_gpu_count() else num_gpus\n self.supernet = supernet\n self.train_fn = train_fn\n self.eval_fn = eval_fn\n self.reward_fn = reward_fn\n self.checkname = checkname\n self.plot_frequency = plot_frequency\n self.epochs = epochs\n self.warmup_epochs = warmup_epochs\n self.controller_batch_size = controller_batch_size\n kwspaces = self.supernet.kwspaces\n\n self.initialize_miscs(train_set, val_set, batch_size, num_cpus, num_gpus,\n train_args, val_args)\n\n # create RL searcher/controller\n self.baseline = None\n self.ema_decay = ema_baseline_decay\n self.searcher = RLSearcher(kwspaces, controller_type=controller_type,\n prefetch=4, num_workers=4)\n # controller setup\n self.controller = self.searcher.controller\n self.controller_optimizer = mx.gluon.Trainer(\n self.controller.collect_params(), 'adam',\n optimizer_params={'learning_rate': controller_lr})\n self.update_arch_frequency = update_arch_frequency\n self.val_acc = 0\n # async 
controller sample\n self._worker_pool = ThreadPool(2)\n self._data_buffer = {}\n self._rcvd_idx = 0\n self._sent_idx = 0\n self._timeout = 20\n # logging history\n self.training_history = []\n self._prefetch_controller()\n\n def initialize_miscs(self, train_set, val_set, batch_size, num_cpus, num_gpus,\n train_args, val_args):\n \"\"\"Initialize framework related miscs, such as train/val data and train/val\n function arguments.\n \"\"\"\n ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu(0)]\n self.supernet.collect_params().reset_ctx(ctx)\n self.supernet.hybridize()\n dataset_name = train_set\n\n if isinstance(train_set, str):\n train_set = get_built_in_dataset(dataset_name, train=True, batch_size=batch_size,\n num_workers=num_cpus, shuffle=True)\n val_set = get_built_in_dataset(dataset_name, train=False, batch_size=batch_size,\n num_workers=num_cpus, shuffle=True)\n if isinstance(train_set, gluon.data.Dataset):\n self.train_data = DataLoader(\n train_set, batch_size=batch_size, shuffle=True,\n last_batch=\"discard\", num_workers=num_cpus)\n # very important, make shuffle for training contoller\n self.val_data = DataLoader(\n val_set, batch_size=batch_size, shuffle=True,\n num_workers=num_cpus, prefetch=0, sample_times=self.controller_batch_size)\n else:\n self.train_data = train_set\n self.val_data = val_set\n iters_per_epoch = len(self.train_data) if hasattr(self.train_data, '__len__') else \\\n IMAGENET_TRAINING_SAMPLES // batch_size\n self.train_args = init_default_train_args(batch_size, self.supernet, self.epochs, iters_per_epoch) \\\n if len(train_args) == 0 else train_args\n self.val_args = val_args\n self.val_args['ctx'] = ctx\n self.val_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn\n self.train_args['ctx'] = ctx\n self.train_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn\n self.ctx = ctx\n\n def run(self):\n tq = tqdm(range(self.epochs))\n for epoch in tq:\n # for recordio data\n if hasattr(self.train_data, 'reset'): self.train_data.reset()\n tbar = tqdm(self.train_data)\n idx = 0\n for batch in tbar:\n # sample network configuration\n config = self.controller.pre_sample()[0]\n self.supernet.sample(**config)\n # self.train_fn(self.supernet, batch, **self.train_args)\n self.train_fn(epoch, self.epochs, self.supernet, batch, **self.train_args)\n mx.nd.waitall()\n if epoch >= self.warmup_epochs and (idx % self.update_arch_frequency) == 0:\n self.train_controller()\n if self.plot_frequency > 0 and idx % self.plot_frequency == 0 and in_ipynb():\n graph = self.supernet.graph\n graph.attr(rankdir='LR', size='8,3')\n tbar.set_svg(graph._repr_svg_())\n tbar.set_description('avg reward: {:.2f}'.format(self.baseline))\n idx += 1\n self.validation()\n self.save()\n tq.set_description('epoch {}, val_acc: {:.2f}, avg reward: {:.2f}' \\\n .format(epoch, self.val_acc, self.baseline))\n\n def validation(self):\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n # data iter, avoid memory leak\n it = iter(self.val_data)\n if hasattr(it, 'reset_sample_times'): it.reset_sample_times()\n tbar = tqdm(it)\n # update network arc\n config = self.controller.inference()\n self.supernet.sample(**config)\n metric = mx.metric.Accuracy()\n for batch in tbar:\n self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)\n reward = metric.get()[1]\n tbar.set_description('Val Acc: {}'.format(reward))\n\n self.val_acc = reward\n self.training_history.append(reward)\n\n def 
_sample_controller(self):\n assert self._rcvd_idx < self._sent_idx, \"rcvd_idx must be smaller than sent_idx\"\n try:\n ret = self._data_buffer.pop(self._rcvd_idx)\n self._rcvd_idx += 1\n return ret.get(timeout=self._timeout)\n except Exception:\n self._worker_pool.terminate()\n raise\n\n def _prefetch_controller(self):\n async_ret = self._worker_pool.apply_async(self._async_sample, ())\n self._data_buffer[self._sent_idx] = async_ret\n self._sent_idx += 1\n\n def _async_sample(self):\n with mx.autograd.record():\n # sample controller_batch_size number of configurations\n configs, log_probs, entropies = self.controller.sample(batch_size=self.controller_batch_size,\n with_details=True)\n return configs, log_probs, entropies\n\n def train_controller(self):\n \"\"\"Run multiple number of trials\n \"\"\"\n decay = self.ema_decay\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n # update \n metric = mx.metric.Accuracy()\n with mx.autograd.record():\n # sample controller_batch_size number of configurations\n configs, log_probs, entropies = self._sample_controller()\n for i, batch in enumerate(self.val_data):\n if i >= self.controller_batch_size: break\n self.supernet.sample(**configs[i])\n # schedule the training tasks and gather the reward\n metric.reset()\n self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)\n reward = metric.get()[1]\n reward = self.reward_fn(reward, self.supernet)\n self.baseline = reward if not self.baseline else self.baseline\n # substract baseline\n avg_rewards = mx.nd.array([reward - self.baseline],\n ctx=self.controller.context)\n # EMA baseline\n self.baseline = decay * self.baseline + (1 - decay) * reward\n # negative policy gradient\n log_prob = log_probs[i]\n log_prob = log_prob.sum()\n loss = - log_prob * avg_rewards\n loss = loss.sum()\n\n # update\n loss.backward()\n self.controller_optimizer.step(self.controller_batch_size)\n self._prefetch_controller()\n\n def load(self, checkname=None):\n checkname = checkname if checkname else self.checkname\n state_dict = load(checkname)\n self.load_state_dict(state_dict)\n\n def save(self, checkname=None):\n checkname = checkname if checkname else self.checkname\n mkdir(os.path.dirname(checkname))\n save(self.state_dict(), checkname)\n\n def state_dict(self, destination=None):\n if destination is None:\n destination = OrderedDict()\n destination._metadata = OrderedDict()\n destination['supernet_params'] = collect_params(self.supernet)\n destination['controller_params'] = collect_params(self.controller)\n destination['training_history'] = self.training_history\n return destination\n\n def load_state_dict(self, state_dict):\n update_params(self.supernet, state_dict['supernet_params'], ctx=self.ctx)\n update_params(self.controller, state_dict['controller_params'], ctx=self.controller.context)\n self.training_history = state_dict['training_history']\n", "path": "autogluon/contrib/enas/enas_scheduler.py"}], "after_files": [{"content": "import os\nimport pickle\nimport logging\nfrom collections import OrderedDict\nfrom multiprocessing.pool import ThreadPool\n\nimport mxnet as mx\n\nfrom ...searcher import RLSearcher\nfrom ...scheduler.resource import get_gpu_count, get_cpu_count\nfrom ...task.image_classification.dataset import get_built_in_dataset\nfrom ...task.image_classification.utils import *\nfrom ...utils import (mkdir, save, load, update_params, collect_params, DataLoader, tqdm, in_ipynb)\nfrom .enas_utils import *\n\n__all__ = ['ENAS_Scheduler']\n\nlogger = 
logging.getLogger(__name__)\n\nIMAGENET_TRAINING_SAMPLES = 1281167\n\nclass ENAS_Scheduler(object):\n \"\"\"ENAS Scheduler, which automatically creates LSTM controller based on the search spaces.\n \"\"\"\n def __init__(self, supernet, train_set='imagenet', val_set=None,\n train_fn=default_train_fn, eval_fn=default_val_fn,\n train_args={}, val_args={}, reward_fn= default_reward_fn,\n num_gpus=0, num_cpus=4,\n batch_size=256, epochs=120, warmup_epochs=5,\n controller_lr=1e-3, controller_type='lstm',\n controller_batch_size=10, ema_baseline_decay=0.95,\n update_arch_frequency=20, checkname='./enas/checkpoint.ag',\n plot_frequency=0, **kwargs):\n num_cpus = get_cpu_count() if num_cpus > get_cpu_count() else num_cpus\n num_gpus = get_gpu_count() if num_gpus > get_gpu_count() else num_gpus\n self.supernet = supernet\n self.train_fn = train_fn\n self.eval_fn = eval_fn\n self.reward_fn = reward_fn\n self.checkname = checkname\n self.plot_frequency = plot_frequency\n self.epochs = epochs\n self.warmup_epochs = warmup_epochs\n self.controller_batch_size = controller_batch_size\n kwspaces = self.supernet.kwspaces\n\n self.initialize_miscs(train_set, val_set, batch_size, num_cpus, num_gpus,\n train_args, val_args)\n\n # create RL searcher/controller\n self.baseline = None\n self.ema_decay = ema_baseline_decay\n self.searcher = RLSearcher(kwspaces, controller_type=controller_type,\n prefetch=4, num_workers=4)\n # controller setup\n self.controller = self.searcher.controller\n self.controller_optimizer = mx.gluon.Trainer(\n self.controller.collect_params(), 'adam',\n optimizer_params={'learning_rate': controller_lr})\n self.update_arch_frequency = update_arch_frequency\n self.val_acc = 0\n # async controller sample\n self._worker_pool = ThreadPool(2)\n self._data_buffer = {}\n self._rcvd_idx = 0\n self._sent_idx = 0\n self._timeout = 20\n # logging history\n self.training_history = []\n self._prefetch_controller()\n\n def initialize_miscs(self, train_set, val_set, batch_size, num_cpus, num_gpus,\n train_args, val_args):\n \"\"\"Initialize framework related miscs, such as train/val data and train/val\n function arguments.\n \"\"\"\n ctx = [mx.gpu(i) for i in range(num_gpus)] if num_gpus > 0 else [mx.cpu(0)]\n self.supernet.collect_params().reset_ctx(ctx)\n self.supernet.hybridize()\n dataset_name = train_set\n\n if isinstance(train_set, str):\n train_set = get_built_in_dataset(dataset_name, train=True, batch_size=batch_size,\n num_workers=num_cpus, shuffle=True)\n val_set = get_built_in_dataset(dataset_name, train=False, batch_size=batch_size,\n num_workers=num_cpus, shuffle=True)\n if isinstance(train_set, gluon.data.Dataset):\n self.train_data = DataLoader(\n train_set, batch_size=batch_size, shuffle=True,\n last_batch=\"discard\", num_workers=num_cpus)\n # very important, make shuffle for training contoller\n self.val_data = DataLoader(\n val_set, batch_size=batch_size, shuffle=True,\n num_workers=num_cpus, prefetch=0, sample_times=self.controller_batch_size)\n else:\n self.train_data = train_set\n self.val_data = val_set\n iters_per_epoch = len(self.train_data) if hasattr(self.train_data, '__len__') else \\\n IMAGENET_TRAINING_SAMPLES // batch_size\n self.train_args = init_default_train_args(batch_size, self.supernet, self.epochs, iters_per_epoch) \\\n if len(train_args) == 0 else train_args\n self.val_args = val_args\n self.val_args['ctx'] = ctx\n self.val_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn\n self.train_args['ctx'] = ctx\n 
self.train_args['batch_fn'] = imagenet_batch_fn if dataset_name == 'imagenet' else default_batch_fn\n self.ctx = ctx\n\n def run(self):\n tq = tqdm(range(self.epochs))\n for epoch in tq:\n # for recordio data\n if hasattr(self.train_data, 'reset'): self.train_data.reset()\n tbar = tqdm(self.train_data)\n idx = 0\n for batch in tbar:\n # sample network configuration\n config = self.controller.pre_sample()[0]\n self.supernet.sample(**config)\n # self.train_fn(self.supernet, batch, **self.train_args)\n self.train_fn(epoch, self.epochs, self.supernet, batch, **self.train_args)\n mx.nd.waitall()\n if epoch >= self.warmup_epochs and (idx % self.update_arch_frequency) == 0:\n self.train_controller()\n if self.plot_frequency > 0 and idx % self.plot_frequency == 0 and in_ipynb():\n graph = self.supernet.graph\n graph.attr(rankdir='LR', size='8,3')\n tbar.set_svg(graph._repr_svg_())\n if self.baseline:\n tbar.set_description('avg reward: {:.2f}'.format(self.baseline))\n idx += 1\n self.validation()\n self.save()\n msg = 'epoch {}, val_acc: {:.2f}'.format(epoch, self.val_acc)\n if self.baseline:\n msg += ', avg reward: {:.2f}'.format(self.baseline)\n tq.set_description(msg)\n\n def validation(self):\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n # data iter, avoid memory leak\n it = iter(self.val_data)\n if hasattr(it, 'reset_sample_times'): it.reset_sample_times()\n tbar = tqdm(it)\n # update network arc\n config = self.controller.inference()\n self.supernet.sample(**config)\n metric = mx.metric.Accuracy()\n for batch in tbar:\n self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)\n reward = metric.get()[1]\n tbar.set_description('Val Acc: {}'.format(reward))\n\n self.val_acc = reward\n self.training_history.append(reward)\n\n def _sample_controller(self):\n assert self._rcvd_idx < self._sent_idx, \"rcvd_idx must be smaller than sent_idx\"\n try:\n ret = self._data_buffer.pop(self._rcvd_idx)\n self._rcvd_idx += 1\n return ret.get(timeout=self._timeout)\n except Exception:\n self._worker_pool.terminate()\n raise\n\n def _prefetch_controller(self):\n async_ret = self._worker_pool.apply_async(self._async_sample, ())\n self._data_buffer[self._sent_idx] = async_ret\n self._sent_idx += 1\n\n def _async_sample(self):\n with mx.autograd.record():\n # sample controller_batch_size number of configurations\n configs, log_probs, entropies = self.controller.sample(batch_size=self.controller_batch_size,\n with_details=True)\n return configs, log_probs, entropies\n\n def train_controller(self):\n \"\"\"Run multiple number of trials\n \"\"\"\n decay = self.ema_decay\n if hasattr(self.val_data, 'reset'): self.val_data.reset()\n # update \n metric = mx.metric.Accuracy()\n with mx.autograd.record():\n # sample controller_batch_size number of configurations\n configs, log_probs, entropies = self._sample_controller()\n for i, batch in enumerate(self.val_data):\n if i >= self.controller_batch_size: break\n self.supernet.sample(**configs[i])\n # schedule the training tasks and gather the reward\n metric.reset()\n self.eval_fn(self.supernet, batch, metric=metric, **self.val_args)\n reward = metric.get()[1]\n reward = self.reward_fn(reward, self.supernet)\n self.baseline = reward if not self.baseline else self.baseline\n # substract baseline\n avg_rewards = mx.nd.array([reward - self.baseline],\n ctx=self.controller.context)\n # EMA baseline\n self.baseline = decay * self.baseline + (1 - decay) * reward\n # negative policy gradient\n log_prob = log_probs[i]\n log_prob = log_prob.sum()\n loss = - 
log_prob * avg_rewards\n loss = loss.sum()\n\n # update\n loss.backward()\n self.controller_optimizer.step(self.controller_batch_size)\n self._prefetch_controller()\n\n def load(self, checkname=None):\n checkname = checkname if checkname else self.checkname\n state_dict = load(checkname)\n self.load_state_dict(state_dict)\n\n def save(self, checkname=None):\n checkname = checkname if checkname else self.checkname\n mkdir(os.path.dirname(checkname))\n save(self.state_dict(), checkname)\n\n def state_dict(self, destination=None):\n if destination is None:\n destination = OrderedDict()\n destination._metadata = OrderedDict()\n destination['supernet_params'] = collect_params(self.supernet)\n destination['controller_params'] = collect_params(self.controller)\n destination['training_history'] = self.training_history\n return destination\n\n def load_state_dict(self, state_dict):\n update_params(self.supernet, state_dict['supernet_params'], ctx=self.ctx)\n update_params(self.controller, state_dict['controller_params'], ctx=self.controller.context)\n self.training_history = state_dict['training_history']\n", "path": "autogluon/contrib/enas/enas_scheduler.py"}]}
num_tokens: 3,171
num_tokens_diff: 282

problem_id: gh_patches_debug_1536
source: rasdani/github-patches
task_type: git_diff
in_source_id: mathesar-foundation__mathesar-2525
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Demo template management command unexpected args. ## Description <!-- A clear and concise description of what the bug is. --> After starting dev environment, the management command to setup the demo DB is broken. Trying to run: ```sh # docker exec -it mathesar_service_dev python manage.py setup_demo_template_db ``` results in: ``` Traceback (most recent call last): File "/code/manage.py", line 22, in <module> main() File "/code/manage.py", line 18, in main execute_from_command_line(sys.argv) File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 401, in execute_from_command_line utility.execute() File "/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py", line 395, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 330, in run_from_argv self.execute(*args, **cmd_options) File "/usr/local/lib/python3.9/site-packages/django/core/management/base.py", line 371, in execute output = self.handle(*args, **options) File "/code/demo/management/commands/setup_demo_template_db.py", line 15, in handle _setup_demo_template_db(*args, **options) TypeError: _setup_demo_template_db() got an unexpected keyword argument 'verbosity' ``` ## Expected behavior <!-- A clear and concise description of what you expected to happen. --> You should be able to run the command listed above successfully in the `dev` environment. ## To Reproduce <!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. --> Start the dev environment with a fresh docker state. Run the above command. ## Additional context <!-- Add any other context about the problem or screenshots here. --> The problem is in line 15 of `demo/management/commands/setup_demo_template.py`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `demo/management/commands/setup_demo_template_db.py` Content: ``` 1 from sqlalchemy import text 2 3 from django.conf import settings 4 from django.core.management import BaseCommand 5 6 from db.install import install_mathesar 7 from demo.install.datasets import load_datasets 8 from mathesar.database.base import create_mathesar_engine 9 10 11 class Command(BaseCommand): 12 help = 'Initialize the demo template database.' 
13 14 def handle(self, *args, **options): 15 _setup_demo_template_db(*args, **options) 16 17 18 def _setup_demo_template_db(): 19 print("Initializing demo template database...") 20 21 template_db_name = settings.MATHESAR_DEMO_TEMPLATE 22 root_engine = create_mathesar_engine(settings.DATABASES["default"]["NAME"]) 23 with root_engine.connect() as conn: 24 conn.execution_options(isolation_level="AUTOCOMMIT") 25 conn.execute(text(f"DROP DATABASE IF EXISTS {template_db_name} WITH (FORCE)")) 26 root_engine.dispose() 27 install_mathesar( 28 database_name=template_db_name, 29 username=settings.DATABASES["default"]["USER"], 30 password=settings.DATABASES["default"]["PASSWORD"], 31 hostname=settings.DATABASES["default"]["HOST"], 32 port=settings.DATABASES["default"]["PORT"], 33 skip_confirm=True 34 ) 35 user_engine = create_mathesar_engine(template_db_name) 36 load_datasets(user_engine) 37 user_engine.dispose() 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/demo/management/commands/setup_demo_template_db.py b/demo/management/commands/setup_demo_template_db.py --- a/demo/management/commands/setup_demo_template_db.py +++ b/demo/management/commands/setup_demo_template_db.py @@ -12,7 +12,7 @@ help = 'Initialize the demo template database.' def handle(self, *args, **options): - _setup_demo_template_db(*args, **options) + _setup_demo_template_db() def _setup_demo_template_db():
{"golden_diff": "diff --git a/demo/management/commands/setup_demo_template_db.py b/demo/management/commands/setup_demo_template_db.py\n--- a/demo/management/commands/setup_demo_template_db.py\n+++ b/demo/management/commands/setup_demo_template_db.py\n@@ -12,7 +12,7 @@\n help = 'Initialize the demo template database.'\n \n def handle(self, *args, **options):\n- _setup_demo_template_db(*args, **options)\n+ _setup_demo_template_db()\n \n \n def _setup_demo_template_db():\n", "issue": "Demo template management command unexpected args.\n## Description\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\nAfter starting dev environment, the management command to setup the demo DB is broken. Trying to run:\r\n```sh\r\n# docker exec -it mathesar_service_dev python manage.py setup_demo_template_db\r\n```\r\nresults in:\r\n```\r\nTraceback (most recent call last):\r\n File \"/code/manage.py\", line 22, in <module>\r\n main()\r\n File \"/code/manage.py\", line 18, in main\r\n execute_from_command_line(sys.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 401, in execute_from_command_line\r\n utility.execute()\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/__init__.py\", line 395, in execute\r\n self.fetch_command(subcommand).run_from_argv(self.argv)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 330, in run_from_argv\r\n self.execute(*args, **cmd_options)\r\n File \"/usr/local/lib/python3.9/site-packages/django/core/management/base.py\", line 371, in execute\r\n output = self.handle(*args, **options)\r\n File \"/code/demo/management/commands/setup_demo_template_db.py\", line 15, in handle\r\n _setup_demo_template_db(*args, **options)\r\nTypeError: _setup_demo_template_db() got an unexpected keyword argument 'verbosity'\r\n```\r\n\r\n## Expected behavior\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\nYou should be able to run the command listed above successfully in the `dev` environment.\r\n\r\n## To Reproduce\r\n<!-- How can we recreate this bug? Please try to provide a Minimal, Complete, and Verifiable (http://stackoverflow.com/help/mcve) example if code-related. -->\r\n\r\nStart the dev environment with a fresh docker state. Run the above command.\r\n\r\n## Additional context\r\n<!-- Add any other context about the problem or screenshots here. 
-->\r\n\r\nThe problem is in line 15 of `demo/management/commands/setup_demo_template.py`.\n", "before_files": [{"content": "from sqlalchemy import text\n\nfrom django.conf import settings\nfrom django.core.management import BaseCommand\n\nfrom db.install import install_mathesar\nfrom demo.install.datasets import load_datasets\nfrom mathesar.database.base import create_mathesar_engine\n\n\nclass Command(BaseCommand):\n help = 'Initialize the demo template database.'\n\n def handle(self, *args, **options):\n _setup_demo_template_db(*args, **options)\n\n\ndef _setup_demo_template_db():\n print(\"Initializing demo template database...\")\n\n template_db_name = settings.MATHESAR_DEMO_TEMPLATE\n root_engine = create_mathesar_engine(settings.DATABASES[\"default\"][\"NAME\"])\n with root_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"DROP DATABASE IF EXISTS {template_db_name} WITH (FORCE)\"))\n root_engine.dispose()\n install_mathesar(\n database_name=template_db_name,\n username=settings.DATABASES[\"default\"][\"USER\"],\n password=settings.DATABASES[\"default\"][\"PASSWORD\"],\n hostname=settings.DATABASES[\"default\"][\"HOST\"],\n port=settings.DATABASES[\"default\"][\"PORT\"],\n skip_confirm=True\n )\n user_engine = create_mathesar_engine(template_db_name)\n load_datasets(user_engine)\n user_engine.dispose()\n", "path": "demo/management/commands/setup_demo_template_db.py"}], "after_files": [{"content": "from sqlalchemy import text\n\nfrom django.conf import settings\nfrom django.core.management import BaseCommand\n\nfrom db.install import install_mathesar\nfrom demo.install.datasets import load_datasets\nfrom mathesar.database.base import create_mathesar_engine\n\n\nclass Command(BaseCommand):\n help = 'Initialize the demo template database.'\n\n def handle(self, *args, **options):\n _setup_demo_template_db()\n\n\ndef _setup_demo_template_db():\n print(\"Initializing demo template database...\")\n\n template_db_name = settings.MATHESAR_DEMO_TEMPLATE\n root_engine = create_mathesar_engine(settings.DATABASES[\"default\"][\"NAME\"])\n with root_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"DROP DATABASE IF EXISTS {template_db_name} WITH (FORCE)\"))\n root_engine.dispose()\n install_mathesar(\n database_name=template_db_name,\n username=settings.DATABASES[\"default\"][\"USER\"],\n password=settings.DATABASES[\"default\"][\"PASSWORD\"],\n hostname=settings.DATABASES[\"default\"][\"HOST\"],\n port=settings.DATABASES[\"default\"][\"PORT\"],\n skip_confirm=True\n )\n user_engine = create_mathesar_engine(template_db_name)\n load_datasets(user_engine)\n user_engine.dispose()\n", "path": "demo/management/commands/setup_demo_template_db.py"}]}
1068
115
gh_patches_debug_15065
rasdani/github-patches
git_diff
mkdocs__mkdocs-1986
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Nested dot folders copied upon build Sometimes it's necessary to include files in a theme which MkDocs should conditionally include in the templates, but not copy to the `site` directory when the documentation is built. As discussed in #1980, MkDocs will ignore files that start with a `.`, but this only applies to top-level directories. It would be great if MkDocs could exclude nested dot directories from the build. A concrete example: the next version of the Material theme will bundle the FontAwesome files as SVGs for inlining during build time, but MkDocs copies all of them to the `site` directory. The folder structure is: ``` assets/images/icons/fontawesome/... ``` The current workaround is to add a `*.html` to the files bundles with the theme, resulting in a `*.svg.html` extension for each of the 1.500 icons. While this works, it's semantically incorrect. If MkDocs would ignore nested dot directories, we could just use: ``` assets/images/icons/.fontawesome/... ``` As pointed out by @waylan, the code in question is here: https://github.com/mkdocs/mkdocs/blob/2fca717794c0c2e581b8ba17149bc292edeb83e4/mkdocs/structure/files.py#L67-L69 I'm not proficient in Python, but since this looks like a glob implementation, I would guess that `**/.*` would solve the problem --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mkdocs/structure/files.py` Content: ``` 1 import fnmatch 2 import os 3 import logging 4 from functools import cmp_to_key 5 from urllib.parse import quote as urlquote 6 7 from mkdocs import utils 8 9 10 log = logging.getLogger(__name__) 11 log.addFilter(utils.warning_filter) 12 13 14 class Files: 15 """ A collection of File objects. """ 16 def __init__(self, files): 17 self._files = files 18 self.src_paths = {file.src_path: file for file in files} 19 20 def __iter__(self): 21 return iter(self._files) 22 23 def __len__(self): 24 return len(self._files) 25 26 def __contains__(self, path): 27 return path in self.src_paths 28 29 def get_file_from_path(self, path): 30 """ Return a File instance with File.src_path equal to path. """ 31 return self.src_paths.get(os.path.normpath(path)) 32 33 def append(self, file): 34 """ Append file to Files collection. """ 35 self._files.append(file) 36 self.src_paths[file.src_path] = file 37 38 def copy_static_files(self, dirty=False): 39 """ Copy static files from source to destination. """ 40 for file in self: 41 if not file.is_documentation_page(): 42 file.copy_file(dirty) 43 44 def documentation_pages(self): 45 """ Return iterable of all Markdown page file objects. """ 46 return [file for file in self if file.is_documentation_page()] 47 48 def static_pages(self): 49 """ Return iterable of all static page file objects. """ 50 return [file for file in self if file.is_static_page()] 51 52 def media_files(self): 53 """ Return iterable of all file objects which are not documentation or static pages. """ 54 return [file for file in self if file.is_media_file()] 55 56 def javascript_files(self): 57 """ Return iterable of all javascript file objects. """ 58 return [file for file in self if file.is_javascript()] 59 60 def css_files(self): 61 """ Return iterable of all CSS file objects. 
""" 62 return [file for file in self if file.is_css()] 63 64 def add_files_from_theme(self, env, config): 65 """ Retrieve static files from Jinja environment and add to collection. """ 66 def filter(name): 67 patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml'] 68 patterns.extend('*{}'.format(x) for x in utils.markdown_extensions) 69 patterns.extend(config['theme'].static_templates) 70 for pattern in patterns: 71 if fnmatch.fnmatch(name.lower(), pattern): 72 return False 73 return True 74 for path in env.list_templates(filter_func=filter): 75 # Theme files do not override docs_dir files 76 path = os.path.normpath(path) 77 if path not in self: 78 for dir in config['theme'].dirs: 79 # Find the first theme dir which contains path 80 if os.path.isfile(os.path.join(dir, path)): 81 self.append(File(path, dir, config['site_dir'], config['use_directory_urls'])) 82 break 83 84 85 class File: 86 """ 87 A MkDocs File object. 88 89 Points to the source and destination locations of a file. 90 91 The `path` argument must be a path that exists relative to `src_dir`. 92 93 The `src_dir` and `dest_dir` must be absolute paths on the local file system. 94 95 The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is 96 mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is 97 mapped to an HTML index file (`index.html`) nested in a directory using the "name" of the file in `path`. The 98 `use_directory_urls` argument has no effect on non-Markdown files. 99 100 File objects have the following properties, which are Unicode strings: 101 102 File.src_path 103 The pure path of the source file relative to the source directory. 104 105 File.abs_src_path 106 The absolute concrete path of the source file. 107 108 File.dest_path 109 The pure path of the destination file relative to the destination directory. 110 111 File.abs_dest_path 112 The absolute concrete path of the destination file. 113 114 File.url 115 The url of the destination file relative to the destination directory as a string. 116 """ 117 def __init__(self, path, src_dir, dest_dir, use_directory_urls): 118 self.page = None 119 self.src_path = os.path.normpath(path) 120 self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path)) 121 self.name = self._get_stem() 122 self.dest_path = self._get_dest_path(use_directory_urls) 123 self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path)) 124 self.url = self._get_url(use_directory_urls) 125 126 def __eq__(self, other): 127 128 def sub_dict(d): 129 return {key: value for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url']} 130 131 return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__)) 132 133 def __ne__(self, other): 134 return not self.__eq__(other) 135 136 def _get_stem(self): 137 """ Return the name of the file without it's extension. """ 138 filename = os.path.basename(self.src_path) 139 stem, ext = os.path.splitext(filename) 140 return 'index' if stem in ('index', 'README') else stem 141 142 def _get_dest_path(self, use_directory_urls): 143 """ Return destination path based on source path. 
""" 144 if self.is_documentation_page(): 145 if use_directory_urls: 146 parent, filename = os.path.split(self.src_path) 147 if self.name == 'index': 148 # index.md or README.md => index.html 149 return os.path.join(parent, 'index.html') 150 else: 151 # foo.md => foo/index.html 152 return os.path.join(parent, self.name, 'index.html') 153 else: 154 # foo.md => foo.html 155 root, ext = os.path.splitext(self.src_path) 156 return root + '.html' 157 return self.src_path 158 159 def _get_url(self, use_directory_urls): 160 """ Return url based in destination path. """ 161 url = self.dest_path.replace(os.path.sep, '/') 162 dirname, filename = os.path.split(url) 163 if use_directory_urls and filename == 'index.html': 164 if dirname == '': 165 url = '.' 166 else: 167 url = dirname + '/' 168 return urlquote(url) 169 170 def url_relative_to(self, other): 171 """ Return url for file relative to other file. """ 172 return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other) 173 174 def copy_file(self, dirty=False): 175 """ Copy source file to destination, ensuring parent directories exist. """ 176 if dirty and not self.is_modified(): 177 log.debug("Skip copying unmodified file: '{}'".format(self.src_path)) 178 else: 179 log.debug("Copying media file: '{}'".format(self.src_path)) 180 utils.copy_file(self.abs_src_path, self.abs_dest_path) 181 182 def is_modified(self): 183 if os.path.isfile(self.abs_dest_path): 184 return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path) 185 return True 186 187 def is_documentation_page(self): 188 """ Return True if file is a Markdown page. """ 189 return os.path.splitext(self.src_path)[1] in utils.markdown_extensions 190 191 def is_static_page(self): 192 """ Return True if file is a static page (html, xml, json). """ 193 return os.path.splitext(self.src_path)[1] in ( 194 '.html', 195 '.htm', 196 '.xml', 197 '.json', 198 ) 199 200 def is_media_file(self): 201 """ Return True if file is not a documentation or static page. """ 202 return not (self.is_documentation_page() or self.is_static_page()) 203 204 def is_javascript(self): 205 """ Return True if file is a JavaScript file. """ 206 return os.path.splitext(self.src_path)[1] in ( 207 '.js', 208 '.javascript', 209 ) 210 211 def is_css(self): 212 """ Return True if file is a CSS file. """ 213 return os.path.splitext(self.src_path)[1] in ( 214 '.css', 215 ) 216 217 218 def get_files(config): 219 """ Walk the `docs_dir` and return a Files collection. """ 220 files = [] 221 exclude = ['.*', '/templates'] 222 223 for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True): 224 relative_dir = os.path.relpath(source_dir, config['docs_dir']) 225 226 for dirname in list(dirnames): 227 path = os.path.normpath(os.path.join(relative_dir, dirname)) 228 # Skip any excluded directories 229 if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude): 230 dirnames.remove(dirname) 231 dirnames.sort() 232 233 for filename in _sort_files(filenames): 234 path = os.path.normpath(os.path.join(relative_dir, filename)) 235 # Skip any excluded files 236 if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude): 237 continue 238 # Skip README.md if an index file also exists in dir 239 if filename.lower() == 'readme.md' and 'index.md' in filenames: 240 log.warning("Both index.md and readme.md found. 
Skipping readme.md from {}".format(source_dir)) 241 continue 242 files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls'])) 243 244 return Files(files) 245 246 247 def _sort_files(filenames): 248 """ Always sort `index` or `README` as first filename in list. """ 249 250 def compare(x, y): 251 if x == y: 252 return 0 253 if os.path.splitext(y)[0] in ['index', 'README']: 254 return 1 255 if os.path.splitext(x)[0] in ['index', 'README'] or x < y: 256 return -1 257 return 1 258 259 return sorted(filenames, key=cmp_to_key(compare)) 260 261 262 def _filter_paths(basename, path, is_dir, exclude): 263 """ .gitignore style file filtering. """ 264 for item in exclude: 265 # Items ending in '/' apply only to directories. 266 if item.endswith('/') and not is_dir: 267 continue 268 # Items starting with '/' apply to the whole path. 269 # In any other cases just the basename is used. 270 match = path if item.startswith('/') else basename 271 if fnmatch.fnmatch(match, item.strip('/')): 272 return True 273 return False 274 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py --- a/mkdocs/structure/files.py +++ b/mkdocs/structure/files.py @@ -64,7 +64,8 @@ def add_files_from_theme(self, env, config): """ Retrieve static files from Jinja environment and add to collection. """ def filter(name): - patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml'] + # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels + patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml'] patterns.extend('*{}'.format(x) for x in utils.markdown_extensions) patterns.extend(config['theme'].static_templates) for pattern in patterns:
{"golden_diff": "diff --git a/mkdocs/structure/files.py b/mkdocs/structure/files.py\n--- a/mkdocs/structure/files.py\n+++ b/mkdocs/structure/files.py\n@@ -64,7 +64,8 @@\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n- patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n+ # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels\n+ patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n", "issue": "Nested dot folders copied upon build\nSometimes it's necessary to include files in a theme which MkDocs should conditionally include in the templates, but not copy to the `site` directory when the documentation is built. As discussed in #1980, MkDocs will ignore files that start with a `.`, but this only applies to top-level directories. It would be great if MkDocs could exclude nested dot directories from the build.\r\n\r\nA concrete example: the next version of the Material theme will bundle the FontAwesome files as SVGs for inlining during build time, but MkDocs copies all of them to the `site` directory. The folder structure is:\r\n\r\n```\r\nassets/images/icons/fontawesome/...\r\n```\r\n\r\nThe current workaround is to add a `*.html` to the files bundles with the theme, resulting in a `*.svg.html` extension for each of the 1.500 icons. While this works, it's semantically incorrect.\r\n\r\nIf MkDocs would ignore nested dot directories, we could just use:\r\n\r\n```\r\nassets/images/icons/.fontawesome/...\r\n```\r\n\r\nAs pointed out by @waylan, the code in question is here:\r\n\r\nhttps://github.com/mkdocs/mkdocs/blob/2fca717794c0c2e581b8ba17149bc292edeb83e4/mkdocs/structure/files.py#L67-L69\r\n\r\nI'm not proficient in Python, but since this looks like a glob implementation, I would guess that `**/.*` would solve the problem\n", "before_files": [{"content": "import fnmatch\nimport os\nimport logging\nfrom functools import cmp_to_key\nfrom urllib.parse import quote as urlquote\n\nfrom mkdocs import utils\n\n\nlog = logging.getLogger(__name__)\nlog.addFilter(utils.warning_filter)\n\n\nclass Files:\n \"\"\" A collection of File objects. \"\"\"\n def __init__(self, files):\n self._files = files\n self.src_paths = {file.src_path: file for file in files}\n\n def __iter__(self):\n return iter(self._files)\n\n def __len__(self):\n return len(self._files)\n\n def __contains__(self, path):\n return path in self.src_paths\n\n def get_file_from_path(self, path):\n \"\"\" Return a File instance with File.src_path equal to path. \"\"\"\n return self.src_paths.get(os.path.normpath(path))\n\n def append(self, file):\n \"\"\" Append file to Files collection. \"\"\"\n self._files.append(file)\n self.src_paths[file.src_path] = file\n\n def copy_static_files(self, dirty=False):\n \"\"\" Copy static files from source to destination. \"\"\"\n for file in self:\n if not file.is_documentation_page():\n file.copy_file(dirty)\n\n def documentation_pages(self):\n \"\"\" Return iterable of all Markdown page file objects. \"\"\"\n return [file for file in self if file.is_documentation_page()]\n\n def static_pages(self):\n \"\"\" Return iterable of all static page file objects. 
\"\"\"\n return [file for file in self if file.is_static_page()]\n\n def media_files(self):\n \"\"\" Return iterable of all file objects which are not documentation or static pages. \"\"\"\n return [file for file in self if file.is_media_file()]\n\n def javascript_files(self):\n \"\"\" Return iterable of all javascript file objects. \"\"\"\n return [file for file in self if file.is_javascript()]\n\n def css_files(self):\n \"\"\" Return iterable of all CSS file objects. \"\"\"\n return [file for file in self if file.is_css()]\n\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. \"\"\"\n def filter(name):\n patterns = ['.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n # Theme files do not override docs_dir files\n path = os.path.normpath(path)\n if path not in self:\n for dir in config['theme'].dirs:\n # Find the first theme dir which contains path\n if os.path.isfile(os.path.join(dir, path)):\n self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))\n break\n\n\nclass File:\n \"\"\"\n A MkDocs File object.\n\n Points to the source and destination locations of a file.\n\n The `path` argument must be a path that exists relative to `src_dir`.\n\n The `src_dir` and `dest_dir` must be absolute paths on the local file system.\n\n The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is\n mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is\n mapped to an HTML index file (`index.html`) nested in a directory using the \"name\" of the file in `path`. The\n `use_directory_urls` argument has no effect on non-Markdown files.\n\n File objects have the following properties, which are Unicode strings:\n\n File.src_path\n The pure path of the source file relative to the source directory.\n\n File.abs_src_path\n The absolute concrete path of the source file.\n\n File.dest_path\n The pure path of the destination file relative to the destination directory.\n\n File.abs_dest_path\n The absolute concrete path of the destination file.\n\n File.url\n The url of the destination file relative to the destination directory as a string.\n \"\"\"\n def __init__(self, path, src_dir, dest_dir, use_directory_urls):\n self.page = None\n self.src_path = os.path.normpath(path)\n self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))\n self.name = self._get_stem()\n self.dest_path = self._get_dest_path(use_directory_urls)\n self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))\n self.url = self._get_url(use_directory_urls)\n\n def __eq__(self, other):\n\n def sub_dict(d):\n return {key: value for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url']}\n\n return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def _get_stem(self):\n \"\"\" Return the name of the file without it's extension. 
\"\"\"\n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n\n def _get_dest_path(self, use_directory_urls):\n \"\"\" Return destination path based on source path. \"\"\"\n if self.is_documentation_page():\n if use_directory_urls:\n parent, filename = os.path.split(self.src_path)\n if self.name == 'index':\n # index.md or README.md => index.html\n return os.path.join(parent, 'index.html')\n else:\n # foo.md => foo/index.html\n return os.path.join(parent, self.name, 'index.html')\n else:\n # foo.md => foo.html\n root, ext = os.path.splitext(self.src_path)\n return root + '.html'\n return self.src_path\n\n def _get_url(self, use_directory_urls):\n \"\"\" Return url based in destination path. \"\"\"\n url = self.dest_path.replace(os.path.sep, '/')\n dirname, filename = os.path.split(url)\n if use_directory_urls and filename == 'index.html':\n if dirname == '':\n url = '.'\n else:\n url = dirname + '/'\n return urlquote(url)\n\n def url_relative_to(self, other):\n \"\"\" Return url for file relative to other file. \"\"\"\n return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)\n\n def copy_file(self, dirty=False):\n \"\"\" Copy source file to destination, ensuring parent directories exist. \"\"\"\n if dirty and not self.is_modified():\n log.debug(\"Skip copying unmodified file: '{}'\".format(self.src_path))\n else:\n log.debug(\"Copying media file: '{}'\".format(self.src_path))\n utils.copy_file(self.abs_src_path, self.abs_dest_path)\n\n def is_modified(self):\n if os.path.isfile(self.abs_dest_path):\n return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)\n return True\n\n def is_documentation_page(self):\n \"\"\" Return True if file is a Markdown page. \"\"\"\n return os.path.splitext(self.src_path)[1] in utils.markdown_extensions\n\n def is_static_page(self):\n \"\"\" Return True if file is a static page (html, xml, json). \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.html',\n '.htm',\n '.xml',\n '.json',\n )\n\n def is_media_file(self):\n \"\"\" Return True if file is not a documentation or static page. \"\"\"\n return not (self.is_documentation_page() or self.is_static_page())\n\n def is_javascript(self):\n \"\"\" Return True if file is a JavaScript file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.js',\n '.javascript',\n )\n\n def is_css(self):\n \"\"\" Return True if file is a CSS file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.css',\n )\n\n\ndef get_files(config):\n \"\"\" Walk the `docs_dir` and return a Files collection. \"\"\"\n files = []\n exclude = ['.*', '/templates']\n\n for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):\n relative_dir = os.path.relpath(source_dir, config['docs_dir'])\n\n for dirname in list(dirnames):\n path = os.path.normpath(os.path.join(relative_dir, dirname))\n # Skip any excluded directories\n if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):\n dirnames.remove(dirname)\n dirnames.sort()\n\n for filename in _sort_files(filenames):\n path = os.path.normpath(os.path.join(relative_dir, filename))\n # Skip any excluded files\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n if filename.lower() == 'readme.md' and 'index.md' in filenames:\n log.warning(\"Both index.md and readme.md found. 
Skipping readme.md from {}\".format(source_dir))\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n\n return Files(files)\n\n\ndef _sort_files(filenames):\n \"\"\" Always sort `index` or `README` as first filename in list. \"\"\"\n\n def compare(x, y):\n if x == y:\n return 0\n if os.path.splitext(y)[0] in ['index', 'README']:\n return 1\n if os.path.splitext(x)[0] in ['index', 'README'] or x < y:\n return -1\n return 1\n\n return sorted(filenames, key=cmp_to_key(compare))\n\n\ndef _filter_paths(basename, path, is_dir, exclude):\n \"\"\" .gitignore style file filtering. \"\"\"\n for item in exclude:\n # Items ending in '/' apply only to directories.\n if item.endswith('/') and not is_dir:\n continue\n # Items starting with '/' apply to the whole path.\n # In any other cases just the basename is used.\n match = path if item.startswith('/') else basename\n if fnmatch.fnmatch(match, item.strip('/')):\n return True\n return False\n", "path": "mkdocs/structure/files.py"}], "after_files": [{"content": "import fnmatch\nimport os\nimport logging\nfrom functools import cmp_to_key\nfrom urllib.parse import quote as urlquote\n\nfrom mkdocs import utils\n\n\nlog = logging.getLogger(__name__)\nlog.addFilter(utils.warning_filter)\n\n\nclass Files:\n \"\"\" A collection of File objects. \"\"\"\n def __init__(self, files):\n self._files = files\n self.src_paths = {file.src_path: file for file in files}\n\n def __iter__(self):\n return iter(self._files)\n\n def __len__(self):\n return len(self._files)\n\n def __contains__(self, path):\n return path in self.src_paths\n\n def get_file_from_path(self, path):\n \"\"\" Return a File instance with File.src_path equal to path. \"\"\"\n return self.src_paths.get(os.path.normpath(path))\n\n def append(self, file):\n \"\"\" Append file to Files collection. \"\"\"\n self._files.append(file)\n self.src_paths[file.src_path] = file\n\n def copy_static_files(self, dirty=False):\n \"\"\" Copy static files from source to destination. \"\"\"\n for file in self:\n if not file.is_documentation_page():\n file.copy_file(dirty)\n\n def documentation_pages(self):\n \"\"\" Return iterable of all Markdown page file objects. \"\"\"\n return [file for file in self if file.is_documentation_page()]\n\n def static_pages(self):\n \"\"\" Return iterable of all static page file objects. \"\"\"\n return [file for file in self if file.is_static_page()]\n\n def media_files(self):\n \"\"\" Return iterable of all file objects which are not documentation or static pages. \"\"\"\n return [file for file in self if file.is_media_file()]\n\n def javascript_files(self):\n \"\"\" Return iterable of all javascript file objects. \"\"\"\n return [file for file in self if file.is_javascript()]\n\n def css_files(self):\n \"\"\" Return iterable of all CSS file objects. \"\"\"\n return [file for file in self if file.is_css()]\n\n def add_files_from_theme(self, env, config):\n \"\"\" Retrieve static files from Jinja environment and add to collection. 
\"\"\"\n def filter(name):\n # '.*' filters dot files/dirs at root level whereas '*/.*' filters nested levels\n patterns = ['.*', '*/.*', '*.py', '*.pyc', '*.html', '*readme*', 'mkdocs_theme.yml']\n patterns.extend('*{}'.format(x) for x in utils.markdown_extensions)\n patterns.extend(config['theme'].static_templates)\n for pattern in patterns:\n if fnmatch.fnmatch(name.lower(), pattern):\n return False\n return True\n for path in env.list_templates(filter_func=filter):\n # Theme files do not override docs_dir files\n path = os.path.normpath(path)\n if path not in self:\n for dir in config['theme'].dirs:\n # Find the first theme dir which contains path\n if os.path.isfile(os.path.join(dir, path)):\n self.append(File(path, dir, config['site_dir'], config['use_directory_urls']))\n break\n\n\nclass File:\n \"\"\"\n A MkDocs File object.\n\n Points to the source and destination locations of a file.\n\n The `path` argument must be a path that exists relative to `src_dir`.\n\n The `src_dir` and `dest_dir` must be absolute paths on the local file system.\n\n The `use_directory_urls` argument controls how destination paths are generated. If `False`, a Markdown file is\n mapped to an HTML file of the same name (the file extension is changed to `.html`). If True, a Markdown file is\n mapped to an HTML index file (`index.html`) nested in a directory using the \"name\" of the file in `path`. The\n `use_directory_urls` argument has no effect on non-Markdown files.\n\n File objects have the following properties, which are Unicode strings:\n\n File.src_path\n The pure path of the source file relative to the source directory.\n\n File.abs_src_path\n The absolute concrete path of the source file.\n\n File.dest_path\n The pure path of the destination file relative to the destination directory.\n\n File.abs_dest_path\n The absolute concrete path of the destination file.\n\n File.url\n The url of the destination file relative to the destination directory as a string.\n \"\"\"\n def __init__(self, path, src_dir, dest_dir, use_directory_urls):\n self.page = None\n self.src_path = os.path.normpath(path)\n self.abs_src_path = os.path.normpath(os.path.join(src_dir, self.src_path))\n self.name = self._get_stem()\n self.dest_path = self._get_dest_path(use_directory_urls)\n self.abs_dest_path = os.path.normpath(os.path.join(dest_dir, self.dest_path))\n self.url = self._get_url(use_directory_urls)\n\n def __eq__(self, other):\n\n def sub_dict(d):\n return {key: value for key, value in d.items() if key in ['src_path', 'abs_src_path', 'url']}\n\n return (isinstance(other, self.__class__) and sub_dict(self.__dict__) == sub_dict(other.__dict__))\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def _get_stem(self):\n \"\"\" Return the name of the file without it's extension. \"\"\"\n filename = os.path.basename(self.src_path)\n stem, ext = os.path.splitext(filename)\n return 'index' if stem in ('index', 'README') else stem\n\n def _get_dest_path(self, use_directory_urls):\n \"\"\" Return destination path based on source path. 
\"\"\"\n if self.is_documentation_page():\n if use_directory_urls:\n parent, filename = os.path.split(self.src_path)\n if self.name == 'index':\n # index.md or README.md => index.html\n return os.path.join(parent, 'index.html')\n else:\n # foo.md => foo/index.html\n return os.path.join(parent, self.name, 'index.html')\n else:\n # foo.md => foo.html\n root, ext = os.path.splitext(self.src_path)\n return root + '.html'\n return self.src_path\n\n def _get_url(self, use_directory_urls):\n \"\"\" Return url based in destination path. \"\"\"\n url = self.dest_path.replace(os.path.sep, '/')\n dirname, filename = os.path.split(url)\n if use_directory_urls and filename == 'index.html':\n if dirname == '':\n url = '.'\n else:\n url = dirname + '/'\n return urlquote(url)\n\n def url_relative_to(self, other):\n \"\"\" Return url for file relative to other file. \"\"\"\n return utils.get_relative_url(self.url, other.url if isinstance(other, File) else other)\n\n def copy_file(self, dirty=False):\n \"\"\" Copy source file to destination, ensuring parent directories exist. \"\"\"\n if dirty and not self.is_modified():\n log.debug(\"Skip copying unmodified file: '{}'\".format(self.src_path))\n else:\n log.debug(\"Copying media file: '{}'\".format(self.src_path))\n utils.copy_file(self.abs_src_path, self.abs_dest_path)\n\n def is_modified(self):\n if os.path.isfile(self.abs_dest_path):\n return os.path.getmtime(self.abs_dest_path) < os.path.getmtime(self.abs_src_path)\n return True\n\n def is_documentation_page(self):\n \"\"\" Return True if file is a Markdown page. \"\"\"\n return os.path.splitext(self.src_path)[1] in utils.markdown_extensions\n\n def is_static_page(self):\n \"\"\" Return True if file is a static page (html, xml, json). \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.html',\n '.htm',\n '.xml',\n '.json',\n )\n\n def is_media_file(self):\n \"\"\" Return True if file is not a documentation or static page. \"\"\"\n return not (self.is_documentation_page() or self.is_static_page())\n\n def is_javascript(self):\n \"\"\" Return True if file is a JavaScript file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.js',\n '.javascript',\n )\n\n def is_css(self):\n \"\"\" Return True if file is a CSS file. \"\"\"\n return os.path.splitext(self.src_path)[1] in (\n '.css',\n )\n\n\ndef get_files(config):\n \"\"\" Walk the `docs_dir` and return a Files collection. \"\"\"\n files = []\n exclude = ['.*', '/templates']\n\n for source_dir, dirnames, filenames in os.walk(config['docs_dir'], followlinks=True):\n relative_dir = os.path.relpath(source_dir, config['docs_dir'])\n\n for dirname in list(dirnames):\n path = os.path.normpath(os.path.join(relative_dir, dirname))\n # Skip any excluded directories\n if _filter_paths(basename=dirname, path=path, is_dir=True, exclude=exclude):\n dirnames.remove(dirname)\n dirnames.sort()\n\n for filename in _sort_files(filenames):\n path = os.path.normpath(os.path.join(relative_dir, filename))\n # Skip any excluded files\n if _filter_paths(basename=filename, path=path, is_dir=False, exclude=exclude):\n continue\n # Skip README.md if an index file also exists in dir\n if filename.lower() == 'readme.md' and 'index.md' in filenames:\n log.warning(\"Both index.md and readme.md found. 
Skipping readme.md from {}\".format(source_dir))\n continue\n files.append(File(path, config['docs_dir'], config['site_dir'], config['use_directory_urls']))\n\n return Files(files)\n\n\ndef _sort_files(filenames):\n \"\"\" Always sort `index` or `README` as first filename in list. \"\"\"\n\n def compare(x, y):\n if x == y:\n return 0\n if os.path.splitext(y)[0] in ['index', 'README']:\n return 1\n if os.path.splitext(x)[0] in ['index', 'README'] or x < y:\n return -1\n return 1\n\n return sorted(filenames, key=cmp_to_key(compare))\n\n\ndef _filter_paths(basename, path, is_dir, exclude):\n \"\"\" .gitignore style file filtering. \"\"\"\n for item in exclude:\n # Items ending in '/' apply only to directories.\n if item.endswith('/') and not is_dir:\n continue\n # Items starting with '/' apply to the whole path.\n # In any other cases just the basename is used.\n match = path if item.startswith('/') else basename\n if fnmatch.fnmatch(match, item.strip('/')):\n return True\n return False\n", "path": "mkdocs/structure/files.py"}]}
3578
197
gh_patches_debug_5188
rasdani/github-patches
git_diff
Lightning-Universe__lightning-flash-104
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Download Data Fails if Content Length Not Defined in Header ## 🐛 Bug When I try to download a zip file using download_data from flash.core.data it fails because the response header does not contain a value for 'Content Length' this should be check for and handled in the code. ### To Reproduce Steps to reproduce the behavior: KeyError Traceback (most recent call last) <ipython-input-7-aa10e89f3a8e> in <module>() 1 # 1. Download the data ----> 2 download_data("https://github.com/karoldvl/ESC-50/archive/master.zip", 'data/') 2 frames /content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_data(url, path) 75 76 """ ---> 77 download_file(url, path) 78 79 /content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_file(url, path, verbose) 36 local_filename = os.path.join(path, url.split('/')[-1]) 37 r = requests.get(url, stream=True) ---> 38 file_size = int(r.headers['Content-Length']) 39 chunk = 1 40 chunk_size = 1024 /usr/local/lib/python3.6/dist-packages/requests/structures.py in __getitem__(self, key) 52 53 def __getitem__(self, key): ---> 54 return self._store[key.lower()][1] 55 56 def __delitem__(self, key): KeyError: 'content-length' <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> #### Code sample <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> ```python import flash from flash.core.data import download_data download_data("https://github.com/karoldvl/ESC-50/archive/master.zip", 'data/') ``` ### Expected behavior File downloads and extracts ESC-50 data into datasets folder ### Environment Default Collab Configuration ### Additional context <!-- Add any other context about the problem here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flash/core/data/utils.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 import os.path 16 import zipfile 17 from typing import Any, Type 18 19 import requests 20 import torch 21 from tqdm.auto import tqdm as tq 22 23 24 # Code taken from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603 25 # __author__ = "github.com/ruxi" 26 # __license__ = "MIT" 27 def download_file(url: str, path: str, verbose: bool = False) -> None: 28 """ 29 Download file with progressbar 30 31 Usage: 32 download_file('http://web4host.net/5MB.zip') 33 """ 34 if not os.path.exists(path): 35 os.makedirs(path) 36 local_filename = os.path.join(path, url.split('/')[-1]) 37 r = requests.get(url, stream=True) 38 file_size = int(r.headers['Content-Length']) 39 chunk = 1 40 chunk_size = 1024 41 num_bars = int(file_size / chunk_size) 42 if verbose: 43 print(dict(file_size=file_size)) 44 print(dict(num_bars=num_bars)) 45 46 if not os.path.exists(local_filename): 47 with open(local_filename, 'wb') as fp: 48 for chunk in tq( 49 r.iter_content(chunk_size=chunk_size), 50 total=num_bars, 51 unit='KB', 52 desc=local_filename, 53 leave=True # progressbar stays 54 ): 55 fp.write(chunk) # type: ignore 56 57 if '.zip' in local_filename: 58 if os.path.exists(local_filename): 59 with zipfile.ZipFile(local_filename, 'r') as zip_ref: 60 zip_ref.extractall(path) 61 62 63 def download_data(url: str, path: str = "data/") -> None: 64 """ 65 Downloads data automatically from the given url to the path. Defaults to data/ for the path. 66 Automatically handles .csv, .zip 67 68 Example:: 69 70 from flash import download_data 71 72 Args: 73 url: path 74 path: local 75 76 """ 77 download_file(url, path) 78 79 80 def _contains_any_tensor(value: Any, dtype: Type = torch.Tensor) -> bool: 81 # TODO: we should refactor FlashDatasetFolder to better integrate 82 # with DataPipeline. That way, we wouldn't need this check. 83 # This is because we are running transforms in both places. 84 if isinstance(value, dtype): 85 return True 86 if isinstance(value, (list, tuple)): 87 return any(_contains_any_tensor(v, dtype=dtype) for v in value) 88 elif isinstance(value, dict): 89 return any(_contains_any_tensor(v, dtype=dtype) for v in value.values()) 90 return False 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flash/core/data/utils.py b/flash/core/data/utils.py --- a/flash/core/data/utils.py +++ b/flash/core/data/utils.py @@ -35,7 +35,7 @@ os.makedirs(path) local_filename = os.path.join(path, url.split('/')[-1]) r = requests.get(url, stream=True) - file_size = int(r.headers['Content-Length']) + file_size = int(r.headers['Content-Length']) if 'Content-Length' in r.headers else 0 chunk = 1 chunk_size = 1024 num_bars = int(file_size / chunk_size)
{"golden_diff": "diff --git a/flash/core/data/utils.py b/flash/core/data/utils.py\n--- a/flash/core/data/utils.py\n+++ b/flash/core/data/utils.py\n@@ -35,7 +35,7 @@\n os.makedirs(path)\n local_filename = os.path.join(path, url.split('/')[-1])\n r = requests.get(url, stream=True)\n- file_size = int(r.headers['Content-Length'])\n+ file_size = int(r.headers['Content-Length']) if 'Content-Length' in r.headers else 0\n chunk = 1\n chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n", "issue": "Download Data Fails if Content Length Not Defined in Header\n## \ud83d\udc1b Bug\r\n\r\nWhen I try to download a zip file using download_data from flash.core.data it fails because the response header does not contain a value for 'Content Length' this should be check for and handled in the code. \r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior:\r\n\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-7-aa10e89f3a8e> in <module>()\r\n 1 # 1. Download the data\r\n----> 2 download_data(\"https://github.com/karoldvl/ESC-50/archive/master.zip\", 'data/')\r\n\r\n2 frames\r\n/content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_data(url, path)\r\n 75 \r\n 76 \"\"\"\r\n---> 77 download_file(url, path)\r\n 78 \r\n 79 \r\n\r\n/content/gdrive/MyDrive/lightning-flash/flash/core/data/utils.py in download_file(url, path, verbose)\r\n 36 local_filename = os.path.join(path, url.split('/')[-1])\r\n 37 r = requests.get(url, stream=True)\r\n---> 38 file_size = int(r.headers['Content-Length'])\r\n 39 chunk = 1\r\n 40 chunk_size = 1024\r\n\r\n/usr/local/lib/python3.6/dist-packages/requests/structures.py in __getitem__(self, key)\r\n 52 \r\n 53 def __getitem__(self, key):\r\n---> 54 return self._store[key.lower()][1]\r\n 55 \r\n 56 def __delitem__(self, key):\r\n\r\nKeyError: 'content-length'\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n\r\n#### Code sample\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n```python\r\nimport flash\r\nfrom flash.core.data import download_data\r\ndownload_data(\"https://github.com/karoldvl/ESC-50/archive/master.zip\", 'data/')\r\n```\r\n\r\n### Expected behavior\r\n\r\nFile downloads and extracts ESC-50 data into datasets folder\r\n\r\n### Environment\r\n\r\nDefault Collab Configuration \r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport zipfile\nfrom typing import Any, Type\n\nimport requests\nimport torch\nfrom tqdm.auto import tqdm as tq\n\n\n# Code taken from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n# __author__ = \"github.com/ruxi\"\n# __license__ = \"MIT\"\ndef download_file(url: str, path: str, verbose: bool = False) -> None:\n \"\"\"\n Download file with progressbar\n\n Usage:\n download_file('http://web4host.net/5MB.zip')\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n local_filename = os.path.join(path, url.split('/')[-1])\n r = requests.get(url, stream=True)\n file_size = int(r.headers['Content-Length'])\n chunk = 1\n chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n if verbose:\n print(dict(file_size=file_size))\n print(dict(num_bars=num_bars))\n\n if not os.path.exists(local_filename):\n with open(local_filename, 'wb') as fp:\n for chunk in tq(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit='KB',\n desc=local_filename,\n leave=True # progressbar stays\n ):\n fp.write(chunk) # type: ignore\n\n if '.zip' in local_filename:\n if os.path.exists(local_filename):\n with zipfile.ZipFile(local_filename, 'r') as zip_ref:\n zip_ref.extractall(path)\n\n\ndef download_data(url: str, path: str = \"data/\") -> None:\n \"\"\"\n Downloads data automatically from the given url to the path. Defaults to data/ for the path.\n Automatically handles .csv, .zip\n\n Example::\n\n from flash import download_data\n\n Args:\n url: path\n path: local\n\n \"\"\"\n download_file(url, path)\n\n\ndef _contains_any_tensor(value: Any, dtype: Type = torch.Tensor) -> bool:\n # TODO: we should refactor FlashDatasetFolder to better integrate\n # with DataPipeline. 
That way, we wouldn't need this check.\n # This is because we are running transforms in both places.\n if isinstance(value, dtype):\n return True\n if isinstance(value, (list, tuple)):\n return any(_contains_any_tensor(v, dtype=dtype) for v in value)\n elif isinstance(value, dict):\n return any(_contains_any_tensor(v, dtype=dtype) for v in value.values())\n return False\n", "path": "flash/core/data/utils.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\nimport zipfile\nfrom typing import Any, Type\n\nimport requests\nimport torch\nfrom tqdm.auto import tqdm as tq\n\n\n# Code taken from: https://gist.github.com/ruxi/5d6803c116ec1130d484a4ab8c00c603\n# __author__ = \"github.com/ruxi\"\n# __license__ = \"MIT\"\ndef download_file(url: str, path: str, verbose: bool = False) -> None:\n \"\"\"\n Download file with progressbar\n\n Usage:\n download_file('http://web4host.net/5MB.zip')\n \"\"\"\n if not os.path.exists(path):\n os.makedirs(path)\n local_filename = os.path.join(path, url.split('/')[-1])\n r = requests.get(url, stream=True)\n file_size = int(r.headers['Content-Length']) if 'Content-Length' in r.headers else 0\n chunk = 1\n chunk_size = 1024\n num_bars = int(file_size / chunk_size)\n if verbose:\n print(dict(file_size=file_size))\n print(dict(num_bars=num_bars))\n\n if not os.path.exists(local_filename):\n with open(local_filename, 'wb') as fp:\n for chunk in tq(\n r.iter_content(chunk_size=chunk_size),\n total=num_bars,\n unit='KB',\n desc=local_filename,\n leave=True # progressbar stays\n ):\n fp.write(chunk) # type: ignore\n\n if '.zip' in local_filename:\n if os.path.exists(local_filename):\n with zipfile.ZipFile(local_filename, 'r') as zip_ref:\n zip_ref.extractall(path)\n\n\ndef download_data(url: str, path: str = \"data/\") -> None:\n \"\"\"\n Downloads data automatically from the given url to the path. Defaults to data/ for the path.\n Automatically handles .csv, .zip\n\n Example::\n\n from flash import download_data\n\n Args:\n url: path\n path: local\n\n \"\"\"\n download_file(url, path)\n\n\ndef _contains_any_tensor(value: Any, dtype: Type = torch.Tensor) -> bool:\n # TODO: we should refactor FlashDatasetFolder to better integrate\n # with DataPipeline. That way, we wouldn't need this check.\n # This is because we are running transforms in both places.\n if isinstance(value, dtype):\n return True\n if isinstance(value, (list, tuple)):\n return any(_contains_any_tensor(v, dtype=dtype) for v in value)\n elif isinstance(value, dict):\n return any(_contains_any_tensor(v, dtype=dtype) for v in value.values())\n return False\n", "path": "flash/core/data/utils.py"}]}
1660
141
gh_patches_debug_7848
rasdani/github-patches
git_diff
liqd__a4-meinberlin-977
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Maptopicprio: Polygon may not be set When I try to create a maptopic in the dashboard, it is not guaranteed that a polygon is already set. The map for setting a point therefore fails to display. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `meinberlin/apps/maptopicprio/dashboard.py` Content: ``` 1 from django.urls import reverse 2 from django.utils.translation import ugettext_lazy as _ 3 4 from meinberlin.apps.dashboard2 import DashboardComponent 5 from meinberlin.apps.dashboard2 import components 6 7 from . import models 8 from . import views 9 10 11 class MapTopicEditComponent(DashboardComponent): 12 identifier = 'map_topic_edit' 13 weight = 20 14 label = _('Places') 15 16 def is_effective(self, module): 17 module_app = module.phases[0].content().app 18 return module_app == 'meinberlin_maptopicprio' 19 20 def get_progress(self, module): 21 if models.MapTopic.objects.filter(module=module).exists(): 22 return 1, 1 23 return 0, 1 24 25 def get_base_url(self, module): 26 return reverse('a4dashboard:maptopic-list', kwargs={ 27 'module_slug': module.slug 28 }) 29 30 def get_urls(self): 31 return [ 32 (r'^maptopics/module/(?P<module_slug>[-\w_]+)/$', 33 views.MapTopicListDashboardView.as_view(component=self), 34 'maptopic-list'), 35 (r'^maptopics/create/module/(?P<module_slug>[-\w_]+)/$', 36 views.MapTopicCreateView.as_view(component=self), 37 'maptopic-create'), 38 (r'^maptopics/(?P<slug>[-\w_]+)/update/$', 39 views.MapTopicUpdateView.as_view(component=self), 40 'maptopic-update'), 41 (r'^maptopics/(?P<slug>[-\w_]+)/delete/$', 42 views.MapTopicDeleteView.as_view(component=self), 43 'maptopic-delete') 44 ] 45 46 47 components.register_module(MapTopicEditComponent()) 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/meinberlin/apps/maptopicprio/dashboard.py b/meinberlin/apps/maptopicprio/dashboard.py --- a/meinberlin/apps/maptopicprio/dashboard.py +++ b/meinberlin/apps/maptopicprio/dashboard.py @@ -15,7 +15,12 @@ def is_effective(self, module): module_app = module.phases[0].content().app - return module_app == 'meinberlin_maptopicprio' + if module_app != 'meinberlin_maptopicprio': + return False + elif module.settings_instance.polygon == '': + return False + else: + return True def get_progress(self, module): if models.MapTopic.objects.filter(module=module).exists():
{"golden_diff": "diff --git a/meinberlin/apps/maptopicprio/dashboard.py b/meinberlin/apps/maptopicprio/dashboard.py\n--- a/meinberlin/apps/maptopicprio/dashboard.py\n+++ b/meinberlin/apps/maptopicprio/dashboard.py\n@@ -15,7 +15,12 @@\n \n def is_effective(self, module):\n module_app = module.phases[0].content().app\n- return module_app == 'meinberlin_maptopicprio'\n+ if module_app != 'meinberlin_maptopicprio':\n+ return False\n+ elif module.settings_instance.polygon == '':\n+ return False\n+ else:\n+ return True\n \n def get_progress(self, module):\n if models.MapTopic.objects.filter(module=module).exists():\n", "issue": "Maptopicprio: Polygon may not be set\nWhen I try to create a maptopic in the dashboard, it is not guaranteed that a polygon is already set. The map for setting a point therefore fails to display.\n", "before_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom meinberlin.apps.dashboard2 import DashboardComponent\nfrom meinberlin.apps.dashboard2 import components\n\nfrom . import models\nfrom . import views\n\n\nclass MapTopicEditComponent(DashboardComponent):\n identifier = 'map_topic_edit'\n weight = 20\n label = _('Places')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n return module_app == 'meinberlin_maptopicprio'\n\n def get_progress(self, module):\n if models.MapTopic.objects.filter(module=module).exists():\n return 1, 1\n return 0, 1\n\n def get_base_url(self, module):\n return reverse('a4dashboard:maptopic-list', kwargs={\n 'module_slug': module.slug\n })\n\n def get_urls(self):\n return [\n (r'^maptopics/module/(?P<module_slug>[-\\w_]+)/$',\n views.MapTopicListDashboardView.as_view(component=self),\n 'maptopic-list'),\n (r'^maptopics/create/module/(?P<module_slug>[-\\w_]+)/$',\n views.MapTopicCreateView.as_view(component=self),\n 'maptopic-create'),\n (r'^maptopics/(?P<slug>[-\\w_]+)/update/$',\n views.MapTopicUpdateView.as_view(component=self),\n 'maptopic-update'),\n (r'^maptopics/(?P<slug>[-\\w_]+)/delete/$',\n views.MapTopicDeleteView.as_view(component=self),\n 'maptopic-delete')\n ]\n\n\ncomponents.register_module(MapTopicEditComponent())\n", "path": "meinberlin/apps/maptopicprio/dashboard.py"}], "after_files": [{"content": "from django.urls import reverse\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom meinberlin.apps.dashboard2 import DashboardComponent\nfrom meinberlin.apps.dashboard2 import components\n\nfrom . import models\nfrom . 
import views\n\n\nclass MapTopicEditComponent(DashboardComponent):\n identifier = 'map_topic_edit'\n weight = 20\n label = _('Places')\n\n def is_effective(self, module):\n module_app = module.phases[0].content().app\n if module_app != 'meinberlin_maptopicprio':\n return False\n elif module.settings_instance.polygon == '':\n return False\n else:\n return True\n\n def get_progress(self, module):\n if models.MapTopic.objects.filter(module=module).exists():\n return 1, 1\n return 0, 1\n\n def get_base_url(self, module):\n return reverse('a4dashboard:maptopic-list', kwargs={\n 'module_slug': module.slug\n })\n\n def get_urls(self):\n return [\n (r'^maptopics/module/(?P<module_slug>[-\\w_]+)/$',\n views.MapTopicListDashboardView.as_view(component=self),\n 'maptopic-list'),\n (r'^maptopics/create/module/(?P<module_slug>[-\\w_]+)/$',\n views.MapTopicCreateView.as_view(component=self),\n 'maptopic-create'),\n (r'^maptopics/(?P<slug>[-\\w_]+)/update/$',\n views.MapTopicUpdateView.as_view(component=self),\n 'maptopic-update'),\n (r'^maptopics/(?P<slug>[-\\w_]+)/delete/$',\n views.MapTopicDeleteView.as_view(component=self),\n 'maptopic-delete')\n ]\n\n\ncomponents.register_module(MapTopicEditComponent())\n", "path": "meinberlin/apps/maptopicprio/dashboard.py"}]}
num_tokens: 760
num_tokens_diff: 177
problem_id: gh_patches_debug_12751
source: rasdani/github-patches
task_type: git_diff
in_source_id: bridgecrewio__checkov-2372
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- crash introduce by https://github.com/bridgecrewio/checkov/pull/2356 **Describe the issue** ``` checkov -d test1/ ``` **Examples** test1/job.tf ```hcl resource "aws_batch_job_definition" "foobar" { name = "foobar" type = "container" container_properties = file("${path.module}/job_definition.json") } ``` **Exception Trace** Please share the trace for the exception and all relevant output by checkov. To maximize the understanding, please run checkov with LOG_LEVEL set to debug as follows: ```sh 2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov version: 2.0.809 2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python executable: /usr/bin/python3 2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python version: 3.10.2 (main, Jan 17 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)] 2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov executable (argv[0]): /home/foobar/.local/bin/checkov 2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Command Line Args: -d test1/ Config File (/home/foobar/git/production/.checkov.yaml): framework: terraform baseline: .checkov.baseline evaluate-variables:True compact: True quiet: True Defaults: --branch: master --download-external-modules:False --external-modules-download-path:.external_modules --min-cve-severity:none 2022-02-08 16:55:00,817 [MainThread ] [INFO ] Resultant set of frameworks (removing skipped frameworks): terraform 2022-02-08 16:55:00,817 [MainThread ] [DEBUG] terraform_runner declares no system dependency checks required. 2022-02-08 16:55:00,817 [MainThread ] [DEBUG] No API key found. Scanning locally only. 2022-02-08 16:55:01,377 [MainThread ] [DEBUG] Got checkov mappings from Bridgecrew BE 2022-02-08 16:55:01,378 [MainThread ] [INFO ] Scanning root folder and producing fresh tf_definitions and context 2022-02-08 16:55:01,378 [MainThread ] [INFO ] Parsing HCL files in source dir 2022-02-08 16:55:01,379 [MainThread ] [DEBUG] Parsing /home/foobar/git/production/test1/job.tf 2022-02-08 16:55:01,383 [MainThread ] [DEBUG] Module load loop 0 2022-02-08 16:55:01,384 [MainThread ] [INFO ] Building graph from parsed module 2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating vertices 2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating edges 2022-02-08 16:55:01,385 [MainThread ] [INFO ] Rendering variables, graph has 1 vertices and 0 edges 2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluating edges 2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluate_non_rendered_values 2022-02-08 16:55:01,386 [MainThread ] [DEBUG] created context for aws_batch_job_definition foobar 2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Created definitions context 2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Scanning file: /job.tf 2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Running check: Batch job does not define a privileged container on file /job.tf 2022-02-08 16:55:01,386 [MainThread ] [ERROR] Failed to run check: Batch job does not define a privileged container for configuration: {'container_properties': ['file("path.module/job_definition.json")'], 'name': ['foobar'], 'type': ['container']} at file: /job.tf Traceback (most recent call last): File "/home/foobar/.local/bin/checkov", line 9, in <module> sys.exit(run()) File "/home/foobar/.local/lib/python3.10/site-packages/checkov/main.py", line 223, in run scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir, File 
"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/runners/runner_registry.py", line 59, in run reports = [self.runners[0].run(root_folder, external_checks_dir=external_checks_dir, files=files, File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 119, in run self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments) File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 212, in check_tf_definition self.run_all_blocks(definition, self.context, full_file_path, root_folder, report, File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 222, in run_all_blocks self.run_block(definition[block_type], definitions_context, File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py", line 294, in run_block results = registry.scan(scanned_file, entity, skipped_checks, runner_filter) File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py", line 121, in scan result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info) File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py", line 135, in run_check result = check.run( File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py", line 81, in run raise e File "/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py", line 68, in run check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type) File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py", line 54, in scan_entity_conf return self.scan_resource_conf(conf) File "/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py", line 17, in scan_resource_conf container = json.loads(conf.get("container_properties")[0]) File "/usr/lib64/python3.10/json/__init__.py", line 346, in loads return _default_decoder.decode(s) File "/usr/lib64/python3.10/json/decoder.py", line 337, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/usr/lib64/python3.10/json/decoder.py", line 355, in raw_decode raise JSONDecodeError("Expecting value", s, err.value) from None json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0) ``` **Desktop (please complete the following information):** - OS: Fedora 35 - Checkov Version: 2.0.809 **Additional context** This issue appears from https://github.com/bridgecrewio/checkov/pull/2356 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py` Content: ``` 1 import json 2 from checkov.common.models.enums import CheckResult, CheckCategories 3 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck 4 5 6 class BatchJobIsNotPrivileged(BaseResourceCheck): 7 def __init__(self): 8 name = "Batch job does not define a privileged container" 9 id = "CKV_AWS_210" 10 supported_resources = ['aws_batch_job_definition'] 11 categories = [CheckCategories.GENERAL_SECURITY] 12 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) 13 14 def scan_resource_conf(self, conf): 15 if conf.get("container_properties"): 16 if type(conf.get("container_properties")[0]) is str: 17 container = json.loads(conf.get("container_properties")[0]) 18 else: 19 container = conf.get("container_properties")[0] 20 if container.get("privileged"): 21 return CheckResult.FAILED 22 return CheckResult.PASSED 23 return CheckResult.UNKNOWN 24 25 26 check = BatchJobIsNotPrivileged() 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py --- a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py +++ b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py @@ -14,7 +14,10 @@ def scan_resource_conf(self, conf): if conf.get("container_properties"): if type(conf.get("container_properties")[0]) is str: - container = json.loads(conf.get("container_properties")[0]) + try: + container = json.loads(conf.get("container_properties")[0]) + except json.JSONDecodeError as e: + return CheckResult.UNKNOWN else: container = conf.get("container_properties")[0] if container.get("privileged"):
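The core of the fix above is defensive parsing: `json.loads` raises `JSONDecodeError` on anything that is not valid JSON (here, an unresolved `file("${path.module}/job_definition.json")` expression), so the check should fall back to `UNKNOWN` instead of crashing the whole run. A minimal standalone sketch of that pattern (the function name and return strings are illustrative, not checkov's actual check API):

```python
import json


def scan_container_properties(conf):
    """Classify an aws_batch_job_definition config without crashing on non-JSON input."""
    raw = conf.get("container_properties")
    if not raw:
        return "UNKNOWN"
    value = raw[0]
    if isinstance(value, str):
        try:
            container = json.loads(value)
        except json.JSONDecodeError:
            # e.g. 'file("path.module/job_definition.json")' from the traceback above
            return "UNKNOWN"
    else:
        container = value
    return "FAILED" if container.get("privileged") else "PASSED"


# The configuration from the issue no longer raises; it is simply reported as UNKNOWN.
print(scan_container_properties({"container_properties": ['file("path.module/job_definition.json")']}))
```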
{"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n--- a/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n+++ b/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\n@@ -14,7 +14,10 @@\n def scan_resource_conf(self, conf):\n if conf.get(\"container_properties\"):\n if type(conf.get(\"container_properties\")[0]) is str:\n- container = json.loads(conf.get(\"container_properties\")[0])\n+ try:\n+ container = json.loads(conf.get(\"container_properties\")[0])\n+ except json.JSONDecodeError as e:\n+ return CheckResult.UNKNOWN\n else:\n container = conf.get(\"container_properties\")[0]\n if container.get(\"privileged\"):\n", "issue": "crash introduce by https://github.com/bridgecrewio/checkov/pull/2356\n**Describe the issue**\r\n\r\n```\r\ncheckov -d test1/\r\n```\r\n\r\n**Examples**\r\ntest1/job.tf\r\n```hcl\r\nresource \"aws_batch_job_definition\" \"foobar\" {\r\n name = \"foobar\"\r\n type = \"container\"\r\n container_properties = file(\"${path.module}/job_definition.json\")\r\n}\r\n```\r\n\r\n**Exception Trace**\r\nPlease share the trace for the exception and all relevant output by checkov.\r\nTo maximize the understanding, please run checkov with LOG_LEVEL set to debug\r\nas follows:\r\n```sh\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov version: 2.0.809\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python executable: /usr/bin/python3\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Python version: 3.10.2 (main, Jan 17 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Checkov executable (argv[0]): /home/foobar/.local/bin/checkov\r\n2022-02-08 16:55:00,816 [MainThread ] [DEBUG] Command Line Args: -d test1/\r\nConfig File (/home/foobar/git/production/.checkov.yaml):\r\n framework: terraform\r\n baseline: .checkov.baseline\r\n evaluate-variables:True\r\n compact: True\r\n quiet: True\r\nDefaults:\r\n --branch: master\r\n --download-external-modules:False\r\n --external-modules-download-path:.external_modules\r\n --min-cve-severity:none\r\n\r\n2022-02-08 16:55:00,817 [MainThread ] [INFO ] Resultant set of frameworks (removing skipped frameworks): terraform\r\n2022-02-08 16:55:00,817 [MainThread ] [DEBUG] terraform_runner declares no system dependency checks required.\r\n2022-02-08 16:55:00,817 [MainThread ] [DEBUG] No API key found. 
Scanning locally only.\r\n2022-02-08 16:55:01,377 [MainThread ] [DEBUG] Got checkov mappings from Bridgecrew BE\r\n2022-02-08 16:55:01,378 [MainThread ] [INFO ] Scanning root folder and producing fresh tf_definitions and context\r\n2022-02-08 16:55:01,378 [MainThread ] [INFO ] Parsing HCL files in source dir\r\n2022-02-08 16:55:01,379 [MainThread ] [DEBUG] Parsing /home/foobar/git/production/test1/job.tf\r\n2022-02-08 16:55:01,383 [MainThread ] [DEBUG] Module load loop 0\r\n2022-02-08 16:55:01,384 [MainThread ] [INFO ] Building graph from parsed module\r\n2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating vertices\r\n2022-02-08 16:55:01,384 [MainThread ] [INFO ] Creating edges\r\n2022-02-08 16:55:01,385 [MainThread ] [INFO ] Rendering variables, graph has 1 vertices and 0 edges\r\n2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluating edges\r\n2022-02-08 16:55:01,385 [MainThread ] [INFO ] done evaluate_non_rendered_values\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] created context for aws_batch_job_definition foobar\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Created definitions context\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Scanning file: /job.tf\r\n2022-02-08 16:55:01,386 [MainThread ] [DEBUG] Running check: Batch job does not define a privileged container on file /job.tf\r\n2022-02-08 16:55:01,386 [MainThread ] [ERROR] Failed to run check: Batch job does not define a privileged container for configuration: {'container_properties': ['file(\"path.module/job_definition.json\")'], 'name': ['foobar'], 'type': ['container']} at file: /job.tf\r\nTraceback (most recent call last):\r\n File \"/home/foobar/.local/bin/checkov\", line 9, in <module>\r\n sys.exit(run())\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/main.py\", line 223, in run\r\n scan_reports = runner_registry.run(root_folder=root_folder, external_checks_dir=external_checks_dir,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/runners/runner_registry.py\", line 59, in run\r\n reports = [self.runners[0].run(root_folder, external_checks_dir=external_checks_dir, files=files,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 119, in run\r\n self.check_tf_definition(report, root_folder, runner_filter, collect_skip_comments)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 212, in check_tf_definition\r\n self.run_all_blocks(definition, self.context, full_file_path, root_folder, report,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 222, in run_all_blocks\r\n self.run_block(definition[block_type], definitions_context,\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/runner.py\", line 294, in run_block\r\n results = registry.scan(scanned_file, entity, skipped_checks, runner_filter)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py\", line 121, in scan\r\n result = self.run_check(check, entity_configuration, entity_name, entity_type, scanned_file, skip_info)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check_registry.py\", line 135, in run_check\r\n result = check.run(\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py\", line 81, in run\r\n raise e\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/common/checks/base_check.py\", line 68, 
in run\r\n check_result[\"result\"] = self.scan_entity_conf(entity_configuration, entity_type)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/base_resource_check.py\", line 54, in scan_entity_conf\r\n return self.scan_resource_conf(conf)\r\n File \"/home/foobar/.local/lib/python3.10/site-packages/checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py\", line 17, in scan_resource_conf\r\n container = json.loads(conf.get(\"container_properties\")[0])\r\n File \"/usr/lib64/python3.10/json/__init__.py\", line 346, in loads\r\n return _default_decoder.decode(s)\r\n File \"/usr/lib64/python3.10/json/decoder.py\", line 337, in decode\r\n obj, end = self.raw_decode(s, idx=_w(s, 0).end())\r\n File \"/usr/lib64/python3.10/json/decoder.py\", line 355, in raw_decode\r\n raise JSONDecodeError(\"Expecting value\", s, err.value) from None\r\njson.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0)\r\n```\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: Fedora 35\r\n - Checkov Version: 2.0.809\r\n\r\n**Additional context**\r\nThis issue appears from https://github.com/bridgecrewio/checkov/pull/2356\n", "before_files": [{"content": "import json\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass BatchJobIsNotPrivileged(BaseResourceCheck):\n def __init__(self):\n name = \"Batch job does not define a privileged container\"\n id = \"CKV_AWS_210\"\n supported_resources = ['aws_batch_job_definition']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if conf.get(\"container_properties\"):\n if type(conf.get(\"container_properties\")[0]) is str:\n container = json.loads(conf.get(\"container_properties\")[0])\n else:\n container = conf.get(\"container_properties\")[0]\n if container.get(\"privileged\"):\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.UNKNOWN\n\n\ncheck = BatchJobIsNotPrivileged()\n", "path": "checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py"}], "after_files": [{"content": "import json\nfrom checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass BatchJobIsNotPrivileged(BaseResourceCheck):\n def __init__(self):\n name = \"Batch job does not define a privileged container\"\n id = \"CKV_AWS_210\"\n supported_resources = ['aws_batch_job_definition']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if conf.get(\"container_properties\"):\n if type(conf.get(\"container_properties\")[0]) is str:\n try:\n container = json.loads(conf.get(\"container_properties\")[0])\n except json.JSONDecodeError as e:\n return CheckResult.UNKNOWN\n else:\n container = conf.get(\"container_properties\")[0]\n if container.get(\"privileged\"):\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.UNKNOWN\n\n\ncheck = BatchJobIsNotPrivileged()\n", "path": "checkov/terraform/checks/resource/aws/BatchJobIsNotPrivileged.py"}]}
num_tokens: 2,659
num_tokens_diff: 190
problem_id: gh_patches_debug_33454
source: rasdani/github-patches
task_type: git_diff
in_source_id: bookwyrm-social__bookwyrm-1690
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- button to add group lists from group page **Is your feature request related to a problem? Please describe.** At the moment if you want to add a group-curated list, you have to do it from the Lists page, rather than the home page of the Group. This makes sense if you know how the backend functionality works but is not very intuitive. **Describe the solution you'd like** Add a `Create list` button (like the one in the lists view) to the Group page. **Describe alternatives you've considered** Leaving the current, obscure, functionality. **Additional context** I intended to add this once the initial group functionality was completed, but I forgot about it. This Issue is to remind me to put in a PR to add this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/views/group.py` Content: ``` 1 """group views""" 2 from django.apps import apps 3 from django.contrib.auth.decorators import login_required 4 from django.db import IntegrityError 5 from django.core.paginator import Paginator 6 from django.http import HttpResponseBadRequest 7 from django.shortcuts import get_object_or_404, redirect 8 from django.template.response import TemplateResponse 9 from django.utils.decorators import method_decorator 10 from django.views import View 11 from django.views.decorators.http import require_POST 12 from django.contrib.postgres.search import TrigramSimilarity 13 from django.db.models.functions import Greatest 14 15 from bookwyrm import forms, models 16 from bookwyrm.suggested_users import suggested_users 17 from .helpers import get_user_from_username 18 19 # pylint: disable=no-self-use 20 class Group(View): 21 """group page""" 22 23 def get(self, request, group_id): 24 """display a group""" 25 26 group = get_object_or_404(models.Group, id=group_id) 27 group.raise_visible_to_user(request.user) 28 lists = ( 29 models.List.privacy_filter(request.user) 30 .filter(group=group) 31 .order_by("-updated_date") 32 ) 33 34 data = { 35 "group": group, 36 "lists": lists, 37 "group_form": forms.GroupForm(instance=group), 38 "path": "/group", 39 } 40 return TemplateResponse(request, "groups/group.html", data) 41 42 @method_decorator(login_required, name="dispatch") 43 def post(self, request, group_id): 44 """edit a group""" 45 user_group = get_object_or_404(models.Group, id=group_id) 46 form = forms.GroupForm(request.POST, instance=user_group) 47 if not form.is_valid(): 48 return redirect("group", user_group.id) 49 user_group = form.save() 50 51 # let the other members know something about the group changed 52 memberships = models.GroupMember.objects.filter(group=user_group) 53 model = apps.get_model("bookwyrm.Notification", require_ready=True) 54 for field in form.changed_data: 55 notification_type = ( 56 "GROUP_PRIVACY" 57 if field == "privacy" 58 else "GROUP_NAME" 59 if field == "name" 60 else "GROUP_DESCRIPTION" 61 if field == "description" 62 else None 63 ) 64 if notification_type: 65 for membership in memberships: 66 member = membership.user 67 if member != request.user: 68 model.objects.create( 69 user=member, 70 related_user=request.user, 71 related_group=user_group, 72 notification_type=notification_type, 73 ) 74 75 return redirect("group", user_group.id) 76 77 78 @method_decorator(login_required, name="dispatch") 79 class UserGroups(View): 80 """a user's groups page""" 81 82 def get(self, request, 
username): 83 """display a group""" 84 user = get_user_from_username(request.user, username) 85 groups = ( 86 models.Group.privacy_filter(request.user) 87 .filter(memberships__user=user) 88 .order_by("-updated_date") 89 ) 90 paginated = Paginator(groups, 12) 91 92 data = { 93 "groups": paginated.get_page(request.GET.get("page")), 94 "is_self": request.user.id == user.id, 95 "user": user, 96 "group_form": forms.GroupForm(), 97 "path": user.local_path + "/group", 98 } 99 return TemplateResponse(request, "user/groups.html", data) 100 101 @method_decorator(login_required, name="dispatch") 102 # pylint: disable=unused-argument 103 def post(self, request, username): 104 """create a user group""" 105 form = forms.GroupForm(request.POST) 106 if not form.is_valid(): 107 return redirect(request.user.local_path + "/groups") 108 group = form.save() 109 # add the creator as a group member 110 models.GroupMember.objects.create(group=group, user=request.user) 111 return redirect("group", group.id) 112 113 114 @method_decorator(login_required, name="dispatch") 115 class FindUsers(View): 116 """find friends to add to your group""" 117 118 # this is mostly borrowed from the Get Started friend finder 119 120 def get(self, request, group_id): 121 """basic profile info""" 122 user_query = request.GET.get("user_query") 123 group = get_object_or_404(models.Group, id=group_id) 124 125 if not group: 126 return HttpResponseBadRequest() 127 128 if not group.user == request.user: 129 return HttpResponseBadRequest() 130 131 user_results = ( 132 models.User.viewer_aware_objects(request.user) 133 .exclude( 134 memberships__in=group.memberships.all() 135 ) # don't suggest users who are already members 136 .annotate( 137 similarity=Greatest( 138 TrigramSimilarity("username", user_query), 139 TrigramSimilarity("localname", user_query), 140 ) 141 ) 142 .filter(similarity__gt=0.5, local=True) 143 .order_by("-similarity")[:5] 144 ) 145 data = {"no_results": not user_results} 146 147 if user_results.count() < 5: 148 user_results = list(user_results) + suggested_users.get_suggestions( 149 request.user, local=True 150 ) 151 152 data = { 153 "suggested_users": user_results, 154 "group": group, 155 "group_form": forms.GroupForm(instance=group), 156 "user_query": user_query, 157 "requestor_is_manager": request.user == group.user, 158 } 159 return TemplateResponse(request, "groups/find_users.html", data) 160 161 162 @require_POST 163 @login_required 164 def delete_group(request, group_id): 165 """delete a group""" 166 group = get_object_or_404(models.Group, id=group_id) 167 168 # only the owner can delete a group 169 group.raise_not_deletable(request.user) 170 171 # deal with any group lists 172 models.List.objects.filter(group=group).update(curation="closed", group=None) 173 174 group.delete() 175 return redirect(request.user.local_path + "/groups") 176 177 178 @require_POST 179 @login_required 180 def invite_member(request): 181 """invite a member to the group""" 182 group = get_object_or_404(models.Group, id=request.POST.get("group")) 183 user = get_user_from_username(request.user, request.POST["user"]) 184 185 if not group.user == request.user: 186 return HttpResponseBadRequest() 187 188 try: 189 models.GroupMemberInvitation.objects.create(user=user, group=group) 190 except IntegrityError: 191 pass 192 193 return redirect(user.local_path) 194 195 196 @require_POST 197 @login_required 198 def remove_member(request): 199 """remove a member from the group""" 200 group = get_object_or_404(models.Group, id=request.POST.get("group")) 
201 user = get_user_from_username(request.user, request.POST["user"]) 202 203 # you can't be removed from your own group 204 if user == group.user: 205 return HttpResponseBadRequest() 206 207 is_member = models.GroupMember.objects.filter(group=group, user=user).exists() 208 is_invited = models.GroupMemberInvitation.objects.filter( 209 group=group, user=user 210 ).exists() 211 212 if is_invited: 213 try: 214 invitation = models.GroupMemberInvitation.objects.get( 215 user=user, group=group 216 ) 217 218 invitation.reject() 219 220 except IntegrityError: 221 pass 222 223 if is_member: 224 try: 225 models.List.remove_from_group(group.user, user) 226 models.GroupMember.remove(group.user, user) 227 except IntegrityError: 228 pass 229 230 memberships = models.GroupMember.objects.filter(group=group) 231 model = apps.get_model("bookwyrm.Notification", require_ready=True) 232 notification_type = "LEAVE" if user == request.user else "REMOVE" 233 # let the other members know about it 234 for membership in memberships: 235 member = membership.user 236 if member != request.user: 237 model.objects.create( 238 user=member, 239 related_user=user, 240 related_group=group, 241 notification_type=notification_type, 242 ) 243 244 # let the user (now ex-member) know as well, if they were removed 245 if notification_type == "REMOVE": 246 model.objects.create( 247 user=user, 248 related_group=group, 249 notification_type=notification_type, 250 ) 251 252 return redirect(group.local_path) 253 254 255 @require_POST 256 @login_required 257 def accept_membership(request): 258 """accept an invitation to join a group""" 259 group = get_object_or_404(models.Group, id=request.POST.get("group")) 260 invite = get_object_or_404( 261 models.GroupMemberInvitation, group=group, user=request.user 262 ) 263 264 try: 265 invite.accept() 266 except IntegrityError: 267 pass 268 269 return redirect(group.local_path) 270 271 272 @require_POST 273 @login_required 274 def reject_membership(request): 275 """reject an invitation to join a group""" 276 group = get_object_or_404(models.Group, id=request.POST.get("group")) 277 invite = get_object_or_404( 278 models.GroupMemberInvitation, group=group, user=request.user 279 ) 280 281 invite.reject() 282 return redirect(request.user.local_path) 283 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/views/group.py b/bookwyrm/views/group.py --- a/bookwyrm/views/group.py +++ b/bookwyrm/views/group.py @@ -34,7 +34,8 @@ data = { "group": group, "lists": lists, - "group_form": forms.GroupForm(instance=group), + "group_form": forms.GroupForm(instance=group, auto_id="group_form_id_%s"), + "list_form": forms.ListForm(), "path": "/group", } return TemplateResponse(request, "groups/group.html", data) @@ -121,6 +122,11 @@ """basic profile info""" user_query = request.GET.get("user_query") group = get_object_or_404(models.Group, id=group_id) + lists = ( + models.List.privacy_filter(request.user) + .filter(group=group) + .order_by("-updated_date") + ) if not group: return HttpResponseBadRequest() @@ -142,7 +148,7 @@ .filter(similarity__gt=0.5, local=True) .order_by("-similarity")[:5] ) - data = {"no_results": not user_results} + no_results = not user_results if user_results.count() < 5: user_results = list(user_results) + suggested_users.get_suggestions( @@ -151,8 +157,11 @@ data = { "suggested_users": user_results, + "no_results": no_results, "group": group, - "group_form": forms.GroupForm(instance=group), + "lists": lists, + "group_form": forms.GroupForm(instance=group, auto_id="group_form_id_%s"), + "list_form": forms.ListForm(), "user_query": user_query, "requestor_is_manager": request.user == group.user, }
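Most of the change above is context plumbing: the group views now pass the template a blank `ListForm` alongside the group-edit form, and namespace the group form's field ids (`auto_id="group_form_id_%s"`) so the two forms can sit on the same page without colliding ids. A generic Django sketch of that pattern, using hypothetical stand-in forms rather than BookWyrm's real ones:

```python
from django import forms
from django.template.response import TemplateResponse
from django.views import View


class GroupForm(forms.Form):  # stand-in for the real group-edit form
    name = forms.CharField()


class ListForm(forms.Form):  # stand-in for the real list-creation form
    name = forms.CharField()


class GroupPage(View):
    def get(self, request, group_id):
        data = {
            # prefix one form's ids so both forms can render on the same template
            "group_form": GroupForm(auto_id="group_form_id_%s"),
            "list_form": ListForm(),
        }
        return TemplateResponse(request, "groups/group.html", data)
```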
{"golden_diff": "diff --git a/bookwyrm/views/group.py b/bookwyrm/views/group.py\n--- a/bookwyrm/views/group.py\n+++ b/bookwyrm/views/group.py\n@@ -34,7 +34,8 @@\n data = {\n \"group\": group,\n \"lists\": lists,\n- \"group_form\": forms.GroupForm(instance=group),\n+ \"group_form\": forms.GroupForm(instance=group, auto_id=\"group_form_id_%s\"),\n+ \"list_form\": forms.ListForm(),\n \"path\": \"/group\",\n }\n return TemplateResponse(request, \"groups/group.html\", data)\n@@ -121,6 +122,11 @@\n \"\"\"basic profile info\"\"\"\n user_query = request.GET.get(\"user_query\")\n group = get_object_or_404(models.Group, id=group_id)\n+ lists = (\n+ models.List.privacy_filter(request.user)\n+ .filter(group=group)\n+ .order_by(\"-updated_date\")\n+ )\n \n if not group:\n return HttpResponseBadRequest()\n@@ -142,7 +148,7 @@\n .filter(similarity__gt=0.5, local=True)\n .order_by(\"-similarity\")[:5]\n )\n- data = {\"no_results\": not user_results}\n+ no_results = not user_results\n \n if user_results.count() < 5:\n user_results = list(user_results) + suggested_users.get_suggestions(\n@@ -151,8 +157,11 @@\n \n data = {\n \"suggested_users\": user_results,\n+ \"no_results\": no_results,\n \"group\": group,\n- \"group_form\": forms.GroupForm(instance=group),\n+ \"lists\": lists,\n+ \"group_form\": forms.GroupForm(instance=group, auto_id=\"group_form_id_%s\"),\n+ \"list_form\": forms.ListForm(),\n \"user_query\": user_query,\n \"requestor_is_manager\": request.user == group.user,\n }\n", "issue": "button to add group lists from group page\n**Is your feature request related to a problem? Please describe.**\r\nAt the moment if you want to add a group-curated list, you have to do it from the Lists page, rather than the home page of the Group. This makes sense if you know how the backend functionality works but is not very intuitive.\r\n\r\n**Describe the solution you'd like**\r\n Add a `Create list` button (like the one in the lists view) to the Group page.\r\n\r\n**Describe alternatives you've considered**\r\nLeaving the current, obscure, functionality.\r\n\r\n**Additional context**\r\nI intended to add this once the initial group functionality was completed, but I forgot about it.\r\nThis Issue is to remind me to put in a PR to add this.\r\n\n", "before_files": [{"content": "\"\"\"group views\"\"\"\nfrom django.apps import apps\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom django.db.models.functions import Greatest\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.suggested_users import suggested_users\nfrom .helpers import get_user_from_username\n\n# pylint: disable=no-self-use\nclass Group(View):\n \"\"\"group page\"\"\"\n\n def get(self, request, group_id):\n \"\"\"display a group\"\"\"\n\n group = get_object_or_404(models.Group, id=group_id)\n group.raise_visible_to_user(request.user)\n lists = (\n models.List.privacy_filter(request.user)\n .filter(group=group)\n .order_by(\"-updated_date\")\n )\n\n data = {\n \"group\": group,\n \"lists\": lists,\n \"group_form\": forms.GroupForm(instance=group),\n \"path\": \"/group\",\n }\n return 
TemplateResponse(request, \"groups/group.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n def post(self, request, group_id):\n \"\"\"edit a group\"\"\"\n user_group = get_object_or_404(models.Group, id=group_id)\n form = forms.GroupForm(request.POST, instance=user_group)\n if not form.is_valid():\n return redirect(\"group\", user_group.id)\n user_group = form.save()\n\n # let the other members know something about the group changed\n memberships = models.GroupMember.objects.filter(group=user_group)\n model = apps.get_model(\"bookwyrm.Notification\", require_ready=True)\n for field in form.changed_data:\n notification_type = (\n \"GROUP_PRIVACY\"\n if field == \"privacy\"\n else \"GROUP_NAME\"\n if field == \"name\"\n else \"GROUP_DESCRIPTION\"\n if field == \"description\"\n else None\n )\n if notification_type:\n for membership in memberships:\n member = membership.user\n if member != request.user:\n model.objects.create(\n user=member,\n related_user=request.user,\n related_group=user_group,\n notification_type=notification_type,\n )\n\n return redirect(\"group\", user_group.id)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserGroups(View):\n \"\"\"a user's groups page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a group\"\"\"\n user = get_user_from_username(request.user, username)\n groups = (\n models.Group.privacy_filter(request.user)\n .filter(memberships__user=user)\n .order_by(\"-updated_date\")\n )\n paginated = Paginator(groups, 12)\n\n data = {\n \"groups\": paginated.get_page(request.GET.get(\"page\")),\n \"is_self\": request.user.id == user.id,\n \"user\": user,\n \"group_form\": forms.GroupForm(),\n \"path\": user.local_path + \"/group\",\n }\n return TemplateResponse(request, \"user/groups.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username):\n \"\"\"create a user group\"\"\"\n form = forms.GroupForm(request.POST)\n if not form.is_valid():\n return redirect(request.user.local_path + \"/groups\")\n group = form.save()\n # add the creator as a group member\n models.GroupMember.objects.create(group=group, user=request.user)\n return redirect(\"group\", group.id)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass FindUsers(View):\n \"\"\"find friends to add to your group\"\"\"\n\n # this is mostly borrowed from the Get Started friend finder\n\n def get(self, request, group_id):\n \"\"\"basic profile info\"\"\"\n user_query = request.GET.get(\"user_query\")\n group = get_object_or_404(models.Group, id=group_id)\n\n if not group:\n return HttpResponseBadRequest()\n\n if not group.user == request.user:\n return HttpResponseBadRequest()\n\n user_results = (\n models.User.viewer_aware_objects(request.user)\n .exclude(\n memberships__in=group.memberships.all()\n ) # don't suggest users who are already members\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"username\", user_query),\n TrigramSimilarity(\"localname\", user_query),\n )\n )\n .filter(similarity__gt=0.5, local=True)\n .order_by(\"-similarity\")[:5]\n )\n data = {\"no_results\": not user_results}\n\n if user_results.count() < 5:\n user_results = list(user_results) + suggested_users.get_suggestions(\n request.user, local=True\n )\n\n data = {\n \"suggested_users\": user_results,\n \"group\": group,\n \"group_form\": forms.GroupForm(instance=group),\n \"user_query\": user_query,\n \"requestor_is_manager\": request.user == group.user,\n }\n return 
TemplateResponse(request, \"groups/find_users.html\", data)\n\n\n@require_POST\n@login_required\ndef delete_group(request, group_id):\n \"\"\"delete a group\"\"\"\n group = get_object_or_404(models.Group, id=group_id)\n\n # only the owner can delete a group\n group.raise_not_deletable(request.user)\n\n # deal with any group lists\n models.List.objects.filter(group=group).update(curation=\"closed\", group=None)\n\n group.delete()\n return redirect(request.user.local_path + \"/groups\")\n\n\n@require_POST\n@login_required\ndef invite_member(request):\n \"\"\"invite a member to the group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n user = get_user_from_username(request.user, request.POST[\"user\"])\n\n if not group.user == request.user:\n return HttpResponseBadRequest()\n\n try:\n models.GroupMemberInvitation.objects.create(user=user, group=group)\n except IntegrityError:\n pass\n\n return redirect(user.local_path)\n\n\n@require_POST\n@login_required\ndef remove_member(request):\n \"\"\"remove a member from the group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n user = get_user_from_username(request.user, request.POST[\"user\"])\n\n # you can't be removed from your own group\n if user == group.user:\n return HttpResponseBadRequest()\n\n is_member = models.GroupMember.objects.filter(group=group, user=user).exists()\n is_invited = models.GroupMemberInvitation.objects.filter(\n group=group, user=user\n ).exists()\n\n if is_invited:\n try:\n invitation = models.GroupMemberInvitation.objects.get(\n user=user, group=group\n )\n\n invitation.reject()\n\n except IntegrityError:\n pass\n\n if is_member:\n try:\n models.List.remove_from_group(group.user, user)\n models.GroupMember.remove(group.user, user)\n except IntegrityError:\n pass\n\n memberships = models.GroupMember.objects.filter(group=group)\n model = apps.get_model(\"bookwyrm.Notification\", require_ready=True)\n notification_type = \"LEAVE\" if user == request.user else \"REMOVE\"\n # let the other members know about it\n for membership in memberships:\n member = membership.user\n if member != request.user:\n model.objects.create(\n user=member,\n related_user=user,\n related_group=group,\n notification_type=notification_type,\n )\n\n # let the user (now ex-member) know as well, if they were removed\n if notification_type == \"REMOVE\":\n model.objects.create(\n user=user,\n related_group=group,\n notification_type=notification_type,\n )\n\n return redirect(group.local_path)\n\n\n@require_POST\n@login_required\ndef accept_membership(request):\n \"\"\"accept an invitation to join a group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n invite = get_object_or_404(\n models.GroupMemberInvitation, group=group, user=request.user\n )\n\n try:\n invite.accept()\n except IntegrityError:\n pass\n\n return redirect(group.local_path)\n\n\n@require_POST\n@login_required\ndef reject_membership(request):\n \"\"\"reject an invitation to join a group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n invite = get_object_or_404(\n models.GroupMemberInvitation, group=group, user=request.user\n )\n\n invite.reject()\n return redirect(request.user.local_path)\n", "path": "bookwyrm/views/group.py"}], "after_files": [{"content": "\"\"\"group views\"\"\"\nfrom django.apps import apps\nfrom django.contrib.auth.decorators import login_required\nfrom django.db import IntegrityError\nfrom django.core.paginator import Paginator\nfrom 
django.http import HttpResponseBadRequest\nfrom django.shortcuts import get_object_or_404, redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom django.views.decorators.http import require_POST\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom django.db.models.functions import Greatest\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.suggested_users import suggested_users\nfrom .helpers import get_user_from_username\n\n# pylint: disable=no-self-use\nclass Group(View):\n \"\"\"group page\"\"\"\n\n def get(self, request, group_id):\n \"\"\"display a group\"\"\"\n\n group = get_object_or_404(models.Group, id=group_id)\n group.raise_visible_to_user(request.user)\n lists = (\n models.List.privacy_filter(request.user)\n .filter(group=group)\n .order_by(\"-updated_date\")\n )\n\n data = {\n \"group\": group,\n \"lists\": lists,\n \"group_form\": forms.GroupForm(instance=group, auto_id=\"group_form_id_%s\"),\n \"list_form\": forms.ListForm(),\n \"path\": \"/group\",\n }\n return TemplateResponse(request, \"groups/group.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n def post(self, request, group_id):\n \"\"\"edit a group\"\"\"\n user_group = get_object_or_404(models.Group, id=group_id)\n form = forms.GroupForm(request.POST, instance=user_group)\n if not form.is_valid():\n return redirect(\"group\", user_group.id)\n user_group = form.save()\n\n # let the other members know something about the group changed\n memberships = models.GroupMember.objects.filter(group=user_group)\n model = apps.get_model(\"bookwyrm.Notification\", require_ready=True)\n for field in form.changed_data:\n notification_type = (\n \"GROUP_PRIVACY\"\n if field == \"privacy\"\n else \"GROUP_NAME\"\n if field == \"name\"\n else \"GROUP_DESCRIPTION\"\n if field == \"description\"\n else None\n )\n if notification_type:\n for membership in memberships:\n member = membership.user\n if member != request.user:\n model.objects.create(\n user=member,\n related_user=request.user,\n related_group=user_group,\n notification_type=notification_type,\n )\n\n return redirect(\"group\", user_group.id)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserGroups(View):\n \"\"\"a user's groups page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a group\"\"\"\n user = get_user_from_username(request.user, username)\n groups = (\n models.Group.privacy_filter(request.user)\n .filter(memberships__user=user)\n .order_by(\"-updated_date\")\n )\n paginated = Paginator(groups, 12)\n\n data = {\n \"groups\": paginated.get_page(request.GET.get(\"page\")),\n \"is_self\": request.user.id == user.id,\n \"user\": user,\n \"group_form\": forms.GroupForm(),\n \"path\": user.local_path + \"/group\",\n }\n return TemplateResponse(request, \"user/groups.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request, username):\n \"\"\"create a user group\"\"\"\n form = forms.GroupForm(request.POST)\n if not form.is_valid():\n return redirect(request.user.local_path + \"/groups\")\n group = form.save()\n # add the creator as a group member\n models.GroupMember.objects.create(group=group, user=request.user)\n return redirect(\"group\", group.id)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass FindUsers(View):\n \"\"\"find friends to add to your group\"\"\"\n\n # this is mostly borrowed from the Get Started 
friend finder\n\n def get(self, request, group_id):\n \"\"\"basic profile info\"\"\"\n user_query = request.GET.get(\"user_query\")\n group = get_object_or_404(models.Group, id=group_id)\n lists = (\n models.List.privacy_filter(request.user)\n .filter(group=group)\n .order_by(\"-updated_date\")\n )\n\n if not group:\n return HttpResponseBadRequest()\n\n if not group.user == request.user:\n return HttpResponseBadRequest()\n\n user_results = (\n models.User.viewer_aware_objects(request.user)\n .exclude(\n memberships__in=group.memberships.all()\n ) # don't suggest users who are already members\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"username\", user_query),\n TrigramSimilarity(\"localname\", user_query),\n )\n )\n .filter(similarity__gt=0.5, local=True)\n .order_by(\"-similarity\")[:5]\n )\n no_results = not user_results\n\n if user_results.count() < 5:\n user_results = list(user_results) + suggested_users.get_suggestions(\n request.user, local=True\n )\n\n data = {\n \"suggested_users\": user_results,\n \"no_results\": no_results,\n \"group\": group,\n \"lists\": lists,\n \"group_form\": forms.GroupForm(instance=group, auto_id=\"group_form_id_%s\"),\n \"list_form\": forms.ListForm(),\n \"user_query\": user_query,\n \"requestor_is_manager\": request.user == group.user,\n }\n return TemplateResponse(request, \"groups/find_users.html\", data)\n\n\n@require_POST\n@login_required\ndef delete_group(request, group_id):\n \"\"\"delete a group\"\"\"\n group = get_object_or_404(models.Group, id=group_id)\n\n # only the owner can delete a group\n group.raise_not_deletable(request.user)\n\n # deal with any group lists\n models.List.objects.filter(group=group).update(curation=\"closed\", group=None)\n\n group.delete()\n return redirect(request.user.local_path + \"/groups\")\n\n\n@require_POST\n@login_required\ndef invite_member(request):\n \"\"\"invite a member to the group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n user = get_user_from_username(request.user, request.POST[\"user\"])\n\n if not group.user == request.user:\n return HttpResponseBadRequest()\n\n try:\n models.GroupMemberInvitation.objects.create(user=user, group=group)\n except IntegrityError:\n pass\n\n return redirect(user.local_path)\n\n\n@require_POST\n@login_required\ndef remove_member(request):\n \"\"\"remove a member from the group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n user = get_user_from_username(request.user, request.POST[\"user\"])\n\n # you can't be removed from your own group\n if user == group.user:\n return HttpResponseBadRequest()\n\n is_member = models.GroupMember.objects.filter(group=group, user=user).exists()\n is_invited = models.GroupMemberInvitation.objects.filter(\n group=group, user=user\n ).exists()\n\n if is_invited:\n try:\n invitation = models.GroupMemberInvitation.objects.get(\n user=user, group=group\n )\n\n invitation.reject()\n\n except IntegrityError:\n pass\n\n if is_member:\n try:\n models.List.remove_from_group(group.user, user)\n models.GroupMember.remove(group.user, user)\n except IntegrityError:\n pass\n\n memberships = models.GroupMember.objects.filter(group=group)\n model = apps.get_model(\"bookwyrm.Notification\", require_ready=True)\n notification_type = \"LEAVE\" if user == request.user else \"REMOVE\"\n # let the other members know about it\n for membership in memberships:\n member = membership.user\n if member != request.user:\n model.objects.create(\n user=member,\n related_user=user,\n 
related_group=group,\n notification_type=notification_type,\n )\n\n # let the user (now ex-member) know as well, if they were removed\n if notification_type == \"REMOVE\":\n model.objects.create(\n user=user,\n related_group=group,\n notification_type=notification_type,\n )\n\n return redirect(group.local_path)\n\n\n@require_POST\n@login_required\ndef accept_membership(request):\n \"\"\"accept an invitation to join a group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n invite = get_object_or_404(\n models.GroupMemberInvitation, group=group, user=request.user\n )\n\n try:\n invite.accept()\n except IntegrityError:\n pass\n\n return redirect(group.local_path)\n\n\n@require_POST\n@login_required\ndef reject_membership(request):\n \"\"\"reject an invitation to join a group\"\"\"\n group = get_object_or_404(models.Group, id=request.POST.get(\"group\"))\n invite = get_object_or_404(\n models.GroupMemberInvitation, group=group, user=request.user\n )\n\n invite.reject()\n return redirect(request.user.local_path)\n", "path": "bookwyrm/views/group.py"}]}
num_tokens: 3,071
num_tokens_diff: 434
problem_id: gh_patches_debug_14220
source: rasdani/github-patches
task_type: git_diff
in_source_id: fossasia__open-event-server-5229
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow bank, cheque and onsite for payment_mode in orders schema **Describe the bug** Currently setting bank, cheque and onsite for payment_mode returns 422 error. **Expected behavior** Payment Mode should allow bank, cheque and onsite as options. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/schema/orders.py` Content: ``` 1 from flask import request 2 from marshmallow import post_dump, validates_schema, validate 3 from marshmallow_jsonapi import fields 4 from marshmallow_jsonapi.flask import Relationship 5 6 from app import db 7 from app.api.helpers.utilities import dasherize 8 from app.api.schema.base import SoftDeletionSchema 9 from utils.common import use_defaults 10 11 12 class OnSiteTicketSchema(SoftDeletionSchema): 13 class Meta: 14 type_ = 'on-site-ticket' 15 inflect = dasherize 16 17 id = fields.Str(load_only=True, required=True) 18 quantity = fields.Str(load_only=True, required=True) 19 20 21 @use_defaults() 22 class OrderSchema(SoftDeletionSchema): 23 class Meta: 24 type_ = 'order' 25 self_view = 'v1.order_detail' 26 self_view_kwargs = {'order_identifier': '<identifier>'} 27 inflect = dasherize 28 29 @post_dump 30 def generate_payment_url(self, data): 31 """ 32 generate payment url for an order 33 :param data: 34 :return: 35 """ 36 if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \ 37 data["status"]: 38 if data['payment_mode'] == 'stripe': 39 data['payment_url'] = 'stripe://payment' 40 return data 41 42 @validates_schema 43 def initial_values(self, data): 44 if data.get('payment_mode') is None and 'POST' in request.method: 45 data['payment_mode'] = 'free' 46 return data 47 48 id = fields.Str(dump_only=True) 49 identifier = fields.Str(dump_only=True) 50 amount = fields.Float(validate=lambda n: n > 0, allow_none=True) 51 address = fields.Str(allow_none=True) 52 city = fields.Str(allow_none=True) 53 state = fields.Str(db.String, allow_none=True) 54 country = fields.Str(allow_none=True) 55 zipcode = fields.Str(allow_none=True) 56 completed_at = fields.DateTime(dump_only=True) 57 created_at = fields.DateTime(dump_only=True) 58 transaction_id = fields.Str(dump_only=True) 59 payment_mode = fields.Str(default="free", 60 validate=validate.OneOf(choices=["free", "stripe", "paypal"]), allow_none=True) 61 paid_via = fields.Str(dump_only=True) 62 brand = fields.Str(dump_only=True) 63 exp_month = fields.Str(dump_only=True) 64 exp_year = fields.Str(dump_only=True) 65 last4 = fields.Str(dump_only=True) 66 status = fields.Str(validate=validate.OneOf(choices=["pending", "cancelled", "completed", "placed", "expired"])) 67 discount_code_id = fields.Str(allow_none=True) 68 payment_url = fields.Str(dump_only=True) 69 cancel_note = fields.Str(allow_none=True) 70 order_notes = fields.Str(allow_none=True) 71 tickets_pdf_url = fields.Url(dump_only=True) 72 73 # only used in the case of an on site attendee. 
74 on_site_tickets = fields.List(cls_or_instance=fields.Nested(OnSiteTicketSchema), load_only=True, allow_none=True) 75 76 attendees = Relationship(attribute='ticket_holders', 77 self_view='v1.order_attendee', 78 self_view_kwargs={'order_identifier': '<identifier>'}, 79 related_view='v1.attendee_list', 80 related_view_kwargs={'order_identifier': '<identifier>'}, 81 schema='AttendeeSchemaPublic', 82 many=True, 83 type_='attendee') 84 85 tickets = Relationship(attribute='tickets', 86 self_view='v1.order_ticket', 87 self_view_kwargs={'order_identifier': '<identifier>'}, 88 related_view='v1.ticket_list', 89 related_view_kwargs={'order_identifier': '<identifier>'}, 90 schema='TicketSchemaPublic', 91 many=True, 92 type_="ticket") 93 94 user = Relationship(attribute='user', 95 self_view='v1.order_user', 96 self_view_kwargs={'order_identifier': '<identifier>'}, 97 related_view='v1.user_detail', 98 related_view_kwargs={'id': '<user_id>'}, 99 schema='UserSchemaPublic', 100 type_="user") 101 102 event = Relationship(attribute='event', 103 self_view='v1.order_event', 104 self_view_kwargs={'order_identifier': '<identifier>'}, 105 related_view='v1.event_detail', 106 related_view_kwargs={'id': '<event_id>'}, 107 schema='EventSchemaPublic', 108 type_="event") 109 110 marketer = Relationship(attribute='marketer', 111 self_view='v1.order_marketer', 112 self_view_kwargs={'order_identifier': '<identifier>'}, 113 related_view='v1.user_detail', 114 related_view_kwargs={'id': '<marketer_id>'}, 115 schema='UserSchemaPublic', 116 type_="user") 117 118 discount_code = Relationship(attribute='discount_code', 119 self_view='v1.order_discount', 120 self_view_kwargs={'order_identifier': '<identifier>'}, 121 related_view='v1.discount_code_detail', 122 related_view_kwargs={'id': '<discount_code_id>'}, 123 schema='DiscountCodeSchemaPublic', 124 type_="discount-code") 125 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/schema/orders.py b/app/api/schema/orders.py --- a/app/api/schema/orders.py +++ b/app/api/schema/orders.py @@ -56,8 +56,10 @@ completed_at = fields.DateTime(dump_only=True) created_at = fields.DateTime(dump_only=True) transaction_id = fields.Str(dump_only=True) - payment_mode = fields.Str(default="free", - validate=validate.OneOf(choices=["free", "stripe", "paypal"]), allow_none=True) + payment_mode = fields.Str( + default="free", + validate=validate.OneOf(choices=["free", "stripe", "paypal", "bank", "cheque", "onsite"]), + allow_none=True) paid_via = fields.Str(dump_only=True) brand = fields.Str(dump_only=True) exp_month = fields.Str(dump_only=True)
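The fix above is a one-line whitelist change: `validate.OneOf` is what turns an unlisted `payment_mode` into the 422 described in the issue, so adding `bank`, `cheque` and `onsite` to its choices is the entire behaviour change. A small standalone sketch of how that marshmallow validator behaves (simplified schema, not the real `OrderSchema`):

```python
from marshmallow import Schema, ValidationError, fields, validate


class PaymentSketch(Schema):
    payment_mode = fields.Str(
        validate=validate.OneOf(["free", "stripe", "paypal", "bank", "cheque", "onsite"])
    )


print(PaymentSketch().load({"payment_mode": "bank"}))  # accepted after the fix

try:
    PaymentSketch().load({"payment_mode": "bitcoin"})  # still rejected with a validation error
except ValidationError as err:
    print(err.messages)
```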
{"golden_diff": "diff --git a/app/api/schema/orders.py b/app/api/schema/orders.py\n--- a/app/api/schema/orders.py\n+++ b/app/api/schema/orders.py\n@@ -56,8 +56,10 @@\n completed_at = fields.DateTime(dump_only=True)\n created_at = fields.DateTime(dump_only=True)\n transaction_id = fields.Str(dump_only=True)\n- payment_mode = fields.Str(default=\"free\",\n- validate=validate.OneOf(choices=[\"free\", \"stripe\", \"paypal\"]), allow_none=True)\n+ payment_mode = fields.Str(\n+ default=\"free\",\n+ validate=validate.OneOf(choices=[\"free\", \"stripe\", \"paypal\", \"bank\", \"cheque\", \"onsite\"]),\n+ allow_none=True)\n paid_via = fields.Str(dump_only=True)\n brand = fields.Str(dump_only=True)\n exp_month = fields.Str(dump_only=True)\n", "issue": "Allow bank, cheque and onsite for payment_mode in orders schema\n**Describe the bug**\r\nCurrently setting bank, cheque and onsite for payment_mode returns 422 error.\r\n\r\n**Expected behavior**\r\nPayment Mode should allow bank, cheque and onsite as options.\n", "before_files": [{"content": "from flask import request\nfrom marshmallow import post_dump, validates_schema, validate\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\n\nfrom app import db\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom utils.common import use_defaults\n\n\nclass OnSiteTicketSchema(SoftDeletionSchema):\n class Meta:\n type_ = 'on-site-ticket'\n inflect = dasherize\n\n id = fields.Str(load_only=True, required=True)\n quantity = fields.Str(load_only=True, required=True)\n\n\n@use_defaults()\nclass OrderSchema(SoftDeletionSchema):\n class Meta:\n type_ = 'order'\n self_view = 'v1.order_detail'\n self_view_kwargs = {'order_identifier': '<identifier>'}\n inflect = dasherize\n\n @post_dump\n def generate_payment_url(self, data):\n \"\"\"\n generate payment url for an order\n :param data:\n :return:\n \"\"\"\n if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \\\n data[\"status\"]:\n if data['payment_mode'] == 'stripe':\n data['payment_url'] = 'stripe://payment'\n return data\n\n @validates_schema\n def initial_values(self, data):\n if data.get('payment_mode') is None and 'POST' in request.method:\n data['payment_mode'] = 'free'\n return data\n\n id = fields.Str(dump_only=True)\n identifier = fields.Str(dump_only=True)\n amount = fields.Float(validate=lambda n: n > 0, allow_none=True)\n address = fields.Str(allow_none=True)\n city = fields.Str(allow_none=True)\n state = fields.Str(db.String, allow_none=True)\n country = fields.Str(allow_none=True)\n zipcode = fields.Str(allow_none=True)\n completed_at = fields.DateTime(dump_only=True)\n created_at = fields.DateTime(dump_only=True)\n transaction_id = fields.Str(dump_only=True)\n payment_mode = fields.Str(default=\"free\",\n validate=validate.OneOf(choices=[\"free\", \"stripe\", \"paypal\"]), allow_none=True)\n paid_via = fields.Str(dump_only=True)\n brand = fields.Str(dump_only=True)\n exp_month = fields.Str(dump_only=True)\n exp_year = fields.Str(dump_only=True)\n last4 = fields.Str(dump_only=True)\n status = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"cancelled\", \"completed\", \"placed\", \"expired\"]))\n discount_code_id = fields.Str(allow_none=True)\n payment_url = fields.Str(dump_only=True)\n cancel_note = fields.Str(allow_none=True)\n order_notes = fields.Str(allow_none=True)\n tickets_pdf_url = fields.Url(dump_only=True)\n\n # only used in the case 
of an on site attendee.\n on_site_tickets = fields.List(cls_or_instance=fields.Nested(OnSiteTicketSchema), load_only=True, allow_none=True)\n\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.order_attendee',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.attendee_list',\n related_view_kwargs={'order_identifier': '<identifier>'},\n schema='AttendeeSchemaPublic',\n many=True,\n type_='attendee')\n\n tickets = Relationship(attribute='tickets',\n self_view='v1.order_ticket',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'order_identifier': '<identifier>'},\n schema='TicketSchemaPublic',\n many=True,\n type_=\"ticket\")\n\n user = Relationship(attribute='user',\n self_view='v1.order_user',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<user_id>'},\n schema='UserSchemaPublic',\n type_=\"user\")\n\n event = Relationship(attribute='event',\n self_view='v1.order_event',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.event_detail',\n related_view_kwargs={'id': '<event_id>'},\n schema='EventSchemaPublic',\n type_=\"event\")\n\n marketer = Relationship(attribute='marketer',\n self_view='v1.order_marketer',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<marketer_id>'},\n schema='UserSchemaPublic',\n type_=\"user\")\n\n discount_code = Relationship(attribute='discount_code',\n self_view='v1.order_discount',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.discount_code_detail',\n related_view_kwargs={'id': '<discount_code_id>'},\n schema='DiscountCodeSchemaPublic',\n type_=\"discount-code\")\n", "path": "app/api/schema/orders.py"}], "after_files": [{"content": "from flask import request\nfrom marshmallow import post_dump, validates_schema, validate\nfrom marshmallow_jsonapi import fields\nfrom marshmallow_jsonapi.flask import Relationship\n\nfrom app import db\nfrom app.api.helpers.utilities import dasherize\nfrom app.api.schema.base import SoftDeletionSchema\nfrom utils.common import use_defaults\n\n\nclass OnSiteTicketSchema(SoftDeletionSchema):\n class Meta:\n type_ = 'on-site-ticket'\n inflect = dasherize\n\n id = fields.Str(load_only=True, required=True)\n quantity = fields.Str(load_only=True, required=True)\n\n\n@use_defaults()\nclass OrderSchema(SoftDeletionSchema):\n class Meta:\n type_ = 'order'\n self_view = 'v1.order_detail'\n self_view_kwargs = {'order_identifier': '<identifier>'}\n inflect = dasherize\n\n @post_dump\n def generate_payment_url(self, data):\n \"\"\"\n generate payment url for an order\n :param data:\n :return:\n \"\"\"\n if 'POST' in request.method or ('GET' in request.method and 'regenerate' in request.args) and 'completed' != \\\n data[\"status\"]:\n if data['payment_mode'] == 'stripe':\n data['payment_url'] = 'stripe://payment'\n return data\n\n @validates_schema\n def initial_values(self, data):\n if data.get('payment_mode') is None and 'POST' in request.method:\n data['payment_mode'] = 'free'\n return data\n\n id = fields.Str(dump_only=True)\n identifier = fields.Str(dump_only=True)\n amount = fields.Float(validate=lambda n: n > 0, allow_none=True)\n address = fields.Str(allow_none=True)\n city = fields.Str(allow_none=True)\n state = fields.Str(db.String, allow_none=True)\n country = fields.Str(allow_none=True)\n zipcode = fields.Str(allow_none=True)\n 
completed_at = fields.DateTime(dump_only=True)\n created_at = fields.DateTime(dump_only=True)\n transaction_id = fields.Str(dump_only=True)\n payment_mode = fields.Str(\n default=\"free\",\n validate=validate.OneOf(choices=[\"free\", \"stripe\", \"paypal\", \"bank\", \"cheque\", \"onsite\"]),\n allow_none=True)\n paid_via = fields.Str(dump_only=True)\n brand = fields.Str(dump_only=True)\n exp_month = fields.Str(dump_only=True)\n exp_year = fields.Str(dump_only=True)\n last4 = fields.Str(dump_only=True)\n status = fields.Str(validate=validate.OneOf(choices=[\"pending\", \"cancelled\", \"completed\", \"placed\", \"expired\"]))\n discount_code_id = fields.Str(allow_none=True)\n payment_url = fields.Str(dump_only=True)\n cancel_note = fields.Str(allow_none=True)\n order_notes = fields.Str(allow_none=True)\n tickets_pdf_url = fields.Url(dump_only=True)\n\n # only used in the case of an on site attendee.\n on_site_tickets = fields.List(cls_or_instance=fields.Nested(OnSiteTicketSchema), load_only=True, allow_none=True)\n\n attendees = Relationship(attribute='ticket_holders',\n self_view='v1.order_attendee',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.attendee_list',\n related_view_kwargs={'order_identifier': '<identifier>'},\n schema='AttendeeSchemaPublic',\n many=True,\n type_='attendee')\n\n tickets = Relationship(attribute='tickets',\n self_view='v1.order_ticket',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.ticket_list',\n related_view_kwargs={'order_identifier': '<identifier>'},\n schema='TicketSchemaPublic',\n many=True,\n type_=\"ticket\")\n\n user = Relationship(attribute='user',\n self_view='v1.order_user',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<user_id>'},\n schema='UserSchemaPublic',\n type_=\"user\")\n\n event = Relationship(attribute='event',\n self_view='v1.order_event',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.event_detail',\n related_view_kwargs={'id': '<event_id>'},\n schema='EventSchemaPublic',\n type_=\"event\")\n\n marketer = Relationship(attribute='marketer',\n self_view='v1.order_marketer',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.user_detail',\n related_view_kwargs={'id': '<marketer_id>'},\n schema='UserSchemaPublic',\n type_=\"user\")\n\n discount_code = Relationship(attribute='discount_code',\n self_view='v1.order_discount',\n self_view_kwargs={'order_identifier': '<identifier>'},\n related_view='v1.discount_code_detail',\n related_view_kwargs={'id': '<discount_code_id>'},\n schema='DiscountCodeSchemaPublic',\n type_=\"discount-code\")\n", "path": "app/api/schema/orders.py"}]}
1,648
190
gh_patches_debug_39642
rasdani/github-patches
git_diff
gammapy__gammapy-3504
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- LightCurveEstimator fails if no dataset contribute in a time bin **Gammapy version** dev **Bug description** If you run the `LightCurveEstimator` and no dataset are found to contribute in a time interval, it will fail because the `FluxPoints` produced has no `npred` quantity attached. This is because the `ParameterEstimator.estimate_ts` only return `ts` as `nan` before estimating the `npred`. https://github.com/gammapy/gammapy/blob/d1ede2663429253b72011b9eda256ed2b48f8e9c/gammapy/estimators/parameter.py#L110 **Expected behavior** The estimator should run and return `nan` for `npred` as well. **To Reproduce** If you want to test, use the light curve validation, script and change the energy range to e.g. 0.5-0.7 TeV in line https://github.com/gammapy/gammapy-benchmarks/blob/8d02a809583a8ddf3d222c5b7db2b542c7f2addf/validation/lightcurve/make.py#L108 it will yield: ``` Traceback (most recent call last): File "make.py", line 278, in <module> cli() File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1137, in __call__ return self.main(*args, **kwargs) File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1062, in main rv = self.invoke(ctx) File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1668, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 1404, in invoke return ctx.invoke(self.callback, **ctx.params) File "/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py", line 763, in invoke return __callback(*args, **kwargs) File "make.py", line 69, in run_analyses perform_analysis(analysis_type, short_observations, target_position, time_intervals) File "make.py", line 114, in perform_analysis lc = lc_maker.run(datasets) File "/Users/terrier/Code/gammapy-dev/gammapy/gammapy/estimators/lightcurve.py", line 126, in run fp._data[name], dataset_names=datasets.names File "/Users/terrier/Code/gammapy-dev/gammapy/gammapy/maps/maps.py", line 46, in __getitem__ return self._data[key] KeyError: 'npred' ``` **Other information** Any other information you think will be useful for us to fix the issue can go here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gammapy/estimators/parameter.py` Content: ``` 1 # Licensed under a 3-clause BSD style license - see LICENSE.rst 2 import logging 3 import numpy as np 4 from gammapy.datasets import Datasets 5 from gammapy.modeling import Fit 6 from .core import Estimator 7 8 log = logging.getLogger(__name__) 9 10 11 class ParameterEstimator(Estimator): 12 """Model parameter estimator. 13 14 Estimates a model parameter for a group of datasets. Compute best fit value, 15 symmetric and delta TS for a given null value. Additionally asymmetric errors 16 as well as parameter upper limit and fit statistic profile can be estimated. 17 18 Parameters 19 ---------- 20 n_sigma : int 21 Sigma to use for asymmetric error computation. Default is 1. 22 n_sigma_ul : int 23 Sigma to use for upper limit computation. Default is 2. 24 null_value : float 25 Which null value to use for the parameter 26 selection_optional : list of str 27 Which additional quantities to estimate. 
Available options are: 28 29 * "all": all the optional steps are executed 30 * "errn-errp": estimate asymmetric errors on parameter best fit value. 31 * "ul": estimate upper limits. 32 * "scan": estimate fit statistic profiles. 33 34 Default is None so the optionnal steps are not executed. 35 fit : `Fit` 36 Fit instance specifying the backend and fit options. 37 reoptimize : bool 38 Re-optimize other free model parameters. Default is True. 39 """ 40 41 tag = "ParameterEstimator" 42 _available_selection_optional = ["errn-errp", "ul", "scan"] 43 44 def __init__( 45 self, 46 n_sigma=1, 47 n_sigma_ul=2, 48 null_value=1e-150, 49 selection_optional=None, 50 fit=None, 51 reoptimize=True 52 ): 53 self.n_sigma = n_sigma 54 self.n_sigma_ul = n_sigma_ul 55 self.null_value = null_value 56 self.selection_optional = selection_optional 57 58 if fit is None: 59 fit = Fit() 60 61 self.fit = fit 62 self.reoptimize = reoptimize 63 64 def estimate_best_fit(self, datasets, parameter): 65 """Estimate parameter assymetric errors 66 67 Parameters 68 ---------- 69 datasets : `~gammapy.datasets.Datasets` 70 Datasets 71 parameter : `Parameter` 72 For which parameter to get the value 73 74 Returns 75 ------- 76 result : dict 77 Dict with the various parameter estimation values. 78 """ 79 value, total_stat, success, error = np.nan, 0, False, np.nan 80 81 if np.any(datasets.contributes_to_stat): 82 result = self.fit.run(datasets=datasets) 83 value, error = parameter.value, parameter.error 84 total_stat = result["optimize_result"].total_stat 85 success = result["optimize_result"].success 86 87 return { 88 f"{parameter.name}": value, 89 "stat": total_stat, 90 "success": success, 91 f"{parameter.name}_err": error * self.n_sigma, 92 } 93 94 def estimate_ts(self, datasets, parameter): 95 """Estimate parameter ts 96 97 Parameters 98 ---------- 99 datasets : `~gammapy.datasets.Datasets` 100 Datasets 101 parameter : `Parameter` 102 For which parameter to get the value 103 104 Returns 105 ------- 106 result : dict 107 Dict with the various parameter estimation values. 108 """ 109 if not np.any(datasets.contributes_to_stat): 110 return {"ts": np.nan} 111 112 stat = datasets.stat_sum() 113 npred = self.estimate_npred(datasets=datasets) 114 115 with datasets.parameters.restore_status(): 116 # compute ts value 117 parameter.value = self.null_value 118 119 if self.reoptimize: 120 parameter.frozen = True 121 _ = self.fit.optimize(datasets=datasets) 122 123 ts = datasets.stat_sum() - stat 124 npred_null = self.estimate_npred(datasets=datasets) 125 126 return { 127 "ts": ts, 128 "npred": npred["npred"], 129 "npred_null": npred_null["npred"] 130 } 131 132 def estimate_errn_errp(self, datasets, parameter): 133 """Estimate parameter assymetric errors 134 135 Parameters 136 ---------- 137 datasets : `~gammapy.datasets.Datasets` 138 Datasets 139 parameter : `Parameter` 140 For which parameter to get the value 141 142 Returns 143 ------- 144 result : dict 145 Dict with the various parameter estimation values. 146 """ 147 if not np.any(datasets.contributes_to_stat): 148 return { 149 f"{parameter.name}_errp": np.nan, 150 f"{parameter.name}_errn": np.nan, 151 } 152 153 self.fit.optimize(datasets=datasets) 154 155 res = self.fit.confidence( 156 datasets=datasets, 157 parameter=parameter, 158 sigma=self.n_sigma, 159 reoptimize=self.reoptimize 160 ) 161 162 return { 163 f"{parameter.name}_errp": res["errp"], 164 f"{parameter.name}_errn": res["errn"], 165 } 166 167 def estimate_scan(self, datasets, parameter): 168 """Estimate parameter stat scan. 
169 170 Parameters 171 ---------- 172 datasets : `~gammapy.datasets.Datasets` 173 The datasets used to estimate the model parameter 174 parameter : `Parameter` 175 For which parameter to get the value 176 177 Returns 178 ------- 179 result : dict 180 Dict with the various parameter estimation values. 181 182 """ 183 scan_values = parameter.scan_values 184 185 if not np.any(datasets.contributes_to_stat): 186 return { 187 f"{parameter.name}_scan": scan_values, 188 "stat_scan": scan_values * np.nan 189 } 190 191 self.fit.optimize(datasets=datasets) 192 193 profile = self.fit.stat_profile( 194 datasets=datasets, 195 parameter=parameter, 196 reoptimize=self.reoptimize 197 ) 198 199 return { 200 f"{parameter.name}_scan": scan_values, 201 "stat_scan": profile["stat_scan"], 202 } 203 204 def estimate_ul(self, datasets, parameter): 205 """Estimate parameter ul. 206 207 Parameters 208 ---------- 209 datasets : `~gammapy.datasets.Datasets` 210 The datasets used to estimate the model parameter 211 parameter : `Parameter` 212 For which parameter to get the value 213 214 Returns 215 ------- 216 result : dict 217 Dict with the various parameter estimation values. 218 219 """ 220 if not np.any(datasets.contributes_to_stat): 221 return {f"{parameter.name}_ul": np.nan} 222 223 self.fit.optimize(datasets=datasets) 224 225 res = self.fit.confidence( 226 datasets=datasets, 227 parameter=parameter, 228 sigma=self.n_sigma_ul, 229 reoptimize=self.reoptimize 230 ) 231 return {f"{parameter.name}_ul": res["errp"] + parameter.value} 232 233 @staticmethod 234 def estimate_counts(datasets): 235 """Estimate counts for the flux point. 236 237 Parameters 238 ---------- 239 datasets : Datasets 240 Datasets 241 242 Returns 243 ------- 244 result : dict 245 Dict with an array with one entry per dataset with the sum of the 246 masked counts. 247 """ 248 counts = [] 249 250 for dataset in datasets: 251 mask = dataset.mask 252 counts.append(dataset.counts.data[mask].sum()) 253 254 return {"counts": np.array(counts, dtype=int), "datasets": datasets.names} 255 256 @staticmethod 257 def estimate_npred(datasets): 258 """Estimate npred for the flux point. 259 260 Parameters 261 ---------- 262 datasets : Datasets 263 Datasets 264 265 Returns 266 ------- 267 result : dict 268 Dict with an array with one entry per dataset with the sum of the 269 masked npred. 270 """ 271 npred = [] 272 273 for dataset in datasets: 274 mask = dataset.mask 275 npred.append(dataset.npred().data[mask].sum()) 276 277 return {"npred": np.array(npred), "datasets": datasets.names} 278 279 def run(self, datasets, parameter): 280 """Run the parameter estimator. 281 282 Parameters 283 ---------- 284 datasets : `~gammapy.datasets.Datasets` 285 The datasets used to estimate the model parameter 286 parameter : `str` or `Parameter` 287 For which parameter to run the estimator 288 289 Returns 290 ------- 291 result : dict 292 Dict with the various parameter estimation values. 
293 """ 294 datasets = Datasets(datasets) 295 parameter = datasets.parameters[parameter] 296 297 with datasets.parameters.restore_status(): 298 299 if not self.reoptimize: 300 datasets.parameters.freeze_all() 301 parameter.frozen = False 302 303 result = self.estimate_best_fit(datasets, parameter) 304 result.update(self.estimate_ts(datasets, parameter)) 305 306 if "errn-errp" in self.selection_optional: 307 result.update(self.estimate_errn_errp(datasets, parameter)) 308 309 if "ul" in self.selection_optional: 310 result.update(self.estimate_ul(datasets, parameter)) 311 312 if "scan" in self.selection_optional: 313 result.update(self.estimate_scan(datasets, parameter)) 314 315 result.update(self.estimate_counts(datasets)) 316 return result 317 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gammapy/estimators/parameter.py b/gammapy/estimators/parameter.py --- a/gammapy/estimators/parameter.py +++ b/gammapy/estimators/parameter.py @@ -74,7 +74,12 @@ Returns ------- result : dict - Dict with the various parameter estimation values. + Dict with the various parameter estimation values. Entries are: + + * parameter.name: best fit parameter value + * "stat": best fit total stat. + * "success": boolean flag for fit success + * parameter.name_err: covariance-based error estimate on parameter value """ value, total_stat, success, error = np.nan, 0, False, np.nan @@ -104,14 +109,20 @@ Returns ------- result : dict - Dict with the various parameter estimation values. - """ - if not np.any(datasets.contributes_to_stat): - return {"ts": np.nan} + Dict with the TS of the best fit value compared to the null hypothesis. Entries are: - stat = datasets.stat_sum() + * TS : fit statistic difference with null hypothesis + * "npred" : predicted number of counts per dataset + * "npred_null" : predicted number of counts per dataset in the null hypothesis + """ npred = self.estimate_npred(datasets=datasets) + if not np.any(datasets.contributes_to_stat): + stat = np.nan + npred["npred"][...] = np.nan + else: + stat = datasets.stat_sum() + with datasets.parameters.restore_status(): # compute ts value parameter.value = self.null_value @@ -142,7 +153,10 @@ Returns ------- result : dict - Dict with the various parameter estimation values. + Dict with the parameter asymmetric errors. Entries are: + + * parameter.name_errp : positive error on parameter value + * parameter.name_errn : negative error on parameter value """ if not np.any(datasets.contributes_to_stat): return { @@ -177,8 +191,10 @@ Returns ------- result : dict - Dict with the various parameter estimation values. + Dict with the parameter fit scan values. Entries are: + * parameter.name_scan : parameter values scan + * "stat_scan" : fit statistic values scan """ scan_values = parameter.scan_values @@ -214,8 +230,9 @@ Returns ------- result : dict - Dict with the various parameter estimation values. - + Dict with the parameter ULs. Entries are: + + * parameter.name_ul : upper limit on parameter value """ if not np.any(datasets.contributes_to_stat): return {f"{parameter.name}_ul": np.nan}
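A minimal, hypothetical sketch of the guard this patch introduces (illustrative names only, not the actual gammapy API): `estimate_ts` now computes `npred` first and falls back to NaN for both the fit statistic and `npred` when no dataset contributes, instead of returning early with only `ts`.

```python
import numpy as np

def ts_inputs_with_nan_fallback(contributes_to_stat, npred_values, stat_sum):
    """Sketch of the patched control flow: return (stat, npred) with NaN fallbacks."""
    npred = np.asarray(npred_values, dtype=float)
    if not np.any(contributes_to_stat):
        # No contributing dataset: report NaN rather than omitting the npred entry.
        return np.nan, np.full_like(npred, np.nan)
    return stat_sum, npred
```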
{"golden_diff": "diff --git a/gammapy/estimators/parameter.py b/gammapy/estimators/parameter.py\n--- a/gammapy/estimators/parameter.py\n+++ b/gammapy/estimators/parameter.py\n@@ -74,7 +74,12 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n+ Dict with the various parameter estimation values. Entries are:\n+\n+ * parameter.name: best fit parameter value\n+ * \"stat\": best fit total stat.\n+ * \"success\": boolean flag for fit success\n+ * parameter.name_err: covariance-based error estimate on parameter value\n \"\"\"\n value, total_stat, success, error = np.nan, 0, False, np.nan\n \n@@ -104,14 +109,20 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n- \"\"\"\n- if not np.any(datasets.contributes_to_stat):\n- return {\"ts\": np.nan}\n+ Dict with the TS of the best fit value compared to the null hypothesis. Entries are:\n \n- stat = datasets.stat_sum()\n+ * TS : fit statistic difference with null hypothesis\n+ * \"npred\" : predicted number of counts per dataset\n+ * \"npred_null\" : predicted number of counts per dataset in the null hypothesis\n+ \"\"\"\n npred = self.estimate_npred(datasets=datasets)\n \n+ if not np.any(datasets.contributes_to_stat):\n+ stat = np.nan\n+ npred[\"npred\"][...] = np.nan\n+ else:\n+ stat = datasets.stat_sum()\n+\n with datasets.parameters.restore_status():\n # compute ts value\n parameter.value = self.null_value\n@@ -142,7 +153,10 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n+ Dict with the parameter asymmetric errors. Entries are:\n+\n+ * parameter.name_errp : positive error on parameter value\n+ * parameter.name_errn : negative error on parameter value\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\n@@ -177,8 +191,10 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n+ Dict with the parameter fit scan values. Entries are:\n \n+ * parameter.name_scan : parameter values scan\n+ * \"stat_scan\" : fit statistic values scan\n \"\"\"\n scan_values = parameter.scan_values\n \n@@ -214,8 +230,9 @@\n Returns\n -------\n result : dict\n- Dict with the various parameter estimation values.\n-\n+ Dict with the parameter ULs. Entries are:\n+ \n+ * parameter.name_ul : upper limit on parameter value\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {f\"{parameter.name}_ul\": np.nan}\n", "issue": "LightCurveEstimator fails if no dataset contribute in a time bin\n**Gammapy version**\r\ndev\r\n\r\n**Bug description**\r\nIf you run the `LightCurveEstimator` and no dataset are found to contribute in a time interval, it will fail because the `FluxPoints` produced has no `npred` quantity attached. \r\nThis is because the `ParameterEstimator.estimate_ts` only return `ts` as `nan` before estimating the `npred`. \r\nhttps://github.com/gammapy/gammapy/blob/d1ede2663429253b72011b9eda256ed2b48f8e9c/gammapy/estimators/parameter.py#L110\r\n\r\n\r\n**Expected behavior**\r\nThe estimator should run and return `nan` for `npred` as well.\r\n\r\n**To Reproduce**\r\nIf you want to test, use the light curve validation, script and change the energy range to e.g. 
0.5-0.7 TeV in line https://github.com/gammapy/gammapy-benchmarks/blob/8d02a809583a8ddf3d222c5b7db2b542c7f2addf/validation/lightcurve/make.py#L108\r\n\r\nit will yield:\r\n```\r\nTraceback (most recent call last):\r\n File \"make.py\", line 278, in <module>\r\n cli()\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1137, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1062, in main\r\n rv = self.invoke(ctx)\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1668, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 1404, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/Users/terrier/Code/anaconda3/envs/gammapy-dev/lib/python3.7/site-packages/click/core.py\", line 763, in invoke\r\n return __callback(*args, **kwargs)\r\n File \"make.py\", line 69, in run_analyses\r\n perform_analysis(analysis_type, short_observations, target_position, time_intervals)\r\n File \"make.py\", line 114, in perform_analysis\r\n lc = lc_maker.run(datasets)\r\n File \"/Users/terrier/Code/gammapy-dev/gammapy/gammapy/estimators/lightcurve.py\", line 126, in run\r\n fp._data[name], dataset_names=datasets.names\r\n File \"/Users/terrier/Code/gammapy-dev/gammapy/gammapy/maps/maps.py\", line 46, in __getitem__\r\n return self._data[key]\r\nKeyError: 'npred'\r\n```\r\n\r\n**Other information**\r\nAny other information you think will be useful for us to fix the issue can go here.\r\n\n", "before_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom gammapy.datasets import Datasets\nfrom gammapy.modeling import Fit\nfrom .core import Estimator\n\nlog = logging.getLogger(__name__)\n\n\nclass ParameterEstimator(Estimator):\n \"\"\"Model parameter estimator.\n\n Estimates a model parameter for a group of datasets. Compute best fit value,\n symmetric and delta TS for a given null value. Additionally asymmetric errors\n as well as parameter upper limit and fit statistic profile can be estimated.\n\n Parameters\n ----------\n n_sigma : int\n Sigma to use for asymmetric error computation. Default is 1.\n n_sigma_ul : int\n Sigma to use for upper limit computation. Default is 2.\n null_value : float\n Which null value to use for the parameter\n selection_optional : list of str\n Which additional quantities to estimate. Available options are:\n\n * \"all\": all the optional steps are executed\n * \"errn-errp\": estimate asymmetric errors on parameter best fit value.\n * \"ul\": estimate upper limits.\n * \"scan\": estimate fit statistic profiles.\n\n Default is None so the optionnal steps are not executed.\n fit : `Fit`\n Fit instance specifying the backend and fit options.\n reoptimize : bool\n Re-optimize other free model parameters. 
Default is True.\n \"\"\"\n\n tag = \"ParameterEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n n_sigma=1,\n n_sigma_ul=2,\n null_value=1e-150,\n selection_optional=None,\n fit=None,\n reoptimize=True\n ):\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n self.null_value = null_value\n self.selection_optional = selection_optional\n\n if fit is None:\n fit = Fit()\n\n self.fit = fit\n self.reoptimize = reoptimize\n\n def estimate_best_fit(self, datasets, parameter):\n \"\"\"Estimate parameter assymetric errors\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n value, total_stat, success, error = np.nan, 0, False, np.nan\n\n if np.any(datasets.contributes_to_stat):\n result = self.fit.run(datasets=datasets)\n value, error = parameter.value, parameter.error\n total_stat = result[\"optimize_result\"].total_stat\n success = result[\"optimize_result\"].success\n\n return {\n f\"{parameter.name}\": value,\n \"stat\": total_stat,\n \"success\": success,\n f\"{parameter.name}_err\": error * self.n_sigma,\n }\n\n def estimate_ts(self, datasets, parameter):\n \"\"\"Estimate parameter ts\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\"ts\": np.nan}\n\n stat = datasets.stat_sum()\n npred = self.estimate_npred(datasets=datasets)\n\n with datasets.parameters.restore_status():\n # compute ts value\n parameter.value = self.null_value\n\n if self.reoptimize:\n parameter.frozen = True\n _ = self.fit.optimize(datasets=datasets)\n\n ts = datasets.stat_sum() - stat\n npred_null = self.estimate_npred(datasets=datasets)\n\n return {\n \"ts\": ts,\n \"npred\": npred[\"npred\"],\n \"npred_null\": npred_null[\"npred\"]\n }\n\n def estimate_errn_errp(self, datasets, parameter):\n \"\"\"Estimate parameter assymetric errors\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_errp\": np.nan,\n f\"{parameter.name}_errn\": np.nan,\n }\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma,\n reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_errp\": res[\"errp\"],\n f\"{parameter.name}_errn\": res[\"errn\"],\n }\n\n def estimate_scan(self, datasets, parameter):\n \"\"\"Estimate parameter stat scan.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n\n \"\"\"\n scan_values = parameter.scan_values\n\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": scan_values * np.nan\n }\n\n self.fit.optimize(datasets=datasets)\n\n profile = self.fit.stat_profile(\n 
datasets=datasets,\n parameter=parameter,\n reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": profile[\"stat_scan\"],\n }\n\n def estimate_ul(self, datasets, parameter):\n \"\"\"Estimate parameter ul.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {f\"{parameter.name}_ul\": np.nan}\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma_ul,\n reoptimize=self.reoptimize\n )\n return {f\"{parameter.name}_ul\": res[\"errp\"] + parameter.value}\n\n @staticmethod\n def estimate_counts(datasets):\n \"\"\"Estimate counts for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with the sum of the\n masked counts.\n \"\"\"\n counts = []\n\n for dataset in datasets:\n mask = dataset.mask\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": np.array(counts, dtype=int), \"datasets\": datasets.names}\n\n @staticmethod\n def estimate_npred(datasets):\n \"\"\"Estimate npred for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with the sum of the\n masked npred.\n \"\"\"\n npred = []\n\n for dataset in datasets:\n mask = dataset.mask\n npred.append(dataset.npred().data[mask].sum())\n\n return {\"npred\": np.array(npred), \"datasets\": datasets.names}\n\n def run(self, datasets, parameter):\n \"\"\"Run the parameter estimator.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `str` or `Parameter`\n For which parameter to run the estimator\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n datasets = Datasets(datasets)\n parameter = datasets.parameters[parameter]\n\n with datasets.parameters.restore_status():\n\n if not self.reoptimize:\n datasets.parameters.freeze_all()\n parameter.frozen = False\n\n result = self.estimate_best_fit(datasets, parameter)\n result.update(self.estimate_ts(datasets, parameter))\n\n if \"errn-errp\" in self.selection_optional:\n result.update(self.estimate_errn_errp(datasets, parameter))\n\n if \"ul\" in self.selection_optional:\n result.update(self.estimate_ul(datasets, parameter))\n\n if \"scan\" in self.selection_optional:\n result.update(self.estimate_scan(datasets, parameter))\n\n result.update(self.estimate_counts(datasets))\n return result\n", "path": "gammapy/estimators/parameter.py"}], "after_files": [{"content": "# Licensed under a 3-clause BSD style license - see LICENSE.rst\nimport logging\nimport numpy as np\nfrom gammapy.datasets import Datasets\nfrom gammapy.modeling import Fit\nfrom .core import Estimator\n\nlog = logging.getLogger(__name__)\n\n\nclass ParameterEstimator(Estimator):\n \"\"\"Model parameter estimator.\n\n Estimates a model parameter for a group of datasets. Compute best fit value,\n symmetric and delta TS for a given null value. 
Additionally asymmetric errors\n as well as parameter upper limit and fit statistic profile can be estimated.\n\n Parameters\n ----------\n n_sigma : int\n Sigma to use for asymmetric error computation. Default is 1.\n n_sigma_ul : int\n Sigma to use for upper limit computation. Default is 2.\n null_value : float\n Which null value to use for the parameter\n selection_optional : list of str\n Which additional quantities to estimate. Available options are:\n\n * \"all\": all the optional steps are executed\n * \"errn-errp\": estimate asymmetric errors on parameter best fit value.\n * \"ul\": estimate upper limits.\n * \"scan\": estimate fit statistic profiles.\n\n Default is None so the optionnal steps are not executed.\n fit : `Fit`\n Fit instance specifying the backend and fit options.\n reoptimize : bool\n Re-optimize other free model parameters. Default is True.\n \"\"\"\n\n tag = \"ParameterEstimator\"\n _available_selection_optional = [\"errn-errp\", \"ul\", \"scan\"]\n\n def __init__(\n self,\n n_sigma=1,\n n_sigma_ul=2,\n null_value=1e-150,\n selection_optional=None,\n fit=None,\n reoptimize=True\n ):\n self.n_sigma = n_sigma\n self.n_sigma_ul = n_sigma_ul\n self.null_value = null_value\n self.selection_optional = selection_optional\n\n if fit is None:\n fit = Fit()\n\n self.fit = fit\n self.reoptimize = reoptimize\n\n def estimate_best_fit(self, datasets, parameter):\n \"\"\"Estimate parameter assymetric errors\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values. Entries are:\n\n * parameter.name: best fit parameter value\n * \"stat\": best fit total stat.\n * \"success\": boolean flag for fit success\n * parameter.name_err: covariance-based error estimate on parameter value\n \"\"\"\n value, total_stat, success, error = np.nan, 0, False, np.nan\n\n if np.any(datasets.contributes_to_stat):\n result = self.fit.run(datasets=datasets)\n value, error = parameter.value, parameter.error\n total_stat = result[\"optimize_result\"].total_stat\n success = result[\"optimize_result\"].success\n\n return {\n f\"{parameter.name}\": value,\n \"stat\": total_stat,\n \"success\": success,\n f\"{parameter.name}_err\": error * self.n_sigma,\n }\n\n def estimate_ts(self, datasets, parameter):\n \"\"\"Estimate parameter ts\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the TS of the best fit value compared to the null hypothesis. Entries are:\n\n * TS : fit statistic difference with null hypothesis\n * \"npred\" : predicted number of counts per dataset\n * \"npred_null\" : predicted number of counts per dataset in the null hypothesis\n \"\"\"\n npred = self.estimate_npred(datasets=datasets)\n\n if not np.any(datasets.contributes_to_stat):\n stat = np.nan\n npred[\"npred\"][...] 
= np.nan\n else:\n stat = datasets.stat_sum()\n\n with datasets.parameters.restore_status():\n # compute ts value\n parameter.value = self.null_value\n\n if self.reoptimize:\n parameter.frozen = True\n _ = self.fit.optimize(datasets=datasets)\n\n ts = datasets.stat_sum() - stat\n npred_null = self.estimate_npred(datasets=datasets)\n\n return {\n \"ts\": ts,\n \"npred\": npred[\"npred\"],\n \"npred_null\": npred_null[\"npred\"]\n }\n\n def estimate_errn_errp(self, datasets, parameter):\n \"\"\"Estimate parameter assymetric errors\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n Datasets\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the parameter asymmetric errors. Entries are:\n\n * parameter.name_errp : positive error on parameter value\n * parameter.name_errn : negative error on parameter value\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_errp\": np.nan,\n f\"{parameter.name}_errn\": np.nan,\n }\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma,\n reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_errp\": res[\"errp\"],\n f\"{parameter.name}_errn\": res[\"errn\"],\n }\n\n def estimate_scan(self, datasets, parameter):\n \"\"\"Estimate parameter stat scan.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the parameter fit scan values. Entries are:\n\n * parameter.name_scan : parameter values scan\n * \"stat_scan\" : fit statistic values scan\n \"\"\"\n scan_values = parameter.scan_values\n\n if not np.any(datasets.contributes_to_stat):\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": scan_values * np.nan\n }\n\n self.fit.optimize(datasets=datasets)\n\n profile = self.fit.stat_profile(\n datasets=datasets,\n parameter=parameter,\n reoptimize=self.reoptimize\n )\n\n return {\n f\"{parameter.name}_scan\": scan_values,\n \"stat_scan\": profile[\"stat_scan\"],\n }\n\n def estimate_ul(self, datasets, parameter):\n \"\"\"Estimate parameter ul.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `Parameter`\n For which parameter to get the value\n\n Returns\n -------\n result : dict\n Dict with the parameter ULs. 
Entries are:\n \n * parameter.name_ul : upper limit on parameter value\n \"\"\"\n if not np.any(datasets.contributes_to_stat):\n return {f\"{parameter.name}_ul\": np.nan}\n\n self.fit.optimize(datasets=datasets)\n\n res = self.fit.confidence(\n datasets=datasets,\n parameter=parameter,\n sigma=self.n_sigma_ul,\n reoptimize=self.reoptimize\n )\n return {f\"{parameter.name}_ul\": res[\"errp\"] + parameter.value}\n\n @staticmethod\n def estimate_counts(datasets):\n \"\"\"Estimate counts for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with the sum of the\n masked counts.\n \"\"\"\n counts = []\n\n for dataset in datasets:\n mask = dataset.mask\n counts.append(dataset.counts.data[mask].sum())\n\n return {\"counts\": np.array(counts, dtype=int), \"datasets\": datasets.names}\n\n @staticmethod\n def estimate_npred(datasets):\n \"\"\"Estimate npred for the flux point.\n\n Parameters\n ----------\n datasets : Datasets\n Datasets\n\n Returns\n -------\n result : dict\n Dict with an array with one entry per dataset with the sum of the\n masked npred.\n \"\"\"\n npred = []\n\n for dataset in datasets:\n mask = dataset.mask\n npred.append(dataset.npred().data[mask].sum())\n\n return {\"npred\": np.array(npred), \"datasets\": datasets.names}\n\n def run(self, datasets, parameter):\n \"\"\"Run the parameter estimator.\n\n Parameters\n ----------\n datasets : `~gammapy.datasets.Datasets`\n The datasets used to estimate the model parameter\n parameter : `str` or `Parameter`\n For which parameter to run the estimator\n\n Returns\n -------\n result : dict\n Dict with the various parameter estimation values.\n \"\"\"\n datasets = Datasets(datasets)\n parameter = datasets.parameters[parameter]\n\n with datasets.parameters.restore_status():\n\n if not self.reoptimize:\n datasets.parameters.freeze_all()\n parameter.frozen = False\n\n result = self.estimate_best_fit(datasets, parameter)\n result.update(self.estimate_ts(datasets, parameter))\n\n if \"errn-errp\" in self.selection_optional:\n result.update(self.estimate_errn_errp(datasets, parameter))\n\n if \"ul\" in self.selection_optional:\n result.update(self.estimate_ul(datasets, parameter))\n\n if \"scan\" in self.selection_optional:\n result.update(self.estimate_scan(datasets, parameter))\n\n result.update(self.estimate_counts(datasets))\n return result\n", "path": "gammapy/estimators/parameter.py"}]}
3,802
658
gh_patches_debug_25006
rasdani/github-patches
git_diff
PrefectHQ__prefect-11253
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Windows prefect deploy does not work with docker ### First check - [X] I added a descriptive title to this issue. - [X] I used the GitHub search to find a similar issue and didn't find it. - [X] I searched the Prefect documentation for this issue. - [X] I checked that this issue is related to Prefect and not one of its dependencies. ### Bug summary prefect deploy -n "name of deployment" does not work anymore on Windows. ### Reproduction ```python3 prefect deploy -n ``` ### Error ```python3 Running deployment build steps... > Running run_shell_script step... > Running build_docker_image step... Unable to load step function: prefect_docker.deployments.steps.build_docker_image. Attempting install of prefect-docker>=0.3.11. Traceback (most recent call last): File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 149, in run_steps step_output = await run_step(step, upstream_outputs) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 119, in run_step step_func = _get_function_for_step(fqn, requires=keywords.get("requires")) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 83, in _get_function_for_step subprocess.check_call( File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 408, in check_call retcode = call(*popenargs, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 389, in call with Popen(*popenargs, **kwargs) as p: ^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1026, in __init__ self._execute_child(args, executable, preexec_fn, close_fds, File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\subprocess.py", line 1538, in _execute_child hp, ht, pid, tid = _winapi.CreateProcess(executable, args, ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ FileNotFoundError: [WinError 2] The system cannot find the file specified The above exception was the direct cause of the following exception: Traceback (most recent call last): File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\cli\_utilities.py", line 41, in wrapper return fn(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\utilities\asyncutils.py", line 255, in coroutine_wrapper return call() ^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 382, in __call__ return self.result() ^^^^^^^^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 282, in result return self.future.result(timeout=timeout) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 168, in result return self.__get_result() ^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\AppData\Local\Programs\Python\Python311\Lib\concurrent\futures\_base.py", line 401, in __get_result raise self._exception File 
"C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\_internal\concurrency\calls.py", line 345, in _run_async result = await coro ^^^^^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\cli\deploy.py", line 292, in deploy await _run_single_deploy( File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\client\utilities.py", line 51, in with_injected_client return await fn(*args, **kwargs) ^^^^^^^^^^^^^^^^^^^^^^^^^ File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\cli\deploy.py", line 595, in _run_single_deploy await run_steps(build_steps, step_outputs, print_function=app.console.print) File "C:\Users\JKANG1\PycharmProjects\site_analytics\.venv\Lib\site-packages\prefect\deployments\steps\core.py", line 177, in run_steps raise StepExecutionError(f"Encountered error while running {fqn}") from exc prefect.deployments.steps.core.StepExecutionError: Encountered error while running prefect_docker.deployments.steps.build_docker_image An exception occurred. ``` ### Versions ```Text Version: 2.14.4 API version: 0.8.4 Python version: 3.11.6 Git commit: d2cf30f4 Built: Thu, Nov 9, 2023 4:48 PM OS/Arch: win32/AMD64 Profile: default Server type: cloud ``` ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/prefect/deployments/steps/core.py` Content: ``` 1 """ 2 Core primitives for running Prefect project steps. 3 4 Project steps are YAML representations of Python functions along with their inputs. 5 6 Whenever a step is run, the following actions are taken: 7 8 - The step's inputs and block / variable references are resolved (see [the projects concepts documentation](/concepts/projects/#templating-options) for more details) 9 - The step's function is imported; if it cannot be found, the `requires` keyword is used to install the necessary packages 10 - The step's function is called with the resolved inputs 11 - The step's output is returned and used to resolve inputs for subsequent steps 12 """ 13 import os 14 import re 15 import subprocess 16 import warnings 17 from copy import deepcopy 18 from importlib import import_module 19 from typing import Any, Dict, List, Optional, Tuple, Union 20 21 from prefect._internal.compatibility.deprecated import PrefectDeprecationWarning 22 from prefect._internal.concurrency.api import Call, from_async 23 from prefect.logging.loggers import get_logger 24 from prefect.settings import PREFECT_DEBUG_MODE 25 from prefect.utilities.importtools import import_object 26 from prefect.utilities.processutils import get_sys_executable 27 from prefect.utilities.templating import ( 28 apply_values, 29 resolve_block_document_references, 30 resolve_variables, 31 ) 32 33 RESERVED_KEYWORDS = {"id", "requires"} 34 35 36 class StepExecutionError(Exception): 37 """ 38 Raised when a step fails to execute. 39 """ 40 41 42 def _strip_version(requirement: str) -> str: 43 """ 44 Strips the version from a requirement string. 45 46 Args: 47 requirement: A requirement string, e.g. "requests>=2.0.0" 48 49 Returns: 50 The package name, e.g. 
"requests" 51 52 Examples: 53 ```python 54 >>> _strip_version("s3fs>=2.0.0<3.0.0") 55 "s3fs" 56 ``` 57 """ 58 # split on any of the characters in the set [<>=!~] 59 # and return the first element which will be the package name 60 return re.split(r"[<>=!~]", requirement)[0].strip() 61 62 63 def _get_function_for_step( 64 fully_qualified_name: str, requires: Union[str, List[str], None] = None 65 ): 66 if not isinstance(requires, list): 67 packages = [requires] if requires else [] 68 else: 69 packages = requires 70 71 try: 72 for package in packages: 73 import_module(_strip_version(package).replace("-", "_")) 74 step_func = import_object(fully_qualified_name) 75 return step_func 76 except ImportError: 77 if requires: 78 print( 79 f"Unable to load step function: {fully_qualified_name}. Attempting" 80 f" install of {requires}." 81 ) 82 else: 83 raise 84 85 try: 86 subprocess.check_call( 87 [get_sys_executable(), "-m", "pip", "install", *packages], 88 ) 89 except subprocess.CalledProcessError: 90 get_logger("deployments.steps.core").warning( 91 "Unable to install required packages for %s", fully_qualified_name 92 ) 93 step_func = import_object(fully_qualified_name) 94 return step_func 95 96 97 async def run_step(step: Dict, upstream_outputs: Optional[Dict] = None) -> Dict: 98 """ 99 Runs a step, returns the step's output. 100 101 Steps are assumed to be in the format `{"importable.func.name": {"kwarg1": "value1", ...}}`. 102 103 The 'id and 'requires' keywords are reserved for specific purposes and will be removed from the 104 inputs before passing to the step function: 105 106 This keyword is used to specify packages that should be installed before running the step. 107 """ 108 fqn, inputs = _get_step_fully_qualified_name_and_inputs(step) 109 upstream_outputs = upstream_outputs or {} 110 111 if len(step.keys()) > 1: 112 raise ValueError( 113 f"Step has unexpected additional keys: {', '.join(list(step.keys())[1:])}" 114 ) 115 116 keywords = { 117 keyword: inputs.pop(keyword) 118 for keyword in RESERVED_KEYWORDS 119 if keyword in inputs 120 } 121 122 inputs = apply_values(inputs, upstream_outputs) 123 inputs = await resolve_block_document_references(inputs) 124 inputs = await resolve_variables(inputs) 125 inputs = apply_values(inputs, os.environ) 126 step_func = _get_function_for_step(fqn, requires=keywords.get("requires")) 127 result = await from_async.call_soon_in_new_thread( 128 Call.new(step_func, **inputs) 129 ).aresult() 130 return result 131 132 133 async def run_steps( 134 steps: List[Dict[str, Any]], 135 upstream_outputs: Optional[Dict[str, Any]] = None, 136 print_function: Any = print, 137 ): 138 upstream_outputs = deepcopy(upstream_outputs) if upstream_outputs else {} 139 for step in steps: 140 if not step: 141 continue 142 fqn, inputs = _get_step_fully_qualified_name_and_inputs(step) 143 step_name = fqn.split(".")[-1] 144 print_function(f" > Running {step_name} step...") 145 try: 146 # catch warnings to ensure deprecation warnings are printed 147 with warnings.catch_warnings(record=True) as w: 148 warnings.simplefilter( 149 "always", 150 category=PrefectDeprecationWarning, 151 ) 152 warnings.simplefilter( 153 "always", 154 category=DeprecationWarning, 155 ) 156 step_output = await run_step(step, upstream_outputs) 157 if w: 158 printed_messages = [] 159 for warning in w: 160 message = str(warning.message) 161 # prevent duplicate warnings from being printed 162 if message not in printed_messages: 163 try: 164 # try using rich styling 165 print_function(message, style="yellow") 166 
except Exception: 167 # default to printing without styling 168 print_function(message) 169 printed_messages.append(message) 170 171 if not isinstance(step_output, dict): 172 if PREFECT_DEBUG_MODE: 173 get_logger().warning( 174 "Step function %s returned unexpected type: %s", 175 fqn, 176 type(step_output), 177 ) 178 continue 179 # store step output under step id to prevent clobbering 180 if inputs.get("id"): 181 upstream_outputs[inputs.get("id")] = step_output 182 upstream_outputs.update(step_output) 183 except Exception as exc: 184 raise StepExecutionError(f"Encountered error while running {fqn}") from exc 185 return upstream_outputs 186 187 188 def _get_step_fully_qualified_name_and_inputs(step: Dict) -> Tuple[str, Dict]: 189 step = deepcopy(step) 190 return step.popitem() 191 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/prefect/deployments/steps/core.py b/src/prefect/deployments/steps/core.py --- a/src/prefect/deployments/steps/core.py +++ b/src/prefect/deployments/steps/core.py @@ -13,6 +13,7 @@ import os import re import subprocess +import sys import warnings from copy import deepcopy from importlib import import_module @@ -23,7 +24,6 @@ from prefect.logging.loggers import get_logger from prefect.settings import PREFECT_DEBUG_MODE from prefect.utilities.importtools import import_object -from prefect.utilities.processutils import get_sys_executable from prefect.utilities.templating import ( apply_values, resolve_block_document_references, @@ -83,9 +83,7 @@ raise try: - subprocess.check_call( - [get_sys_executable(), "-m", "pip", "install", *packages], - ) + subprocess.check_call([sys.executable, "-m", "pip", "install", *packages]) except subprocess.CalledProcessError: get_logger("deployments.steps.core").warning( "Unable to install required packages for %s", fully_qualified_name
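For illustration, a minimal sketch (not the Prefect source; the function name is hypothetical) of the pattern the diff above adopts: invoking pip through `sys.executable`, the path of the running interpreter, rather than resolving a separate executable path that can fail on Windows with `FileNotFoundError`.

```python
import subprocess
import sys

def install_step_requirements(packages):
    # Sketch only; mirrors the subprocess call shown in the diff above.
    # sys.executable points at the current interpreter, which exists on Windows too.
    subprocess.check_call([sys.executable, "-m", "pip", "install", *packages])
```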
{"golden_diff": "diff --git a/src/prefect/deployments/steps/core.py b/src/prefect/deployments/steps/core.py\n--- a/src/prefect/deployments/steps/core.py\n+++ b/src/prefect/deployments/steps/core.py\n@@ -13,6 +13,7 @@\n import os\n import re\n import subprocess\n+import sys\n import warnings\n from copy import deepcopy\n from importlib import import_module\n@@ -23,7 +24,6 @@\n from prefect.logging.loggers import get_logger\n from prefect.settings import PREFECT_DEBUG_MODE\n from prefect.utilities.importtools import import_object\n-from prefect.utilities.processutils import get_sys_executable\n from prefect.utilities.templating import (\n apply_values,\n resolve_block_document_references,\n@@ -83,9 +83,7 @@\n raise\n \n try:\n- subprocess.check_call(\n- [get_sys_executable(), \"-m\", \"pip\", \"install\", *packages],\n- )\n+ subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *packages])\n except subprocess.CalledProcessError:\n get_logger(\"deployments.steps.core\").warning(\n \"Unable to install required packages for %s\", fully_qualified_name\n", "issue": "Windows prefect deploy does not work with docker\n### First check\n\n- [X] I added a descriptive title to this issue.\n- [X] I used the GitHub search to find a similar issue and didn't find it.\n- [X] I searched the Prefect documentation for this issue.\n- [X] I checked that this issue is related to Prefect and not one of its dependencies.\n\n### Bug summary\n\nprefect deploy -n \"name of deployment\" does not work anymore on Windows. \n\n### Reproduction\n\n```python3\nprefect deploy -n\n```\n\n\n### Error\n\n```python3\nRunning deployment build steps...\r\n > Running run_shell_script step...\r\n > Running build_docker_image step...\r\nUnable to load step function: prefect_docker.deployments.steps.build_docker_image. 
Attempting install of prefect-docker>=0.3.11.\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 149, in run_steps\r\n step_output = await run_step(step, upstream_outputs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 119, in run_step\r\n step_func = _get_function_for_step(fqn, requires=keywords.get(\"requires\"))\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 83, in _get_function_for_step\r\n subprocess.check_call(\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 408, in check_call\r\n retcode = call(*popenargs, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 389, in call\r\n with Popen(*popenargs, **kwargs) as p:\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 1026, in __init__\r\n self._execute_child(args, executable, preexec_fn, close_fds,\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\subprocess.py\", line 1538, in _execute_child\r\n hp, ht, pid, tid = _winapi.CreateProcess(executable, args,\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\nFileNotFoundError: [WinError 2] The system cannot find the file specified\r\n\r\nThe above exception was the direct cause of the following exception:\r\n\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\cli\\_utilities.py\", line 41, in wrapper\r\n return fn(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\utilities\\asyncutils.py\", line 255, in coroutine_wrapper\r\n return call()\r\n ^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 382, in __call__\r\n return self.result()\r\n ^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 282, in result\r\n return self.future.result(timeout=timeout)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 168, in result\r\n return self.__get_result()\r\n ^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\AppData\\Local\\Programs\\Python\\Python311\\Lib\\concurrent\\futures\\_base.py\", line 401, in __get_result\r\n raise self._exception\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\_internal\\concurrency\\calls.py\", line 345, in _run_async\r\n result = await coro\r\n ^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\cli\\deploy.py\", line 292, in deploy\r\n await _run_single_deploy(\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\client\\utilities.py\", line 51, in with_injected_client\r\n 
return await fn(*args, **kwargs)\r\n ^^^^^^^^^^^^^^^^^^^^^^^^^\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\cli\\deploy.py\", line 595, in _run_single_deploy\r\n await run_steps(build_steps, step_outputs, print_function=app.console.print)\r\n File \"C:\\Users\\JKANG1\\PycharmProjects\\site_analytics\\.venv\\Lib\\site-packages\\prefect\\deployments\\steps\\core.py\", line 177, in run_steps\r\n raise StepExecutionError(f\"Encountered error while running {fqn}\") from exc\r\nprefect.deployments.steps.core.StepExecutionError: Encountered error while running prefect_docker.deployments.steps.build_docker_image\r\nAn exception occurred.\n```\n\n\n### Versions\n\n```Text\nVersion: 2.14.4\r\nAPI version: 0.8.4\r\nPython version: 3.11.6\r\nGit commit: d2cf30f4\r\nBuilt: Thu, Nov 9, 2023 4:48 PM\r\nOS/Arch: win32/AMD64\r\nProfile: default\r\nServer type: cloud\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nCore primitives for running Prefect project steps.\n\nProject steps are YAML representations of Python functions along with their inputs.\n\nWhenever a step is run, the following actions are taken:\n\n- The step's inputs and block / variable references are resolved (see [the projects concepts documentation](/concepts/projects/#templating-options) for more details)\n- The step's function is imported; if it cannot be found, the `requires` keyword is used to install the necessary packages\n- The step's function is called with the resolved inputs\n- The step's output is returned and used to resolve inputs for subsequent steps\n\"\"\"\nimport os\nimport re\nimport subprocess\nimport warnings\nfrom copy import deepcopy\nfrom importlib import import_module\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom prefect._internal.compatibility.deprecated import PrefectDeprecationWarning\nfrom prefect._internal.concurrency.api import Call, from_async\nfrom prefect.logging.loggers import get_logger\nfrom prefect.settings import PREFECT_DEBUG_MODE\nfrom prefect.utilities.importtools import import_object\nfrom prefect.utilities.processutils import get_sys_executable\nfrom prefect.utilities.templating import (\n apply_values,\n resolve_block_document_references,\n resolve_variables,\n)\n\nRESERVED_KEYWORDS = {\"id\", \"requires\"}\n\n\nclass StepExecutionError(Exception):\n \"\"\"\n Raised when a step fails to execute.\n \"\"\"\n\n\ndef _strip_version(requirement: str) -> str:\n \"\"\"\n Strips the version from a requirement string.\n\n Args:\n requirement: A requirement string, e.g. \"requests>=2.0.0\"\n\n Returns:\n The package name, e.g. \"requests\"\n\n Examples:\n ```python\n >>> _strip_version(\"s3fs>=2.0.0<3.0.0\")\n \"s3fs\"\n ```\n \"\"\"\n # split on any of the characters in the set [<>=!~]\n # and return the first element which will be the package name\n return re.split(r\"[<>=!~]\", requirement)[0].strip()\n\n\ndef _get_function_for_step(\n fully_qualified_name: str, requires: Union[str, List[str], None] = None\n):\n if not isinstance(requires, list):\n packages = [requires] if requires else []\n else:\n packages = requires\n\n try:\n for package in packages:\n import_module(_strip_version(package).replace(\"-\", \"_\"))\n step_func = import_object(fully_qualified_name)\n return step_func\n except ImportError:\n if requires:\n print(\n f\"Unable to load step function: {fully_qualified_name}. 
Attempting\"\n f\" install of {requires}.\"\n )\n else:\n raise\n\n try:\n subprocess.check_call(\n [get_sys_executable(), \"-m\", \"pip\", \"install\", *packages],\n )\n except subprocess.CalledProcessError:\n get_logger(\"deployments.steps.core\").warning(\n \"Unable to install required packages for %s\", fully_qualified_name\n )\n step_func = import_object(fully_qualified_name)\n return step_func\n\n\nasync def run_step(step: Dict, upstream_outputs: Optional[Dict] = None) -> Dict:\n \"\"\"\n Runs a step, returns the step's output.\n\n Steps are assumed to be in the format `{\"importable.func.name\": {\"kwarg1\": \"value1\", ...}}`.\n\n The 'id and 'requires' keywords are reserved for specific purposes and will be removed from the\n inputs before passing to the step function:\n\n This keyword is used to specify packages that should be installed before running the step.\n \"\"\"\n fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)\n upstream_outputs = upstream_outputs or {}\n\n if len(step.keys()) > 1:\n raise ValueError(\n f\"Step has unexpected additional keys: {', '.join(list(step.keys())[1:])}\"\n )\n\n keywords = {\n keyword: inputs.pop(keyword)\n for keyword in RESERVED_KEYWORDS\n if keyword in inputs\n }\n\n inputs = apply_values(inputs, upstream_outputs)\n inputs = await resolve_block_document_references(inputs)\n inputs = await resolve_variables(inputs)\n inputs = apply_values(inputs, os.environ)\n step_func = _get_function_for_step(fqn, requires=keywords.get(\"requires\"))\n result = await from_async.call_soon_in_new_thread(\n Call.new(step_func, **inputs)\n ).aresult()\n return result\n\n\nasync def run_steps(\n steps: List[Dict[str, Any]],\n upstream_outputs: Optional[Dict[str, Any]] = None,\n print_function: Any = print,\n):\n upstream_outputs = deepcopy(upstream_outputs) if upstream_outputs else {}\n for step in steps:\n if not step:\n continue\n fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)\n step_name = fqn.split(\".\")[-1]\n print_function(f\" > Running {step_name} step...\")\n try:\n # catch warnings to ensure deprecation warnings are printed\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\n \"always\",\n category=PrefectDeprecationWarning,\n )\n warnings.simplefilter(\n \"always\",\n category=DeprecationWarning,\n )\n step_output = await run_step(step, upstream_outputs)\n if w:\n printed_messages = []\n for warning in w:\n message = str(warning.message)\n # prevent duplicate warnings from being printed\n if message not in printed_messages:\n try:\n # try using rich styling\n print_function(message, style=\"yellow\")\n except Exception:\n # default to printing without styling\n print_function(message)\n printed_messages.append(message)\n\n if not isinstance(step_output, dict):\n if PREFECT_DEBUG_MODE:\n get_logger().warning(\n \"Step function %s returned unexpected type: %s\",\n fqn,\n type(step_output),\n )\n continue\n # store step output under step id to prevent clobbering\n if inputs.get(\"id\"):\n upstream_outputs[inputs.get(\"id\")] = step_output\n upstream_outputs.update(step_output)\n except Exception as exc:\n raise StepExecutionError(f\"Encountered error while running {fqn}\") from exc\n return upstream_outputs\n\n\ndef _get_step_fully_qualified_name_and_inputs(step: Dict) -> Tuple[str, Dict]:\n step = deepcopy(step)\n return step.popitem()\n", "path": "src/prefect/deployments/steps/core.py"}], "after_files": [{"content": "\"\"\"\nCore primitives for running Prefect project steps.\n\nProject steps are YAML 
representations of Python functions along with their inputs.\n\nWhenever a step is run, the following actions are taken:\n\n- The step's inputs and block / variable references are resolved (see [the projects concepts documentation](/concepts/projects/#templating-options) for more details)\n- The step's function is imported; if it cannot be found, the `requires` keyword is used to install the necessary packages\n- The step's function is called with the resolved inputs\n- The step's output is returned and used to resolve inputs for subsequent steps\n\"\"\"\nimport os\nimport re\nimport subprocess\nimport sys\nimport warnings\nfrom copy import deepcopy\nfrom importlib import import_module\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom prefect._internal.compatibility.deprecated import PrefectDeprecationWarning\nfrom prefect._internal.concurrency.api import Call, from_async\nfrom prefect.logging.loggers import get_logger\nfrom prefect.settings import PREFECT_DEBUG_MODE\nfrom prefect.utilities.importtools import import_object\nfrom prefect.utilities.templating import (\n apply_values,\n resolve_block_document_references,\n resolve_variables,\n)\n\nRESERVED_KEYWORDS = {\"id\", \"requires\"}\n\n\nclass StepExecutionError(Exception):\n \"\"\"\n Raised when a step fails to execute.\n \"\"\"\n\n\ndef _strip_version(requirement: str) -> str:\n \"\"\"\n Strips the version from a requirement string.\n\n Args:\n requirement: A requirement string, e.g. \"requests>=2.0.0\"\n\n Returns:\n The package name, e.g. \"requests\"\n\n Examples:\n ```python\n >>> _strip_version(\"s3fs>=2.0.0<3.0.0\")\n \"s3fs\"\n ```\n \"\"\"\n # split on any of the characters in the set [<>=!~]\n # and return the first element which will be the package name\n return re.split(r\"[<>=!~]\", requirement)[0].strip()\n\n\ndef _get_function_for_step(\n fully_qualified_name: str, requires: Union[str, List[str], None] = None\n):\n if not isinstance(requires, list):\n packages = [requires] if requires else []\n else:\n packages = requires\n\n try:\n for package in packages:\n import_module(_strip_version(package).replace(\"-\", \"_\"))\n step_func = import_object(fully_qualified_name)\n return step_func\n except ImportError:\n if requires:\n print(\n f\"Unable to load step function: {fully_qualified_name}. 
Attempting\"\n f\" install of {requires}.\"\n )\n else:\n raise\n\n try:\n subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", *packages])\n except subprocess.CalledProcessError:\n get_logger(\"deployments.steps.core\").warning(\n \"Unable to install required packages for %s\", fully_qualified_name\n )\n step_func = import_object(fully_qualified_name)\n return step_func\n\n\nasync def run_step(step: Dict, upstream_outputs: Optional[Dict] = None) -> Dict:\n \"\"\"\n Runs a step, returns the step's output.\n\n Steps are assumed to be in the format `{\"importable.func.name\": {\"kwarg1\": \"value1\", ...}}`.\n\n The 'id and 'requires' keywords are reserved for specific purposes and will be removed from the\n inputs before passing to the step function:\n\n This keyword is used to specify packages that should be installed before running the step.\n \"\"\"\n fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)\n upstream_outputs = upstream_outputs or {}\n\n if len(step.keys()) > 1:\n raise ValueError(\n f\"Step has unexpected additional keys: {', '.join(list(step.keys())[1:])}\"\n )\n\n keywords = {\n keyword: inputs.pop(keyword)\n for keyword in RESERVED_KEYWORDS\n if keyword in inputs\n }\n\n inputs = apply_values(inputs, upstream_outputs)\n inputs = await resolve_block_document_references(inputs)\n inputs = await resolve_variables(inputs)\n inputs = apply_values(inputs, os.environ)\n step_func = _get_function_for_step(fqn, requires=keywords.get(\"requires\"))\n result = await from_async.call_soon_in_new_thread(\n Call.new(step_func, **inputs)\n ).aresult()\n return result\n\n\nasync def run_steps(\n steps: List[Dict[str, Any]],\n upstream_outputs: Optional[Dict[str, Any]] = None,\n print_function: Any = print,\n):\n upstream_outputs = deepcopy(upstream_outputs) if upstream_outputs else {}\n for step in steps:\n if not step:\n continue\n fqn, inputs = _get_step_fully_qualified_name_and_inputs(step)\n step_name = fqn.split(\".\")[-1]\n print_function(f\" > Running {step_name} step...\")\n try:\n # catch warnings to ensure deprecation warnings are printed\n with warnings.catch_warnings(record=True) as w:\n warnings.simplefilter(\n \"always\",\n category=PrefectDeprecationWarning,\n )\n warnings.simplefilter(\n \"always\",\n category=DeprecationWarning,\n )\n step_output = await run_step(step, upstream_outputs)\n if w:\n printed_messages = []\n for warning in w:\n message = str(warning.message)\n # prevent duplicate warnings from being printed\n if message not in printed_messages:\n try:\n # try using rich styling\n print_function(message, style=\"yellow\")\n except Exception:\n # default to printing without styling\n print_function(message)\n printed_messages.append(message)\n\n if not isinstance(step_output, dict):\n if PREFECT_DEBUG_MODE:\n get_logger().warning(\n \"Step function %s returned unexpected type: %s\",\n fqn,\n type(step_output),\n )\n continue\n # store step output under step id to prevent clobbering\n if inputs.get(\"id\"):\n upstream_outputs[inputs.get(\"id\")] = step_output\n upstream_outputs.update(step_output)\n except Exception as exc:\n raise StepExecutionError(f\"Encountered error while running {fqn}\") from exc\n return upstream_outputs\n\n\ndef _get_step_fully_qualified_name_and_inputs(step: Dict) -> Tuple[str, Dict]:\n step = deepcopy(step)\n return step.popitem()\n", "path": "src/prefect/deployments/steps/core.py"}]}
3597
268
gh_patches_debug_63639
rasdani/github-patches
git_diff
bookwyrm-social__bookwyrm-2239
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Logged out view of list of lists is empty This is a 🤦 on my part -- it should directly query the list of public lists, instead of trying to use the redis cache, which relies on logged in users --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bookwyrm/views/list/lists.py` Content: ``` 1 """ book list views""" 2 from django.contrib.auth.decorators import login_required 3 from django.core.paginator import Paginator 4 from django.shortcuts import redirect 5 from django.template.response import TemplateResponse 6 from django.utils.decorators import method_decorator 7 from django.views import View 8 9 from bookwyrm import forms, models 10 from bookwyrm.lists_stream import ListsStream 11 from bookwyrm.views.helpers import get_user_from_username 12 13 14 # pylint: disable=no-self-use 15 class Lists(View): 16 """book list page""" 17 18 def get(self, request): 19 """display a book list""" 20 lists = ListsStream().get_list_stream(request.user) 21 paginated = Paginator(lists, 12) 22 data = { 23 "lists": paginated.get_page(request.GET.get("page")), 24 "list_form": forms.ListForm(), 25 "path": "/list", 26 } 27 return TemplateResponse(request, "lists/lists.html", data) 28 29 @method_decorator(login_required, name="dispatch") 30 # pylint: disable=unused-argument 31 def post(self, request): 32 """create a book_list""" 33 form = forms.ListForm(request.POST) 34 if not form.is_valid(): 35 return redirect("lists") 36 book_list = form.save() 37 # list should not have a group if it is not group curated 38 if not book_list.curation == "group": 39 book_list.group = None 40 book_list.save(broadcast=False) 41 42 return redirect(book_list.local_path) 43 44 45 @method_decorator(login_required, name="dispatch") 46 class SavedLists(View): 47 """saved book list page""" 48 49 def get(self, request): 50 """display book lists""" 51 # hide lists with no approved books 52 lists = request.user.saved_lists.order_by("-updated_date") 53 54 paginated = Paginator(lists, 12) 55 data = { 56 "lists": paginated.get_page(request.GET.get("page")), 57 "list_form": forms.ListForm(), 58 "path": "/list", 59 } 60 return TemplateResponse(request, "lists/lists.html", data) 61 62 63 @method_decorator(login_required, name="dispatch") 64 class UserLists(View): 65 """a user's book list page""" 66 67 def get(self, request, username): 68 """display a book list""" 69 user = get_user_from_username(request.user, username) 70 lists = models.List.privacy_filter(request.user).filter(user=user) 71 paginated = Paginator(lists, 12) 72 73 data = { 74 "user": user, 75 "is_self": request.user.id == user.id, 76 "lists": paginated.get_page(request.GET.get("page")), 77 "list_form": forms.ListForm(), 78 "path": user.local_path + "/lists", 79 } 80 return TemplateResponse(request, "user/lists.html", data) 81 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py --- a/bookwyrm/views/list/lists.py +++ b/bookwyrm/views/list/lists.py @@ -17,7 +17,10 @@ def get(self, request): """display a book list""" - lists = ListsStream().get_list_stream(request.user) + if request.user.is_authenticated: + lists = ListsStream().get_list_stream(request.user) + else: + lists = models.List.objects.filter(privacy="public") paginated = Paginator(lists, 12) data = { "lists": paginated.get_page(request.GET.get("page")),
{"golden_diff": "diff --git a/bookwyrm/views/list/lists.py b/bookwyrm/views/list/lists.py\n--- a/bookwyrm/views/list/lists.py\n+++ b/bookwyrm/views/list/lists.py\n@@ -17,7 +17,10 @@\n \n def get(self, request):\n \"\"\"display a book list\"\"\"\n- lists = ListsStream().get_list_stream(request.user)\n+ if request.user.is_authenticated:\n+ lists = ListsStream().get_list_stream(request.user)\n+ else:\n+ lists = models.List.objects.filter(privacy=\"public\")\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n", "issue": "Logged out view of list of lists is empty\nThis is a \ud83e\udd26 on my part -- it should directly query the list of public lists, instead of trying to use the redis cache, which relies on logged in users\n", "before_files": [{"content": "\"\"\" book list views\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.lists_stream import ListsStream\nfrom bookwyrm.views.helpers import get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Lists(View):\n \"\"\"book list page\"\"\"\n\n def get(self, request):\n \"\"\"display a book list\"\"\"\n lists = ListsStream().get_list_stream(request.user)\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request):\n \"\"\"create a book_list\"\"\"\n form = forms.ListForm(request.POST)\n if not form.is_valid():\n return redirect(\"lists\")\n book_list = form.save()\n # list should not have a group if it is not group curated\n if not book_list.curation == \"group\":\n book_list.group = None\n book_list.save(broadcast=False)\n\n return redirect(book_list.local_path)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass SavedLists(View):\n \"\"\"saved book list page\"\"\"\n\n def get(self, request):\n \"\"\"display book lists\"\"\"\n # hide lists with no approved books\n lists = request.user.saved_lists.order_by(\"-updated_date\")\n\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserLists(View):\n \"\"\"a user's book list page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a book list\"\"\"\n user = get_user_from_username(request.user, username)\n lists = models.List.privacy_filter(request.user).filter(user=user)\n paginated = Paginator(lists, 12)\n\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": user.local_path + \"/lists\",\n }\n return TemplateResponse(request, \"user/lists.html\", data)\n", "path": "bookwyrm/views/list/lists.py"}], "after_files": [{"content": "\"\"\" book list views\"\"\"\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts 
import redirect\nfrom django.template.response import TemplateResponse\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\nfrom bookwyrm import forms, models\nfrom bookwyrm.lists_stream import ListsStream\nfrom bookwyrm.views.helpers import get_user_from_username\n\n\n# pylint: disable=no-self-use\nclass Lists(View):\n \"\"\"book list page\"\"\"\n\n def get(self, request):\n \"\"\"display a book list\"\"\"\n if request.user.is_authenticated:\n lists = ListsStream().get_list_stream(request.user)\n else:\n lists = models.List.objects.filter(privacy=\"public\")\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n @method_decorator(login_required, name=\"dispatch\")\n # pylint: disable=unused-argument\n def post(self, request):\n \"\"\"create a book_list\"\"\"\n form = forms.ListForm(request.POST)\n if not form.is_valid():\n return redirect(\"lists\")\n book_list = form.save()\n # list should not have a group if it is not group curated\n if not book_list.curation == \"group\":\n book_list.group = None\n book_list.save(broadcast=False)\n\n return redirect(book_list.local_path)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass SavedLists(View):\n \"\"\"saved book list page\"\"\"\n\n def get(self, request):\n \"\"\"display book lists\"\"\"\n # hide lists with no approved books\n lists = request.user.saved_lists.order_by(\"-updated_date\")\n\n paginated = Paginator(lists, 12)\n data = {\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": \"/list\",\n }\n return TemplateResponse(request, \"lists/lists.html\", data)\n\n\n@method_decorator(login_required, name=\"dispatch\")\nclass UserLists(View):\n \"\"\"a user's book list page\"\"\"\n\n def get(self, request, username):\n \"\"\"display a book list\"\"\"\n user = get_user_from_username(request.user, username)\n lists = models.List.privacy_filter(request.user).filter(user=user)\n paginated = Paginator(lists, 12)\n\n data = {\n \"user\": user,\n \"is_self\": request.user.id == user.id,\n \"lists\": paginated.get_page(request.GET.get(\"page\")),\n \"list_form\": forms.ListForm(),\n \"path\": user.local_path + \"/lists\",\n }\n return TemplateResponse(request, \"user/lists.html\", data)\n", "path": "bookwyrm/views/list/lists.py"}]}
1040
149
gh_patches_debug_17668
rasdani/github-patches
git_diff
scikit-image__scikit-image-3642
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "[Errno 36] File name too long:" when using imread on remote resource with long querystring ## Description When using skimage.io.imread with a remote resource, a long query string on the remote resource will cause a failure to read the remote resource, because the temporary file cannot be created. e.g. The following works fine ``` >>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(100)]))) ``` while the one below fails ``` >>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(300)]))) Traceback (most recent call last): File "/usr/local/lib/python3.5/dist-packages/skimage/io/util.py", line 28, in file_or_url_context with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f: File "/usr/lib/python3.5/tempfile.py", line 688, in NamedTemporaryFile (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type) File "/usr/lib/python3.5/tempfile.py", line 399, in _mkstemp_inner fd = _os.open(file, flags, 0o600) OSError: [Errno 36] File name too long: '/tmp/tmpmfnujlq6.jpg?ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss' During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/usr/local/lib/python3.5/dist-packages/skimage/io/_io.py", line 61, in imread with file_or_url_context(fname) as fname: File "/usr/lib/python3.5/contextlib.py", line 59, in __enter__ return next(self.gen) File "/usr/local/lib/python3.5/dist-packages/skimage/io/util.py", line 34, in file_or_url_context os.remove(f.name) UnboundLocalError: local variable 'f' referenced before assignment ``` ## Way to reproduce [If reporting a bug, please include the following important information:] - [x] Code example - [x] Operating system and version ``` DISTRIB_ID=Ubuntu DISTRIB_RELEASE=16.04 DISTRIB_CODENAME=xenial DISTRIB_DESCRIPTION="Ubuntu 16.04.5 LTS" ``` - [x] Python version: Python 3.5.2 - [x] scikit-image version (run `skimage.__version__`): skimage.__version__ '0.14.1' ## Proposal https://github.com/scikit-image/scikit-image/blob/d24634d86e4f90fe96377209d66ed114b9b601e4/skimage/io/util.py#L22 The querystring should be excluded from the temporary file name extension --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/io/util.py` Content: ``` 1 from urllib.request import urlopen 2 3 import os 4 import re 5 import tempfile 6 from contextlib import contextmanager 7 8 9 URL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\') 10 11 12 def is_url(filename): 13 """Return True if string is an http or ftp path.""" 14 return (isinstance(filename, str) and 15 URL_REGEX.match(filename) is not None) 16 17 18 @contextmanager 19 def file_or_url_context(resource_name): 20 """Yield name of file from the given resource (i.e. 
file or url).""" 21 if is_url(resource_name): 22 _, ext = os.path.splitext(resource_name) 23 try: 24 with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f: 25 u = urlopen(resource_name) 26 f.write(u.read()) 27 # f must be closed before yielding 28 yield f.name 29 finally: 30 os.remove(f.name) 31 else: 32 yield resource_name 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/io/util.py b/skimage/io/util.py --- a/skimage/io/util.py +++ b/skimage/io/util.py @@ -1,4 +1,4 @@ -from urllib.request import urlopen +import urllib.parse import os import re @@ -19,10 +19,11 @@ def file_or_url_context(resource_name): """Yield name of file from the given resource (i.e. file or url).""" if is_url(resource_name): - _, ext = os.path.splitext(resource_name) + url_components = urllib.parse.urlparse(resource_name) + _, ext = os.path.splitext(url_components.path) try: with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f: - u = urlopen(resource_name) + u = urllib.request.urlopen(resource_name) f.write(u.read()) # f must be closed before yielding yield f.name
{"golden_diff": "diff --git a/skimage/io/util.py b/skimage/io/util.py\n--- a/skimage/io/util.py\n+++ b/skimage/io/util.py\n@@ -1,4 +1,4 @@\n-from urllib.request import urlopen\n+import urllib.parse\n \n import os\n import re\n@@ -19,10 +19,11 @@\n def file_or_url_context(resource_name):\n \"\"\"Yield name of file from the given resource (i.e. file or url).\"\"\"\n if is_url(resource_name):\n- _, ext = os.path.splitext(resource_name)\n+ url_components = urllib.parse.urlparse(resource_name)\n+ _, ext = os.path.splitext(url_components.path)\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\n- u = urlopen(resource_name)\n+ u = urllib.request.urlopen(resource_name)\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n", "issue": "\"[Errno 36] File name too long:\" when using imread on remote resource with long querystring\n## Description\r\nWhen using skimage.io.imread with a remote resource, a long query string on the remote resource will cause a failure to read the remote resource, because the temporary file cannot be created.\r\n\r\ne.g. \r\n\r\nThe following works fine\r\n```\r\n>>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(100)])))\r\n\r\n```\r\n\r\nwhile the one below fails\r\n\r\n```\r\n>>> im = imread('https://c1.staticflickr.com/9/8370/8429454143_1066b73c04_o.jpg?{}'.format(''.join(['s' for i in range(300)])))\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.5/dist-packages/skimage/io/util.py\", line 28, in file_or_url_context\r\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\r\n File \"/usr/lib/python3.5/tempfile.py\", line 688, in NamedTemporaryFile\r\n (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)\r\n File \"/usr/lib/python3.5/tempfile.py\", line 399, in _mkstemp_inner\r\n fd = _os.open(file, flags, 0o600)\r\nOSError: [Errno 36] File name too long: '/tmp/tmpmfnujlq6.jpg?ssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss'\r\n\r\nDuring handling of the above exception, another exception occurred:\r\n\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/usr/local/lib/python3.5/dist-packages/skimage/io/_io.py\", line 61, in imread\r\n with file_or_url_context(fname) as fname:\r\n File \"/usr/lib/python3.5/contextlib.py\", line 59, in __enter__\r\n return next(self.gen)\r\n File \"/usr/local/lib/python3.5/dist-packages/skimage/io/util.py\", line 34, in file_or_url_context\r\n os.remove(f.name)\r\nUnboundLocalError: local variable 'f' referenced before assignment\r\n\r\n```\r\n\r\n## Way to reproduce\r\n[If reporting a bug, please include the following important information:]\r\n- [x] Code example\r\n- [x] Operating system and version\r\n```\r\nDISTRIB_ID=Ubuntu\r\nDISTRIB_RELEASE=16.04\r\nDISTRIB_CODENAME=xenial\r\nDISTRIB_DESCRIPTION=\"Ubuntu 16.04.5 LTS\"\r\n```\r\n- [x] Python version: Python 3.5.2\r\n- [x] scikit-image version (run `skimage.__version__`): skimage.__version__\r\n'0.14.1'\r\n\r\n## Proposal\r\n\r\nhttps://github.com/scikit-image/scikit-image/blob/d24634d86e4f90fe96377209d66ed114b9b601e4/skimage/io/util.py#L22\r\n\r\nThe querystring should be excluded from the temporary file name extension\n", 
"before_files": [{"content": "from urllib.request import urlopen\n\nimport os\nimport re\nimport tempfile\nfrom contextlib import contextmanager\n\n\nURL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\\\')\n\n\ndef is_url(filename):\n \"\"\"Return True if string is an http or ftp path.\"\"\"\n return (isinstance(filename, str) and\n URL_REGEX.match(filename) is not None)\n\n\n@contextmanager\ndef file_or_url_context(resource_name):\n \"\"\"Yield name of file from the given resource (i.e. file or url).\"\"\"\n if is_url(resource_name):\n _, ext = os.path.splitext(resource_name)\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\n u = urlopen(resource_name)\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n finally:\n os.remove(f.name)\n else:\n yield resource_name\n", "path": "skimage/io/util.py"}], "after_files": [{"content": "import urllib.parse\n\nimport os\nimport re\nimport tempfile\nfrom contextlib import contextmanager\n\n\nURL_REGEX = re.compile(r'http://|https://|ftp://|file://|file:\\\\')\n\n\ndef is_url(filename):\n \"\"\"Return True if string is an http or ftp path.\"\"\"\n return (isinstance(filename, str) and\n URL_REGEX.match(filename) is not None)\n\n\n@contextmanager\ndef file_or_url_context(resource_name):\n \"\"\"Yield name of file from the given resource (i.e. file or url).\"\"\"\n if is_url(resource_name):\n url_components = urllib.parse.urlparse(resource_name)\n _, ext = os.path.splitext(url_components.path)\n try:\n with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as f:\n u = urllib.request.urlopen(resource_name)\n f.write(u.read())\n # f must be closed before yielding\n yield f.name\n finally:\n os.remove(f.name)\n else:\n yield resource_name\n", "path": "skimage/io/util.py"}]}
1404
206
gh_patches_debug_25033
rasdani/github-patches
git_diff
apache__airflow-6783
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [AIRFLOW-3014] Fix multiple alembic heads Make sure you have checked _all_ steps below. ### Jira - [ ] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title. For example, "\[AIRFLOW-XXX\] My Airflow PR" - https://issues.apache.org/jira/browse/AIRFLOW-6224 - In case you are fixing a typo in the documentation you can prepend your commit with \[AIRFLOW-XXX\], code changes always need a Jira issue. - In case you are proposing a fundamental code change, you need to create an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)). - In case you are adding a dependency, check if the license complies with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x). ### Description - [ ] Here are some details about my PR, including screenshots of any UI changes: ### Tests - [ ] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason: ### Commits - [ ] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from "[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)": 1. Subject is separated from body by a blank line 1. Subject is limited to 50 characters (not including Jira issue reference) 1. Subject does not end with a period 1. Subject uses the imperative mood ("add", not "adding") 1. Body wraps at 72 characters 1. Body explains "what" and "why", not "how" ### Documentation - [ ] In case of new functionality, my PR adds documentation that describes how to use it. - All the public functions and the classes in the PR contain docstrings that explain what it does - If you implement backwards incompatible changes, please leave a note in the [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to a appropriate release --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py` Content: ``` 1 # 2 # Licensed to the Apache Software Foundation (ASF) under one 3 # or more contributor license agreements. See the NOTICE file 4 # distributed with this work for additional information 5 # regarding copyright ownership. The ASF licenses this file 6 # to you under the Apache License, Version 2.0 (the 7 # "License"); you may not use this file except in compliance 8 # with the License. You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, 13 # software distributed under the License is distributed on an 14 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 15 # KIND, either express or implied. See the License for the 16 # specific language governing permissions and limitations 17 # under the License. 18 19 """Increase length of password column in connection table 20 21 Revision ID: c1840b4bcf1a 22 Revises: 004c1210f153 23 Create Date: 2019-10-02 16:56:54.865550 24 25 """ 26 27 import sqlalchemy as sa 28 from alembic import op 29 30 # revision identifiers, used by Alembic. 
31 revision = 'c1840b4bcf1a' 32 down_revision = '004c1210f153' 33 branch_labels = None 34 depends_on = None 35 36 37 def upgrade(): 38 conn = op.get_bind() 39 if conn.dialect.name == 'sqlite': 40 # SQLite does not allow column modifications so we need to skip this migration 41 return 42 43 op.alter_column(table_name='connection', 44 column_name='password', 45 type_=sa.String(length=5000)) 46 47 48 def downgrade(): 49 # Can't be undone 50 pass 51 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py b/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py deleted file mode 100644 --- a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py +++ /dev/null @@ -1,50 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -"""Increase length of password column in connection table - -Revision ID: c1840b4bcf1a -Revises: 004c1210f153 -Create Date: 2019-10-02 16:56:54.865550 - -""" - -import sqlalchemy as sa -from alembic import op - -# revision identifiers, used by Alembic. -revision = 'c1840b4bcf1a' -down_revision = '004c1210f153' -branch_labels = None -depends_on = None - - -def upgrade(): - conn = op.get_bind() - if conn.dialect.name == 'sqlite': - # SQLite does not allow column modifications so we need to skip this migration - return - - op.alter_column(table_name='connection', - column_name='password', - type_=sa.String(length=5000)) - - -def downgrade(): - # Can't be undone - pass
{"golden_diff": "diff --git a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py b/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py\ndeleted file mode 100644\n--- a/airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py\n+++ /dev/null\n@@ -1,50 +0,0 @@\n-#\n-# Licensed to the Apache Software Foundation (ASF) under one\n-# or more contributor license agreements. See the NOTICE file\n-# distributed with this work for additional information\n-# regarding copyright ownership. The ASF licenses this file\n-# to you under the Apache License, Version 2.0 (the\n-# \"License\"); you may not use this file except in compliance\n-# with the License. You may obtain a copy of the License at\n-#\n-# http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing,\n-# software distributed under the License is distributed on an\n-# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n-# KIND, either express or implied. See the License for the\n-# specific language governing permissions and limitations\n-# under the License.\n-\n-\"\"\"Increase length of password column in connection table\n-\n-Revision ID: c1840b4bcf1a\n-Revises: 004c1210f153\n-Create Date: 2019-10-02 16:56:54.865550\n-\n-\"\"\"\n-\n-import sqlalchemy as sa\n-from alembic import op\n-\n-# revision identifiers, used by Alembic.\n-revision = 'c1840b4bcf1a'\n-down_revision = '004c1210f153'\n-branch_labels = None\n-depends_on = None\n-\n-\n-def upgrade():\n- conn = op.get_bind()\n- if conn.dialect.name == 'sqlite':\n- # SQLite does not allow column modifications so we need to skip this migration\n- return\n-\n- op.alter_column(table_name='connection',\n- column_name='password',\n- type_=sa.String(length=5000))\n-\n-\n-def downgrade():\n- # Can't be undone\n- pass\n", "issue": "[AIRFLOW-3014] Fix multiple alembic heads\nMake sure you have checked _all_ steps below.\r\n\r\n### Jira\r\n\r\n- [ ] My PR addresses the following [Airflow Jira](https://issues.apache.org/jira/browse/AIRFLOW/) issues and references them in the PR title. For example, \"\\[AIRFLOW-XXX\\] My Airflow PR\"\r\n - https://issues.apache.org/jira/browse/AIRFLOW-6224\r\n - In case you are fixing a typo in the documentation you can prepend your commit with \\[AIRFLOW-XXX\\], code changes always need a Jira issue.\r\n - In case you are proposing a fundamental code change, you need to create an Airflow Improvement Proposal ([AIP](https://cwiki.apache.org/confluence/display/AIRFLOW/Airflow+Improvements+Proposals)).\r\n - In case you are adding a dependency, check if the license complies with the [ASF 3rd Party License Policy](https://www.apache.org/legal/resolved.html#category-x).\r\n\r\n### Description\r\n\r\n- [ ] Here are some details about my PR, including screenshots of any UI changes:\r\n\r\n### Tests\r\n\r\n- [ ] My PR adds the following unit tests __OR__ does not need testing for this extremely good reason:\r\n\r\n### Commits\r\n\r\n- [ ] My commits all reference Jira issues in their subject lines, and I have squashed multiple commits if they address the same issue. In addition, my commits follow the guidelines from \"[How to write a good git commit message](http://chris.beams.io/posts/git-commit/)\":\r\n 1. Subject is separated from body by a blank line\r\n 1. Subject is limited to 50 characters (not including Jira issue reference)\r\n 1. Subject does not end with a period\r\n 1. 
Subject uses the imperative mood (\"add\", not \"adding\")\r\n 1. Body wraps at 72 characters\r\n 1. Body explains \"what\" and \"why\", not \"how\"\r\n\r\n### Documentation\r\n\r\n- [ ] In case of new functionality, my PR adds documentation that describes how to use it.\r\n - All the public functions and the classes in the PR contain docstrings that explain what it does\r\n - If you implement backwards incompatible changes, please leave a note in the [Updating.md](https://github.com/apache/airflow/blob/master/UPDATING.md) so we can assign it to a appropriate release\r\n\n", "before_files": [{"content": "#\n# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Increase length of password column in connection table\n\nRevision ID: c1840b4bcf1a\nRevises: 004c1210f153\nCreate Date: 2019-10-02 16:56:54.865550\n\n\"\"\"\n\nimport sqlalchemy as sa\nfrom alembic import op\n\n# revision identifiers, used by Alembic.\nrevision = 'c1840b4bcf1a'\ndown_revision = '004c1210f153'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n conn = op.get_bind()\n if conn.dialect.name == 'sqlite':\n # SQLite does not allow column modifications so we need to skip this migration\n return\n\n op.alter_column(table_name='connection',\n column_name='password',\n type_=sa.String(length=5000))\n\n\ndef downgrade():\n # Can't be undone\n pass\n", "path": "airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py"}], "after_files": [{"content": null, "path": "airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py"}]}
1293
553
gh_patches_debug_67223
rasdani/github-patches
git_diff
svthalia__concrexit-1867
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix payable registry cache using old data ### Describe the bug Payments are a mess. For example, if you pay for an event registration, delete the payment (through the admin or api), creating a new TPay payment through the api fails with 409 Conflict, there is still a payment in the registration model, but not in the payments api. Furthermore, paying with a different payment type works, but those payments can then not be removed. Also see #1806. I suspect there are many more related problems. ### How to reproduce Play around with payable things, deleting and recreating them, or deleting and recreating payments. ### Expected behaviour When a payable is not paid and should be payable with TPay, paying does not fail. Deleting a payment makes the payable not-paid as it was before creating the payment. Deleting or changing a payable is either impossible, or also deletes a payment that belongs to it. ### Additional context I think it would be a good idea to combine this with #1000. Some test-driven development would make sense for payments, and I think the expected behaviour should be well-testable. Of course the problems may not be entirely within the payments app, but also in the payables defined by other apps. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/payments/payables.py` Content: ``` 1 from functools import lru_cache 2 3 from django.db.models import Model 4 5 _registry = {} 6 7 8 class NotRegistered(Exception): 9 pass 10 11 12 class Payable: 13 def __init__(self, model: Model): 14 self.model = model 15 16 @property 17 def pk(self): 18 return self.model.pk 19 20 @property 21 def payment(self): 22 return self.model.payment 23 24 @payment.setter 25 def payment(self, payment): 26 self.model.payment = payment 27 28 @property 29 def payment_amount(self): 30 raise NotImplementedError 31 32 @property 33 def payment_topic(self): 34 raise NotImplementedError 35 36 @property 37 def payment_notes(self): 38 raise NotImplementedError 39 40 @property 41 def payment_payer(self): 42 raise NotImplementedError 43 44 @property 45 def tpay_allowed(self): 46 return True 47 48 def can_manage_payment(self, member): 49 raise NotImplementedError 50 51 52 class Payables: 53 _registry = {} 54 55 @lru_cache(maxsize=None) 56 def _get_key(self, model): 57 return f"{model._meta.app_label}_{model._meta.model_name}" 58 59 @lru_cache(maxsize=None) 60 def get_payable(self, model: Model) -> Payable: 61 if self._get_key(model) not in self._registry: 62 raise NotRegistered(f"No Payable registered for {self._get_key(model)}") 63 return self._registry[self._get_key(model)](model) 64 65 def register(self, model: Model, payable_class: Payable): 66 self._registry[self._get_key(model)] = payable_class 67 68 69 payables = Payables() 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/payments/payables.py b/website/payments/payables.py --- a/website/payments/payables.py +++ b/website/payments/payables.py @@ -56,7 +56,6 @@ def _get_key(self, model): return f"{model._meta.app_label}_{model._meta.model_name}" - @lru_cache(maxsize=None) def get_payable(self, model: Model) -> Payable: if self._get_key(model) not in self._registry: raise NotRegistered(f"No Payable registered for {self._get_key(model)}")
{"golden_diff": "diff --git a/website/payments/payables.py b/website/payments/payables.py\n--- a/website/payments/payables.py\n+++ b/website/payments/payables.py\n@@ -56,7 +56,6 @@\n def _get_key(self, model):\n return f\"{model._meta.app_label}_{model._meta.model_name}\"\n \n- @lru_cache(maxsize=None)\n def get_payable(self, model: Model) -> Payable:\n if self._get_key(model) not in self._registry:\n raise NotRegistered(f\"No Payable registered for {self._get_key(model)}\")\n", "issue": "Fix payable registry cache using old data\n### Describe the bug\r\nPayments are a mess. For example, if you pay for an event registration, delete the payment (through the admin or api), creating a new TPay payment through the api fails with 409 Conflict, there is still a payment in the registration model, but not in the payments api. Furthermore, paying with a different payment type works, but those payments can then not be removed. Also see #1806. I suspect there are many more related problems.\r\n\r\n### How to reproduce\r\nPlay around with payable things, deleting and recreating them, or deleting and recreating payments.\r\n\r\n### Expected behaviour\r\nWhen a payable is not paid and should be payable with TPay, paying does not fail. Deleting a payment makes the payable not-paid as it was before creating the payment. Deleting or changing a payable is either impossible, or also deletes a payment that belongs to it.\r\n\r\n### Additional context\r\nI think it would be a good idea to combine this with #1000. Some test-driven development would make sense for payments, and I think the expected behaviour should be well-testable. Of course the problems may not be entirely within the payments app, but also in the payables defined by other apps.\r\n\n", "before_files": [{"content": "from functools import lru_cache\n\nfrom django.db.models import Model\n\n_registry = {}\n\n\nclass NotRegistered(Exception):\n pass\n\n\nclass Payable:\n def __init__(self, model: Model):\n self.model = model\n\n @property\n def pk(self):\n return self.model.pk\n\n @property\n def payment(self):\n return self.model.payment\n\n @payment.setter\n def payment(self, payment):\n self.model.payment = payment\n\n @property\n def payment_amount(self):\n raise NotImplementedError\n\n @property\n def payment_topic(self):\n raise NotImplementedError\n\n @property\n def payment_notes(self):\n raise NotImplementedError\n\n @property\n def payment_payer(self):\n raise NotImplementedError\n\n @property\n def tpay_allowed(self):\n return True\n\n def can_manage_payment(self, member):\n raise NotImplementedError\n\n\nclass Payables:\n _registry = {}\n\n @lru_cache(maxsize=None)\n def _get_key(self, model):\n return f\"{model._meta.app_label}_{model._meta.model_name}\"\n\n @lru_cache(maxsize=None)\n def get_payable(self, model: Model) -> Payable:\n if self._get_key(model) not in self._registry:\n raise NotRegistered(f\"No Payable registered for {self._get_key(model)}\")\n return self._registry[self._get_key(model)](model)\n\n def register(self, model: Model, payable_class: Payable):\n self._registry[self._get_key(model)] = payable_class\n\n\npayables = Payables()\n", "path": "website/payments/payables.py"}], "after_files": [{"content": "from functools import lru_cache\n\nfrom django.db.models import Model\n\n_registry = {}\n\n\nclass NotRegistered(Exception):\n pass\n\n\nclass Payable:\n def __init__(self, model: Model):\n self.model = model\n\n @property\n def pk(self):\n return self.model.pk\n\n @property\n def payment(self):\n return 
self.model.payment\n\n @payment.setter\n def payment(self, payment):\n self.model.payment = payment\n\n @property\n def payment_amount(self):\n raise NotImplementedError\n\n @property\n def payment_topic(self):\n raise NotImplementedError\n\n @property\n def payment_notes(self):\n raise NotImplementedError\n\n @property\n def payment_payer(self):\n raise NotImplementedError\n\n @property\n def tpay_allowed(self):\n return True\n\n def can_manage_payment(self, member):\n raise NotImplementedError\n\n\nclass Payables:\n _registry = {}\n\n @lru_cache(maxsize=None)\n def _get_key(self, model):\n return f\"{model._meta.app_label}_{model._meta.model_name}\"\n\n def get_payable(self, model: Model) -> Payable:\n if self._get_key(model) not in self._registry:\n raise NotRegistered(f\"No Payable registered for {self._get_key(model)}\")\n return self._registry[self._get_key(model)](model)\n\n def register(self, model: Model, payable_class: Payable):\n self._registry[self._get_key(model)] = payable_class\n\n\npayables = Payables()\n", "path": "website/payments/payables.py"}]}
1003
135
gh_patches_debug_37053
rasdani/github-patches
git_diff
uccser__cs-unplugged-156
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fix __str__ function in ProgrammingExerciseLanguageImplementation The `__str__` function of `ProgrammingExerciseLanguageImplementation` returns an unknown property which creates an error. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py` Content: ``` 1 import os.path 2 from utils.BaseLoader import BaseLoader 3 from topics.models import ( 4 LearningOutcome, 5 ProgrammingExerciseDifficulty, 6 ProgrammingExerciseLanguage, 7 ProgrammingExerciseLanguageImplementation, 8 ) 9 10 11 class ProgrammingExerciseLoader(BaseLoader): 12 """Loader for a programming exercise""" 13 14 def __init__(self, load_log, exercise_slug, exercise_structure, topic, BASE_PATH): 15 """Initiates the loader for a programming exercise 16 17 Args: 18 exercise_slug (string): slug for exercise 19 exercise_structure (dict): attributes for exercise 20 topic: Topic model object 21 """ 22 super().__init__(BASE_PATH, load_log) 23 self.exercise_slug = exercise_slug 24 self.exercise_structure = exercise_structure 25 self.topic = topic 26 27 def load(self): 28 """load the content for a programming exercise""" 29 content = self.convert_md_file(os.path.join(self.BASE_PATH, self.exercise_structure['md-file'])) 30 31 programming_exercise = self.topic.topic_programming_exercises.create( 32 slug=self.exercise_slug, 33 name=content.title, 34 exercise_set_number=self.exercise_structure['exercise-set-number'], 35 exercise_number=self.exercise_structure['exercise-number'], 36 content=content.html_string, 37 difficulty=ProgrammingExerciseDifficulty.objects.get( 38 level=self.exercise_structure['difficulty-level'] 39 ) 40 ) 41 programming_exercise.save() 42 43 language_solutions = self.exercise_structure['programming-languages'] 44 for language in language_solutions: 45 # This gets the language for the solution, if not found it should throw an error! 
46 language_object = ProgrammingExerciseLanguage.objects.get( 47 slug=language 48 ) 49 50 expected_result_path = os.path.join(self.BASE_PATH, language_solutions[language]['expected-result']) 51 expected_result_content = self.convert_md_file(expected_result_path).html_string 52 53 hint_path = os.path.join(self.BASE_PATH, language_solutions[language]['hints']) 54 hint_content = self.convert_md_file(hint_path).html_string 55 56 solution_path = os.path.join(self.BASE_PATH, language_solutions[language]['solution']) 57 solution_content = self.convert_md_file(solution_path).html_string 58 59 implementation = ProgrammingExerciseLanguageImplementation.objects.create( 60 expected_result=expected_result_content, 61 hints=hint_content, 62 solution=solution_content, 63 language=language_object, 64 exercise=programming_exercise, 65 topic=self.topic 66 ) 67 implementation.save() 68 69 for learning_outcome_slug in self.exercise_structure['learning-outcomes']: 70 learning_outcome = LearningOutcome.objects.get( 71 slug=learning_outcome_slug 72 ) 73 programming_exercise.learning_outcomes.add(learning_outcome) 74 75 LOG_TEMPLATE = 'Added Programming Exercise: {}' 76 self.log(LOG_TEMPLATE.format(programming_exercise.name), 1) 77 ``` Path: `csunplugged/topics/models.py` Content: ``` 1 from collections import OrderedDict 2 3 from django.db import models 4 from resources.models import Resource 5 6 7 class LearningOutcome(models.Model): 8 # Auto-incrementing 'id' field is automatically set by Django 9 slug = models.SlugField(unique=True) 10 text = models.CharField(max_length=200, unique=True) 11 12 def __str__(self): 13 return self.text 14 15 16 class CurriculumArea(models.Model): 17 # Auto-incrementing 'id' field is automatically set by Django 18 slug = models.SlugField(unique=True) 19 name = models.CharField(max_length=100, unique=True) 20 21 def __str__(self): 22 return self.name 23 24 25 class ClassroomResource(models.Model): 26 # Auto-incrementing 'id' field is automatically set by Django 27 text = models.CharField(max_length=300, unique=True) 28 29 def __str__(self): 30 return self.text 31 32 33 class Topic(models.Model): 34 # Auto-incrementing 'id' field is automatically set by Django 35 slug = models.SlugField(unique=True) 36 name = models.CharField(max_length=100) 37 content = models.TextField() 38 other_resources = models.TextField() 39 icon = models.CharField(max_length=100, null=True) 40 41 def __str__(self): 42 return self.name 43 44 45 class UnitPlan(models.Model): 46 # Auto-incrementing 'id' field is automatically set by Django 47 topic = models.ForeignKey( 48 Topic, 49 on_delete=models.CASCADE, 50 related_name='topic_unit_plans' 51 ) 52 slug = models.SlugField() 53 name = models.CharField(max_length=100) 54 content = models.TextField() 55 56 def lessons_by_age_group(self): 57 """Returns groups of lessons grouped by the lesson minimum age 58 and maximum ages, and then order by number. 59 60 Returns: 61 A ordered dictionary of grouped lessons. 62 The key is a tuple of the minimum age and maximum ages for 63 the lessons. 64 The value for a key is a sorted list of lessons. 65 The dictionary is ordered by minimum age, then maximum age. 
66 """ 67 grouped_lessons = OrderedDict() 68 lessons = self.unit_plan_lessons.order_by('min_age', 'max_age', 'number') 69 for lesson in lessons: 70 if (lesson.min_age, lesson.max_age) in grouped_lessons: 71 grouped_lessons[(lesson.min_age, lesson.max_age)].append(lesson) 72 else: 73 grouped_lessons[(lesson.min_age, lesson.max_age)] = [lesson] 74 return grouped_lessons 75 76 def __str__(self): 77 return self.name 78 79 80 class ProgrammingExerciseDifficulty(models.Model): 81 # Auto-incrementing 'id' field is automatically set by Django 82 level = models.PositiveSmallIntegerField(unique=True) 83 name = models.CharField(max_length=100, unique=True) 84 85 def __str__(self): 86 return self.name 87 88 89 class ProgrammingExercise(models.Model): 90 # Auto-incrementing 'id' field is automatically set by Django 91 topic = models.ForeignKey( 92 Topic, 93 on_delete=models.CASCADE, 94 related_name='topic_programming_exercises' 95 ) 96 slug = models.SlugField() 97 name = models.CharField(max_length=200) 98 exercise_set_number = models.PositiveSmallIntegerField() 99 exercise_number = models.PositiveSmallIntegerField() 100 content = models.TextField() 101 learning_outcomes = models.ManyToManyField( 102 LearningOutcome, 103 related_name='programming_exercise_learning_outcomes' 104 ) 105 difficulty = models.ForeignKey( 106 ProgrammingExerciseDifficulty, 107 on_delete=models.CASCADE, 108 related_name='difficulty_programming_exercises' 109 ) 110 111 def __str__(self): 112 return self.name 113 114 115 class ProgrammingExerciseLanguage(models.Model): 116 # Auto-incrementing 'id' field is automatically set by Django 117 slug = models.SlugField() 118 name = models.CharField(max_length=200) 119 icon = models.CharField(max_length=100, null=True) 120 121 def __str__(self): 122 return self.name 123 124 125 class ProgrammingExerciseLanguageImplementation(models.Model): 126 # Auto-incrementing 'id' field is automatically set by Django 127 topic = models.ForeignKey( 128 Topic, 129 on_delete=models.CASCADE, 130 related_name='implementations' 131 ) 132 language = models.ForeignKey( 133 ProgrammingExerciseLanguage, 134 on_delete=models.CASCADE, 135 related_name='implementations' 136 ) 137 exercise = models.ForeignKey( 138 ProgrammingExercise, 139 on_delete=models.CASCADE, 140 related_name='implementations' 141 ) 142 expected_result = models.TextField() 143 hints = models.TextField() 144 solution = models.TextField() 145 146 def __str__(self): 147 return self.name 148 149 150 class Lesson(models.Model): 151 # Auto-incrementing 'id' field is automatically set by Django 152 topic = models.ForeignKey( 153 Topic, 154 on_delete=models.CASCADE, 155 related_name='topic_lessons' 156 ) 157 unit_plan = models.ForeignKey( 158 UnitPlan, 159 on_delete=models.CASCADE, 160 related_name='unit_plan_lessons' 161 ) 162 slug = models.SlugField() 163 name = models.CharField(max_length=100) 164 number = models.IntegerField() 165 content = models.TextField() 166 min_age = models.PositiveSmallIntegerField() 167 max_age = models.PositiveSmallIntegerField() 168 programming_exercises = models.ManyToManyField( 169 ProgrammingExercise, 170 related_name='lessons' 171 ) 172 learning_outcomes = models.ManyToManyField( 173 LearningOutcome, 174 related_name='lesson_learning_outcomes' 175 ) 176 curriculum_areas = models.ManyToManyField( 177 CurriculumArea, 178 related_name='lesson_curriculum_areas' 179 ) 180 classroom_resources = models.ManyToManyField( 181 ClassroomResource, 182 related_name='lesson_classroom_resources' 183 ) 184 generated_resources = 
models.ManyToManyField( 185 Resource, 186 through='ConnectedGeneratedResource', 187 related_name='lesson_generated_resources' 188 ) 189 190 def has_programming_exercises(self): 191 """Returns a boolean to state whether the lesson has any 192 programming exercises. 193 194 Returns: 195 True if the lesson has connected programming exercises. 196 Otherwise False. 197 """ 198 return bool(self.programming_exercises.all()) 199 200 def __str__(self): 201 return self.name 202 203 204 class CurriculumIntegration(models.Model): 205 # Auto-incrementing 'id' field is automatically set by Django 206 topic = models.ForeignKey( 207 Topic, 208 on_delete=models.CASCADE, 209 related_name='curriculum_integrations' 210 ) 211 slug = models.SlugField() 212 number = models.PositiveSmallIntegerField() 213 name = models.CharField(max_length=200) 214 content = models.TextField() 215 curriculum_areas = models.ManyToManyField( 216 CurriculumArea, 217 related_name='curriculum_integrations' 218 ) 219 prerequisite_lessons = models.ManyToManyField( 220 Lesson, 221 related_name='curriculum_integrations' 222 ) 223 224 def has_prerequisite_lessons(self): 225 """Returns True if the curriculum integration has at 226 least one prerequisite lesson, otherwise False. 227 228 Returns: 229 True if the curriculum integration has at 230 least one prerequisite lesson, otherwise False. 231 """ 232 return bool(self.prerequisite_lessons.all()) 233 234 def __str__(self): 235 return self.name 236 237 238 class ConnectedGeneratedResource(models.Model): 239 # Auto-incrementing 'id' field is automatically set by Django 240 resource = models.ForeignKey(Resource, on_delete=models.CASCADE) 241 lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE) 242 description = models.CharField(max_length=300) 243 ``` Path: `csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py` Content: ``` 1 import os.path 2 from utils.BaseLoader import BaseLoader 3 from topics.models import CurriculumArea, Lesson 4 5 6 class CurriculumIntegrationsLoader(BaseLoader): 7 """Loader for curriculum integrations""" 8 9 def __init__(self, load_log, structure_file, topic, BASE_PATH): 10 """Initiates the loader for curriculum integrations 11 12 Args: 13 structure_file: file path (string) 14 topic: Topic model object 15 """ 16 super().__init__(BASE_PATH, load_log) 17 self.structure_file = os.path.join(self.BASE_PATH, structure_file) 18 self.BASE_PATH = os.path.join(self.BASE_PATH, os.path.split(structure_file)[0]) 19 self.topic = topic 20 21 def load(self): 22 """Load the content for curriculum integrations""" 23 if self.structure_file: 24 structure = self.load_yaml_file(self.structure_file) 25 26 for integration_slug, integration_data in structure.items(): 27 md_file = integration_data['md-file'] 28 integration_content = self.convert_md_file(os.path.join(self.BASE_PATH, md_file)) 29 30 integration = self.topic.curriculum_integrations.create( 31 slug=integration_slug, 32 number=integration_data['number'], 33 name=integration_content.title, 34 content=integration_content.html_string, 35 ) 36 integration.save() 37 38 # Add curriculum areas 39 curriculum_area_slugs = integration_data['curriculum-areas'] 40 for curriculum_area_slug in curriculum_area_slugs: 41 curriculum_area = CurriculumArea.objects.get( 42 slug=curriculum_area_slug 43 ) 44 integration.curriculum_areas.add(curriculum_area) 45 46 # Add prerequisite lessons 47 if 'prerequisite-lessons' in integration_data: 48 prerequisite_lessons_slugs = integration_data['prerequisite-lessons'] 49 for 
prerequisite_lessons_slug in prerequisite_lessons_slugs: 50 (unit_plan_slug, lesson_slug) = prerequisite_lessons_slug.split('/') 51 print(unit_plan_slug, lesson_slug) 52 lesson = Lesson.objects.get( 53 slug=lesson_slug, 54 unit_plan__slug=unit_plan_slug, 55 topic__slug=self.topic.slug 56 ) 57 integration.prerequisite_lessons.add(lesson) 58 59 self.log('Added Curriculum Integration: {}'.format(integration.name), 1) 60 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py b/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py --- a/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py +++ b/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py @@ -48,7 +48,6 @@ prerequisite_lessons_slugs = integration_data['prerequisite-lessons'] for prerequisite_lessons_slug in prerequisite_lessons_slugs: (unit_plan_slug, lesson_slug) = prerequisite_lessons_slug.split('/') - print(unit_plan_slug, lesson_slug) lesson = Lesson.objects.get( slug=lesson_slug, unit_plan__slug=unit_plan_slug, diff --git a/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py b/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py --- a/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py +++ b/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py @@ -40,6 +40,9 @@ ) programming_exercise.save() + LOG_TEMPLATE = 'Added Programming Exercise: {}' + self.log(LOG_TEMPLATE.format(programming_exercise.name), 1) + language_solutions = self.exercise_structure['programming-languages'] for language in language_solutions: # This gets the language for the solution, if not found it should throw an error! @@ -66,11 +69,11 @@ ) implementation.save() + LOG_TEMPLATE = 'Added Language Implementation: {}' + self.log(LOG_TEMPLATE.format(implementation.language), 2) + for learning_outcome_slug in self.exercise_structure['learning-outcomes']: learning_outcome = LearningOutcome.objects.get( slug=learning_outcome_slug ) programming_exercise.learning_outcomes.add(learning_outcome) - - LOG_TEMPLATE = 'Added Programming Exercise: {}' - self.log(LOG_TEMPLATE.format(programming_exercise.name), 1) diff --git a/csunplugged/topics/models.py b/csunplugged/topics/models.py --- a/csunplugged/topics/models.py +++ b/csunplugged/topics/models.py @@ -144,7 +144,12 @@ solution = models.TextField() def __str__(self): - return self.name + return '{} for exercise {}.{}, {}'.format( + self.language.name, + self.exercise.exercise_set_number, + self.exercise.exercise_number, + self.exercise.name + ) class Lesson(models.Model):
{"golden_diff": "diff --git a/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py b/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py\n--- a/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py\n+++ b/csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py\n@@ -48,7 +48,6 @@\n prerequisite_lessons_slugs = integration_data['prerequisite-lessons']\n for prerequisite_lessons_slug in prerequisite_lessons_slugs:\n (unit_plan_slug, lesson_slug) = prerequisite_lessons_slug.split('/')\n- print(unit_plan_slug, lesson_slug)\n lesson = Lesson.objects.get(\n slug=lesson_slug,\n unit_plan__slug=unit_plan_slug,\ndiff --git a/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py b/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py\n--- a/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py\n+++ b/csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py\n@@ -40,6 +40,9 @@\n )\n programming_exercise.save()\n \n+ LOG_TEMPLATE = 'Added Programming Exercise: {}'\n+ self.log(LOG_TEMPLATE.format(programming_exercise.name), 1)\n+\n language_solutions = self.exercise_structure['programming-languages']\n for language in language_solutions:\n # This gets the language for the solution, if not found it should throw an error!\n@@ -66,11 +69,11 @@\n )\n implementation.save()\n \n+ LOG_TEMPLATE = 'Added Language Implementation: {}'\n+ self.log(LOG_TEMPLATE.format(implementation.language), 2)\n+\n for learning_outcome_slug in self.exercise_structure['learning-outcomes']:\n learning_outcome = LearningOutcome.objects.get(\n slug=learning_outcome_slug\n )\n programming_exercise.learning_outcomes.add(learning_outcome)\n-\n- LOG_TEMPLATE = 'Added Programming Exercise: {}'\n- self.log(LOG_TEMPLATE.format(programming_exercise.name), 1)\ndiff --git a/csunplugged/topics/models.py b/csunplugged/topics/models.py\n--- a/csunplugged/topics/models.py\n+++ b/csunplugged/topics/models.py\n@@ -144,7 +144,12 @@\n solution = models.TextField()\n \n def __str__(self):\n- return self.name\n+ return '{} for exercise {}.{}, {}'.format(\n+ self.language.name,\n+ self.exercise.exercise_set_number,\n+ self.exercise.exercise_number,\n+ self.exercise.name\n+ )\n \n \n class Lesson(models.Model):\n", "issue": "Fix __str__ function in ProgrammingExerciseLanguageImplementation\nThe `__str__` function of `ProgrammingExerciseLanguageImplementation` returns an unknown property which creates an error.\n", "before_files": [{"content": "import os.path\nfrom utils.BaseLoader import BaseLoader\nfrom topics.models import (\n LearningOutcome,\n ProgrammingExerciseDifficulty,\n ProgrammingExerciseLanguage,\n ProgrammingExerciseLanguageImplementation,\n)\n\n\nclass ProgrammingExerciseLoader(BaseLoader):\n \"\"\"Loader for a programming exercise\"\"\"\n\n def __init__(self, load_log, exercise_slug, exercise_structure, topic, BASE_PATH):\n \"\"\"Initiates the loader for a programming exercise\n\n Args:\n exercise_slug (string): slug for exercise\n exercise_structure (dict): attributes for exercise\n topic: Topic model object\n \"\"\"\n super().__init__(BASE_PATH, load_log)\n self.exercise_slug = exercise_slug\n self.exercise_structure = exercise_structure\n self.topic = topic\n\n def load(self):\n \"\"\"load the content for a programming exercise\"\"\"\n content = self.convert_md_file(os.path.join(self.BASE_PATH, self.exercise_structure['md-file']))\n\n programming_exercise = self.topic.topic_programming_exercises.create(\n 
slug=self.exercise_slug,\n name=content.title,\n exercise_set_number=self.exercise_structure['exercise-set-number'],\n exercise_number=self.exercise_structure['exercise-number'],\n content=content.html_string,\n difficulty=ProgrammingExerciseDifficulty.objects.get(\n level=self.exercise_structure['difficulty-level']\n )\n )\n programming_exercise.save()\n\n language_solutions = self.exercise_structure['programming-languages']\n for language in language_solutions:\n # This gets the language for the solution, if not found it should throw an error!\n language_object = ProgrammingExerciseLanguage.objects.get(\n slug=language\n )\n\n expected_result_path = os.path.join(self.BASE_PATH, language_solutions[language]['expected-result'])\n expected_result_content = self.convert_md_file(expected_result_path).html_string\n\n hint_path = os.path.join(self.BASE_PATH, language_solutions[language]['hints'])\n hint_content = self.convert_md_file(hint_path).html_string\n\n solution_path = os.path.join(self.BASE_PATH, language_solutions[language]['solution'])\n solution_content = self.convert_md_file(solution_path).html_string\n\n implementation = ProgrammingExerciseLanguageImplementation.objects.create(\n expected_result=expected_result_content,\n hints=hint_content,\n solution=solution_content,\n language=language_object,\n exercise=programming_exercise,\n topic=self.topic\n )\n implementation.save()\n\n for learning_outcome_slug in self.exercise_structure['learning-outcomes']:\n learning_outcome = LearningOutcome.objects.get(\n slug=learning_outcome_slug\n )\n programming_exercise.learning_outcomes.add(learning_outcome)\n\n LOG_TEMPLATE = 'Added Programming Exercise: {}'\n self.log(LOG_TEMPLATE.format(programming_exercise.name), 1)\n", "path": "csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py"}, {"content": "from collections import OrderedDict\n\nfrom django.db import models\nfrom resources.models import Resource\n\n\nclass LearningOutcome(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField(unique=True)\n text = models.CharField(max_length=200, unique=True)\n\n def __str__(self):\n return self.text\n\n\nclass CurriculumArea(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField(unique=True)\n name = models.CharField(max_length=100, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass ClassroomResource(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n text = models.CharField(max_length=300, unique=True)\n\n def __str__(self):\n return self.text\n\n\nclass Topic(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField(unique=True)\n name = models.CharField(max_length=100)\n content = models.TextField()\n other_resources = models.TextField()\n icon = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass UnitPlan(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='topic_unit_plans'\n )\n slug = models.SlugField()\n name = models.CharField(max_length=100)\n content = models.TextField()\n\n def lessons_by_age_group(self):\n \"\"\"Returns groups of lessons grouped by the lesson minimum age\n and maximum ages, and then order by number.\n\n Returns:\n A ordered dictionary of grouped lessons.\n The key is a tuple of the minimum age and maximum ages for\n 
the lessons.\n The value for a key is a sorted list of lessons.\n The dictionary is ordered by minimum age, then maximum age.\n \"\"\"\n grouped_lessons = OrderedDict()\n lessons = self.unit_plan_lessons.order_by('min_age', 'max_age', 'number')\n for lesson in lessons:\n if (lesson.min_age, lesson.max_age) in grouped_lessons:\n grouped_lessons[(lesson.min_age, lesson.max_age)].append(lesson)\n else:\n grouped_lessons[(lesson.min_age, lesson.max_age)] = [lesson]\n return grouped_lessons\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExerciseDifficulty(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n level = models.PositiveSmallIntegerField(unique=True)\n name = models.CharField(max_length=100, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExercise(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='topic_programming_exercises'\n )\n slug = models.SlugField()\n name = models.CharField(max_length=200)\n exercise_set_number = models.PositiveSmallIntegerField()\n exercise_number = models.PositiveSmallIntegerField()\n content = models.TextField()\n learning_outcomes = models.ManyToManyField(\n LearningOutcome,\n related_name='programming_exercise_learning_outcomes'\n )\n difficulty = models.ForeignKey(\n ProgrammingExerciseDifficulty,\n on_delete=models.CASCADE,\n related_name='difficulty_programming_exercises'\n )\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExerciseLanguage(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField()\n name = models.CharField(max_length=200)\n icon = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExerciseLanguageImplementation(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='implementations'\n )\n language = models.ForeignKey(\n ProgrammingExerciseLanguage,\n on_delete=models.CASCADE,\n related_name='implementations'\n )\n exercise = models.ForeignKey(\n ProgrammingExercise,\n on_delete=models.CASCADE,\n related_name='implementations'\n )\n expected_result = models.TextField()\n hints = models.TextField()\n solution = models.TextField()\n\n def __str__(self):\n return self.name\n\n\nclass Lesson(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='topic_lessons'\n )\n unit_plan = models.ForeignKey(\n UnitPlan,\n on_delete=models.CASCADE,\n related_name='unit_plan_lessons'\n )\n slug = models.SlugField()\n name = models.CharField(max_length=100)\n number = models.IntegerField()\n content = models.TextField()\n min_age = models.PositiveSmallIntegerField()\n max_age = models.PositiveSmallIntegerField()\n programming_exercises = models.ManyToManyField(\n ProgrammingExercise,\n related_name='lessons'\n )\n learning_outcomes = models.ManyToManyField(\n LearningOutcome,\n related_name='lesson_learning_outcomes'\n )\n curriculum_areas = models.ManyToManyField(\n CurriculumArea,\n related_name='lesson_curriculum_areas'\n )\n classroom_resources = models.ManyToManyField(\n ClassroomResource,\n related_name='lesson_classroom_resources'\n )\n generated_resources = models.ManyToManyField(\n Resource,\n through='ConnectedGeneratedResource',\n 
related_name='lesson_generated_resources'\n )\n\n def has_programming_exercises(self):\n \"\"\"Returns a boolean to state whether the lesson has any\n programming exercises.\n\n Returns:\n True if the lesson has connected programming exercises.\n Otherwise False.\n \"\"\"\n return bool(self.programming_exercises.all())\n\n def __str__(self):\n return self.name\n\n\nclass CurriculumIntegration(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='curriculum_integrations'\n )\n slug = models.SlugField()\n number = models.PositiveSmallIntegerField()\n name = models.CharField(max_length=200)\n content = models.TextField()\n curriculum_areas = models.ManyToManyField(\n CurriculumArea,\n related_name='curriculum_integrations'\n )\n prerequisite_lessons = models.ManyToManyField(\n Lesson,\n related_name='curriculum_integrations'\n )\n\n def has_prerequisite_lessons(self):\n \"\"\"Returns True if the curriculum integration has at\n least one prerequisite lesson, otherwise False.\n\n Returns:\n True if the curriculum integration has at\n least one prerequisite lesson, otherwise False.\n \"\"\"\n return bool(self.prerequisite_lessons.all())\n\n def __str__(self):\n return self.name\n\n\nclass ConnectedGeneratedResource(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n resource = models.ForeignKey(Resource, on_delete=models.CASCADE)\n lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE)\n description = models.CharField(max_length=300)\n", "path": "csunplugged/topics/models.py"}, {"content": "import os.path\nfrom utils.BaseLoader import BaseLoader\nfrom topics.models import CurriculumArea, Lesson\n\n\nclass CurriculumIntegrationsLoader(BaseLoader):\n \"\"\"Loader for curriculum integrations\"\"\"\n\n def __init__(self, load_log, structure_file, topic, BASE_PATH):\n \"\"\"Initiates the loader for curriculum integrations\n\n Args:\n structure_file: file path (string)\n topic: Topic model object\n \"\"\"\n super().__init__(BASE_PATH, load_log)\n self.structure_file = os.path.join(self.BASE_PATH, structure_file)\n self.BASE_PATH = os.path.join(self.BASE_PATH, os.path.split(structure_file)[0])\n self.topic = topic\n\n def load(self):\n \"\"\"Load the content for curriculum integrations\"\"\"\n if self.structure_file:\n structure = self.load_yaml_file(self.structure_file)\n\n for integration_slug, integration_data in structure.items():\n md_file = integration_data['md-file']\n integration_content = self.convert_md_file(os.path.join(self.BASE_PATH, md_file))\n\n integration = self.topic.curriculum_integrations.create(\n slug=integration_slug,\n number=integration_data['number'],\n name=integration_content.title,\n content=integration_content.html_string,\n )\n integration.save()\n\n # Add curriculum areas\n curriculum_area_slugs = integration_data['curriculum-areas']\n for curriculum_area_slug in curriculum_area_slugs:\n curriculum_area = CurriculumArea.objects.get(\n slug=curriculum_area_slug\n )\n integration.curriculum_areas.add(curriculum_area)\n\n # Add prerequisite lessons\n if 'prerequisite-lessons' in integration_data:\n prerequisite_lessons_slugs = integration_data['prerequisite-lessons']\n for prerequisite_lessons_slug in prerequisite_lessons_slugs:\n (unit_plan_slug, lesson_slug) = prerequisite_lessons_slug.split('/')\n print(unit_plan_slug, lesson_slug)\n lesson = Lesson.objects.get(\n slug=lesson_slug,\n unit_plan__slug=unit_plan_slug,\n 
topic__slug=self.topic.slug\n )\n integration.prerequisite_lessons.add(lesson)\n\n self.log('Added Curriculum Integration: {}'.format(integration.name), 1)\n", "path": "csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py"}], "after_files": [{"content": "import os.path\nfrom utils.BaseLoader import BaseLoader\nfrom topics.models import (\n LearningOutcome,\n ProgrammingExerciseDifficulty,\n ProgrammingExerciseLanguage,\n ProgrammingExerciseLanguageImplementation,\n)\n\n\nclass ProgrammingExerciseLoader(BaseLoader):\n \"\"\"Loader for a programming exercise\"\"\"\n\n def __init__(self, load_log, exercise_slug, exercise_structure, topic, BASE_PATH):\n \"\"\"Initiates the loader for a programming exercise\n\n Args:\n exercise_slug (string): slug for exercise\n exercise_structure (dict): attributes for exercise\n topic: Topic model object\n \"\"\"\n super().__init__(BASE_PATH, load_log)\n self.exercise_slug = exercise_slug\n self.exercise_structure = exercise_structure\n self.topic = topic\n\n def load(self):\n \"\"\"load the content for a programming exercise\"\"\"\n content = self.convert_md_file(os.path.join(self.BASE_PATH, self.exercise_structure['md-file']))\n\n programming_exercise = self.topic.topic_programming_exercises.create(\n slug=self.exercise_slug,\n name=content.title,\n exercise_set_number=self.exercise_structure['exercise-set-number'],\n exercise_number=self.exercise_structure['exercise-number'],\n content=content.html_string,\n difficulty=ProgrammingExerciseDifficulty.objects.get(\n level=self.exercise_structure['difficulty-level']\n )\n )\n programming_exercise.save()\n\n LOG_TEMPLATE = 'Added Programming Exercise: {}'\n self.log(LOG_TEMPLATE.format(programming_exercise.name), 1)\n\n language_solutions = self.exercise_structure['programming-languages']\n for language in language_solutions:\n # This gets the language for the solution, if not found it should throw an error!\n language_object = ProgrammingExerciseLanguage.objects.get(\n slug=language\n )\n\n expected_result_path = os.path.join(self.BASE_PATH, language_solutions[language]['expected-result'])\n expected_result_content = self.convert_md_file(expected_result_path).html_string\n\n hint_path = os.path.join(self.BASE_PATH, language_solutions[language]['hints'])\n hint_content = self.convert_md_file(hint_path).html_string\n\n solution_path = os.path.join(self.BASE_PATH, language_solutions[language]['solution'])\n solution_content = self.convert_md_file(solution_path).html_string\n\n implementation = ProgrammingExerciseLanguageImplementation.objects.create(\n expected_result=expected_result_content,\n hints=hint_content,\n solution=solution_content,\n language=language_object,\n exercise=programming_exercise,\n topic=self.topic\n )\n implementation.save()\n\n LOG_TEMPLATE = 'Added Language Implementation: {}'\n self.log(LOG_TEMPLATE.format(implementation.language), 2)\n\n for learning_outcome_slug in self.exercise_structure['learning-outcomes']:\n learning_outcome = LearningOutcome.objects.get(\n slug=learning_outcome_slug\n )\n programming_exercise.learning_outcomes.add(learning_outcome)\n", "path": "csunplugged/topics/management/commands/_ProgrammingExerciseLoader.py"}, {"content": "from collections import OrderedDict\n\nfrom django.db import models\nfrom resources.models import Resource\n\n\nclass LearningOutcome(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField(unique=True)\n text = models.CharField(max_length=200, unique=True)\n\n def 
__str__(self):\n return self.text\n\n\nclass CurriculumArea(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField(unique=True)\n name = models.CharField(max_length=100, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass ClassroomResource(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n text = models.CharField(max_length=300, unique=True)\n\n def __str__(self):\n return self.text\n\n\nclass Topic(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField(unique=True)\n name = models.CharField(max_length=100)\n content = models.TextField()\n other_resources = models.TextField()\n icon = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass UnitPlan(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='topic_unit_plans'\n )\n slug = models.SlugField()\n name = models.CharField(max_length=100)\n content = models.TextField()\n\n def lessons_by_age_group(self):\n \"\"\"Returns groups of lessons grouped by the lesson minimum age\n and maximum ages, and then order by number.\n\n Returns:\n A ordered dictionary of grouped lessons.\n The key is a tuple of the minimum age and maximum ages for\n the lessons.\n The value for a key is a sorted list of lessons.\n The dictionary is ordered by minimum age, then maximum age.\n \"\"\"\n grouped_lessons = OrderedDict()\n lessons = self.unit_plan_lessons.order_by('min_age', 'max_age', 'number')\n for lesson in lessons:\n if (lesson.min_age, lesson.max_age) in grouped_lessons:\n grouped_lessons[(lesson.min_age, lesson.max_age)].append(lesson)\n else:\n grouped_lessons[(lesson.min_age, lesson.max_age)] = [lesson]\n return grouped_lessons\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExerciseDifficulty(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n level = models.PositiveSmallIntegerField(unique=True)\n name = models.CharField(max_length=100, unique=True)\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExercise(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='topic_programming_exercises'\n )\n slug = models.SlugField()\n name = models.CharField(max_length=200)\n exercise_set_number = models.PositiveSmallIntegerField()\n exercise_number = models.PositiveSmallIntegerField()\n content = models.TextField()\n learning_outcomes = models.ManyToManyField(\n LearningOutcome,\n related_name='programming_exercise_learning_outcomes'\n )\n difficulty = models.ForeignKey(\n ProgrammingExerciseDifficulty,\n on_delete=models.CASCADE,\n related_name='difficulty_programming_exercises'\n )\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExerciseLanguage(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n slug = models.SlugField()\n name = models.CharField(max_length=200)\n icon = models.CharField(max_length=100, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass ProgrammingExerciseLanguageImplementation(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='implementations'\n )\n language = models.ForeignKey(\n ProgrammingExerciseLanguage,\n 
on_delete=models.CASCADE,\n related_name='implementations'\n )\n exercise = models.ForeignKey(\n ProgrammingExercise,\n on_delete=models.CASCADE,\n related_name='implementations'\n )\n expected_result = models.TextField()\n hints = models.TextField()\n solution = models.TextField()\n\n def __str__(self):\n return '{} for exercise {}.{}, {}'.format(\n self.language.name,\n self.exercise.exercise_set_number,\n self.exercise.exercise_number,\n self.exercise.name\n )\n\n\nclass Lesson(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='topic_lessons'\n )\n unit_plan = models.ForeignKey(\n UnitPlan,\n on_delete=models.CASCADE,\n related_name='unit_plan_lessons'\n )\n slug = models.SlugField()\n name = models.CharField(max_length=100)\n number = models.IntegerField()\n content = models.TextField()\n min_age = models.PositiveSmallIntegerField()\n max_age = models.PositiveSmallIntegerField()\n programming_exercises = models.ManyToManyField(\n ProgrammingExercise,\n related_name='lessons'\n )\n learning_outcomes = models.ManyToManyField(\n LearningOutcome,\n related_name='lesson_learning_outcomes'\n )\n curriculum_areas = models.ManyToManyField(\n CurriculumArea,\n related_name='lesson_curriculum_areas'\n )\n classroom_resources = models.ManyToManyField(\n ClassroomResource,\n related_name='lesson_classroom_resources'\n )\n generated_resources = models.ManyToManyField(\n Resource,\n through='ConnectedGeneratedResource',\n related_name='lesson_generated_resources'\n )\n\n def has_programming_exercises(self):\n \"\"\"Returns a boolean to state whether the lesson has any\n programming exercises.\n\n Returns:\n True if the lesson has connected programming exercises.\n Otherwise False.\n \"\"\"\n return bool(self.programming_exercises.all())\n\n def __str__(self):\n return self.name\n\n\nclass CurriculumIntegration(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n topic = models.ForeignKey(\n Topic,\n on_delete=models.CASCADE,\n related_name='curriculum_integrations'\n )\n slug = models.SlugField()\n number = models.PositiveSmallIntegerField()\n name = models.CharField(max_length=200)\n content = models.TextField()\n curriculum_areas = models.ManyToManyField(\n CurriculumArea,\n related_name='curriculum_integrations'\n )\n prerequisite_lessons = models.ManyToManyField(\n Lesson,\n related_name='curriculum_integrations'\n )\n\n def has_prerequisite_lessons(self):\n \"\"\"Returns True if the curriculum integration has at\n least one prerequisite lesson, otherwise False.\n\n Returns:\n True if the curriculum integration has at\n least one prerequisite lesson, otherwise False.\n \"\"\"\n return bool(self.prerequisite_lessons.all())\n\n def __str__(self):\n return self.name\n\n\nclass ConnectedGeneratedResource(models.Model):\n # Auto-incrementing 'id' field is automatically set by Django\n resource = models.ForeignKey(Resource, on_delete=models.CASCADE)\n lesson = models.ForeignKey(Lesson, on_delete=models.CASCADE)\n description = models.CharField(max_length=300)\n", "path": "csunplugged/topics/models.py"}, {"content": "import os.path\nfrom utils.BaseLoader import BaseLoader\nfrom topics.models import CurriculumArea, Lesson\n\n\nclass CurriculumIntegrationsLoader(BaseLoader):\n \"\"\"Loader for curriculum integrations\"\"\"\n\n def __init__(self, load_log, structure_file, topic, BASE_PATH):\n \"\"\"Initiates the loader for curriculum integrations\n\n Args:\n 
structure_file: file path (string)\n topic: Topic model object\n \"\"\"\n super().__init__(BASE_PATH, load_log)\n self.structure_file = os.path.join(self.BASE_PATH, structure_file)\n self.BASE_PATH = os.path.join(self.BASE_PATH, os.path.split(structure_file)[0])\n self.topic = topic\n\n def load(self):\n \"\"\"Load the content for curriculum integrations\"\"\"\n if self.structure_file:\n structure = self.load_yaml_file(self.structure_file)\n\n for integration_slug, integration_data in structure.items():\n md_file = integration_data['md-file']\n integration_content = self.convert_md_file(os.path.join(self.BASE_PATH, md_file))\n\n integration = self.topic.curriculum_integrations.create(\n slug=integration_slug,\n number=integration_data['number'],\n name=integration_content.title,\n content=integration_content.html_string,\n )\n integration.save()\n\n # Add curriculum areas\n curriculum_area_slugs = integration_data['curriculum-areas']\n for curriculum_area_slug in curriculum_area_slugs:\n curriculum_area = CurriculumArea.objects.get(\n slug=curriculum_area_slug\n )\n integration.curriculum_areas.add(curriculum_area)\n\n # Add prerequisite lessons\n if 'prerequisite-lessons' in integration_data:\n prerequisite_lessons_slugs = integration_data['prerequisite-lessons']\n for prerequisite_lessons_slug in prerequisite_lessons_slugs:\n (unit_plan_slug, lesson_slug) = prerequisite_lessons_slug.split('/')\n lesson = Lesson.objects.get(\n slug=lesson_slug,\n unit_plan__slug=unit_plan_slug,\n topic__slug=self.topic.slug\n )\n integration.prerequisite_lessons.add(lesson)\n\n self.log('Added Curriculum Integration: {}'.format(integration.name), 1)\n", "path": "csunplugged/topics/management/commands/_CurriculumIntegrationsLoader.py"}]}
num_tokens: 3,843
num_tokens_diff: 582

problem_id: gh_patches_debug_29304
source: rasdani/github-patches
task_type: git_diff
in_source_id: cal-itp__benefits-755
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ensure relevant analytics events are fired for Login.gov IAL2 flow Design to collaborate with Dev to ensure the behaviors they are interested in understanding are defined in Amplitude. - [x] `cancel_sign_in` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `benefits/oauth/analytics.py` Content: ``` 1 """ 2 The oauth application: analytics implementation. 3 """ 4 from benefits.core import analytics as core, session 5 6 7 class OAuthEvent(core.Event): 8 """Base OAuth analytics event.""" 9 10 def __init__(self, request, event_type): 11 super().__init__(request, event_type) 12 verifier = session.verifier(request) 13 self.update_event_properties(auth_provider=verifier.auth_provider.client_name) 14 15 16 class StartedSignInEvent(OAuthEvent): 17 """Analytics event representing the beginning of the OAuth sign in flow.""" 18 19 def __init__(self, request): 20 super().__init__(request, "started sign in") 21 22 23 class FinishedSignInEvent(OAuthEvent): 24 """Analytics event representing the end of the OAuth sign in flow.""" 25 26 def __init__(self, request): 27 super().__init__(request, "finished sign in") 28 29 30 class StartedSignOutEvent(OAuthEvent): 31 """Analytics event representing the beginning of application sign out.""" 32 33 def __init__(self, request): 34 super().__init__(request, "started signed out") 35 36 37 class FinishedSignOutEvent(OAuthEvent): 38 """Analytics event representing the end of application sign out.""" 39 40 def __init__(self, request): 41 super().__init__(request, "finished sign out") 42 self.update_event_properties(origin=session.origin(request)) 43 44 45 def started_sign_in(request): 46 """Send the "started sign in" analytics event.""" 47 core.send_event(StartedSignInEvent(request)) 48 49 50 def finished_sign_in(request): 51 """Send the "finished sign in" analytics event.""" 52 core.send_event(FinishedSignInEvent(request)) 53 54 55 def started_sign_out(request): 56 """Send the "started signed out" analytics event.""" 57 core.send_event(StartedSignOutEvent(request)) 58 59 60 def finished_sign_out(request): 61 """Send the "finished sign out" analytics event.""" 62 core.send_event(FinishedSignOutEvent(request)) 63 ``` Path: `benefits/oauth/views.py` Content: ``` 1 import logging 2 3 from django.shortcuts import redirect 4 from django.urls import reverse 5 from django.utils.decorators import decorator_from_middleware 6 7 from benefits.core import session 8 from benefits.core.middleware import VerifierSessionRequired 9 from . 
import analytics, redirects 10 from .client import oauth 11 12 13 logger = logging.getLogger(__name__) 14 15 16 ROUTE_AUTH = "oauth:authorize" 17 ROUTE_START = "eligibility:start" 18 ROUTE_CONFIRM = "eligibility:confirm" 19 ROUTE_UNVERIFIED = "eligibility:unverified" 20 ROUTE_POST_LOGOUT = "oauth:post_logout" 21 22 23 @decorator_from_middleware(VerifierSessionRequired) 24 def login(request): 25 """View implementing OIDC authorize_redirect.""" 26 verifier = session.verifier(request) 27 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 28 29 if not oauth_client: 30 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 31 32 route = reverse(ROUTE_AUTH) 33 redirect_uri = redirects.generate_redirect_uri(request, route) 34 35 logger.debug(f"OAuth authorize_redirect with redirect_uri: {redirect_uri}") 36 37 analytics.started_sign_in(request) 38 39 return oauth_client.authorize_redirect(request, redirect_uri) 40 41 42 @decorator_from_middleware(VerifierSessionRequired) 43 def authorize(request): 44 """View implementing OIDC token authorization.""" 45 verifier = session.verifier(request) 46 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 47 48 if not oauth_client: 49 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 50 51 logger.debug("Attempting to authorize OAuth access token") 52 token = oauth_client.authorize_access_token(request) 53 54 if token is None: 55 logger.warning("Could not authorize OAuth access token") 56 return redirect(ROUTE_START) 57 58 logger.debug("OAuth access token authorized") 59 60 # We store the id_token in the user's session. This is the minimal amount of information needed later to log the user out. 61 id_token = token["id_token"] 62 63 # We store the returned claim in case it can be used later in eligibility verification. 
64 verifier_claim = verifier.auth_provider.claim 65 stored_claim = None 66 67 if verifier_claim: 68 userinfo = token.get("userinfo") 69 # the claim comes back in userinfo like { "claim": "True" | "False" } 70 claim_flag = (userinfo.get(verifier_claim) if userinfo else "false").lower() == "true" 71 # if userinfo contains our claim and the flag is true, store the *claim* 72 stored_claim = verifier_claim if claim_flag else None 73 74 session.update(request, oauth_token=id_token, oauth_claim=stored_claim) 75 76 analytics.finished_sign_in(request) 77 78 return redirect(ROUTE_CONFIRM) 79 80 81 def cancel(request): 82 """View implementing cancellation of OIDC authorization.""" 83 return redirect(ROUTE_UNVERIFIED) 84 85 86 @decorator_from_middleware(VerifierSessionRequired) 87 def logout(request): 88 """View implementing OIDC and application sign out.""" 89 verifier = session.verifier(request) 90 oauth_client = oauth.create_client(verifier.auth_provider.client_name) 91 92 if not oauth_client: 93 raise Exception(f"oauth_client not registered: {verifier.auth_provider.client_name}") 94 95 analytics.started_sign_out(request) 96 97 # overwrite the oauth session token, the user is signed out of the app 98 token = session.oauth_token(request) 99 session.logout(request) 100 101 route = reverse(ROUTE_POST_LOGOUT) 102 redirect_uri = redirects.generate_redirect_uri(request, route) 103 104 logger.debug(f"OAuth end_session_endpoint with redirect_uri: {redirect_uri}") 105 106 # send the user through the end_session_endpoint, redirecting back to 107 # the post_logout route 108 return redirects.deauthorize_redirect(oauth_client, token, redirect_uri) 109 110 111 def post_logout(request): 112 """View routes the user to their origin after sign out.""" 113 114 analytics.finished_sign_out(request) 115 116 origin = session.origin(request) 117 return redirect(origin) 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/benefits/oauth/analytics.py b/benefits/oauth/analytics.py --- a/benefits/oauth/analytics.py +++ b/benefits/oauth/analytics.py @@ -20,6 +20,13 @@ super().__init__(request, "started sign in") +class CanceledSignInEvent(OAuthEvent): + """Analytics event representing the canceling of application sign in.""" + + def __init__(self, request): + super().__init__(request, "canceled sign in") + + class FinishedSignInEvent(OAuthEvent): """Analytics event representing the end of the OAuth sign in flow.""" @@ -31,7 +38,7 @@ """Analytics event representing the beginning of application sign out.""" def __init__(self, request): - super().__init__(request, "started signed out") + super().__init__(request, "started sign out") class FinishedSignOutEvent(OAuthEvent): @@ -47,6 +54,11 @@ core.send_event(StartedSignInEvent(request)) +def canceled_sign_in(request): + """Send the "canceled sign in" analytics event.""" + core.send_event(CanceledSignInEvent(request)) + + def finished_sign_in(request): """Send the "finished sign in" analytics event.""" core.send_event(FinishedSignInEvent(request)) diff --git a/benefits/oauth/views.py b/benefits/oauth/views.py --- a/benefits/oauth/views.py +++ b/benefits/oauth/views.py @@ -80,6 +80,9 @@ def cancel(request): """View implementing cancellation of OIDC authorization.""" + + analytics.canceled_sign_in(request) + return redirect(ROUTE_UNVERIFIED)
{"golden_diff": "diff --git a/benefits/oauth/analytics.py b/benefits/oauth/analytics.py\n--- a/benefits/oauth/analytics.py\n+++ b/benefits/oauth/analytics.py\n@@ -20,6 +20,13 @@\n super().__init__(request, \"started sign in\")\n \n \n+class CanceledSignInEvent(OAuthEvent):\n+ \"\"\"Analytics event representing the canceling of application sign in.\"\"\"\n+\n+ def __init__(self, request):\n+ super().__init__(request, \"canceled sign in\")\n+\n+\n class FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n \n@@ -31,7 +38,7 @@\n \"\"\"Analytics event representing the beginning of application sign out.\"\"\"\n \n def __init__(self, request):\n- super().__init__(request, \"started signed out\")\n+ super().__init__(request, \"started sign out\")\n \n \n class FinishedSignOutEvent(OAuthEvent):\n@@ -47,6 +54,11 @@\n core.send_event(StartedSignInEvent(request))\n \n \n+def canceled_sign_in(request):\n+ \"\"\"Send the \"canceled sign in\" analytics event.\"\"\"\n+ core.send_event(CanceledSignInEvent(request))\n+\n+\n def finished_sign_in(request):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n core.send_event(FinishedSignInEvent(request))\ndiff --git a/benefits/oauth/views.py b/benefits/oauth/views.py\n--- a/benefits/oauth/views.py\n+++ b/benefits/oauth/views.py\n@@ -80,6 +80,9 @@\n \n def cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n+\n+ analytics.canceled_sign_in(request)\n+\n return redirect(ROUTE_UNVERIFIED)\n", "issue": "Ensure relevant analytics events are fired for Login.gov IAL2 flow\nDesign to collaborate with Dev to ensure the behaviors they are interested in understanding are defined in Amplitude. \r\n\r\n- [x] `cancel_sign_in`\n", "before_files": [{"content": "\"\"\"\nThe oauth application: analytics implementation.\n\"\"\"\nfrom benefits.core import analytics as core, session\n\n\nclass OAuthEvent(core.Event):\n \"\"\"Base OAuth analytics event.\"\"\"\n\n def __init__(self, request, event_type):\n super().__init__(request, event_type)\n verifier = session.verifier(request)\n self.update_event_properties(auth_provider=verifier.auth_provider.client_name)\n\n\nclass StartedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign in\")\n\n\nclass FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign in\")\n\n\nclass StartedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started signed out\")\n\n\nclass FinishedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign out\")\n self.update_event_properties(origin=session.origin(request))\n\n\ndef started_sign_in(request):\n \"\"\"Send the \"started sign in\" analytics event.\"\"\"\n core.send_event(StartedSignInEvent(request))\n\n\ndef finished_sign_in(request):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n core.send_event(FinishedSignInEvent(request))\n\n\ndef started_sign_out(request):\n \"\"\"Send the \"started signed out\" analytics event.\"\"\"\n core.send_event(StartedSignOutEvent(request))\n\n\ndef 
finished_sign_out(request):\n \"\"\"Send the \"finished sign out\" analytics event.\"\"\"\n core.send_event(FinishedSignOutEvent(request))\n", "path": "benefits/oauth/analytics.py"}, {"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom benefits.core.middleware import VerifierSessionRequired\nfrom . import analytics, redirects\nfrom .client import oauth\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. 
This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\n # if userinfo contains our claim and the flag is true, store the *claim*\n stored_claim = verifier_claim if claim_flag else None\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\ndef post_logout(request):\n \"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}], "after_files": [{"content": "\"\"\"\nThe oauth application: analytics implementation.\n\"\"\"\nfrom benefits.core import analytics as core, session\n\n\nclass OAuthEvent(core.Event):\n \"\"\"Base OAuth analytics event.\"\"\"\n\n def __init__(self, request, event_type):\n super().__init__(request, event_type)\n verifier = session.verifier(request)\n self.update_event_properties(auth_provider=verifier.auth_provider.client_name)\n\n\nclass StartedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign in\")\n\n\nclass CanceledSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the canceling of application sign in.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"canceled sign in\")\n\n\nclass FinishedSignInEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of the OAuth sign in flow.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign in\")\n\n\nclass StartedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the beginning of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"started sign out\")\n\n\nclass FinishedSignOutEvent(OAuthEvent):\n \"\"\"Analytics event representing the end of application sign out.\"\"\"\n\n def __init__(self, request):\n super().__init__(request, \"finished sign out\")\n 
self.update_event_properties(origin=session.origin(request))\n\n\ndef started_sign_in(request):\n \"\"\"Send the \"started sign in\" analytics event.\"\"\"\n core.send_event(StartedSignInEvent(request))\n\n\ndef canceled_sign_in(request):\n \"\"\"Send the \"canceled sign in\" analytics event.\"\"\"\n core.send_event(CanceledSignInEvent(request))\n\n\ndef finished_sign_in(request):\n \"\"\"Send the \"finished sign in\" analytics event.\"\"\"\n core.send_event(FinishedSignInEvent(request))\n\n\ndef started_sign_out(request):\n \"\"\"Send the \"started signed out\" analytics event.\"\"\"\n core.send_event(StartedSignOutEvent(request))\n\n\ndef finished_sign_out(request):\n \"\"\"Send the \"finished sign out\" analytics event.\"\"\"\n core.send_event(FinishedSignOutEvent(request))\n", "path": "benefits/oauth/analytics.py"}, {"content": "import logging\n\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.utils.decorators import decorator_from_middleware\n\nfrom benefits.core import session\nfrom benefits.core.middleware import VerifierSessionRequired\nfrom . import analytics, redirects\nfrom .client import oauth\n\n\nlogger = logging.getLogger(__name__)\n\n\nROUTE_AUTH = \"oauth:authorize\"\nROUTE_START = \"eligibility:start\"\nROUTE_CONFIRM = \"eligibility:confirm\"\nROUTE_UNVERIFIED = \"eligibility:unverified\"\nROUTE_POST_LOGOUT = \"oauth:post_logout\"\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef login(request):\n \"\"\"View implementing OIDC authorize_redirect.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n route = reverse(ROUTE_AUTH)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth authorize_redirect with redirect_uri: {redirect_uri}\")\n\n analytics.started_sign_in(request)\n\n return oauth_client.authorize_redirect(request, redirect_uri)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef authorize(request):\n \"\"\"View implementing OIDC token authorization.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n logger.debug(\"Attempting to authorize OAuth access token\")\n token = oauth_client.authorize_access_token(request)\n\n if token is None:\n logger.warning(\"Could not authorize OAuth access token\")\n return redirect(ROUTE_START)\n\n logger.debug(\"OAuth access token authorized\")\n\n # We store the id_token in the user's session. 
This is the minimal amount of information needed later to log the user out.\n id_token = token[\"id_token\"]\n\n # We store the returned claim in case it can be used later in eligibility verification.\n verifier_claim = verifier.auth_provider.claim\n stored_claim = None\n\n if verifier_claim:\n userinfo = token.get(\"userinfo\")\n # the claim comes back in userinfo like { \"claim\": \"True\" | \"False\" }\n claim_flag = (userinfo.get(verifier_claim) if userinfo else \"false\").lower() == \"true\"\n # if userinfo contains our claim and the flag is true, store the *claim*\n stored_claim = verifier_claim if claim_flag else None\n\n session.update(request, oauth_token=id_token, oauth_claim=stored_claim)\n\n analytics.finished_sign_in(request)\n\n return redirect(ROUTE_CONFIRM)\n\n\ndef cancel(request):\n \"\"\"View implementing cancellation of OIDC authorization.\"\"\"\n\n analytics.canceled_sign_in(request)\n\n return redirect(ROUTE_UNVERIFIED)\n\n\n@decorator_from_middleware(VerifierSessionRequired)\ndef logout(request):\n \"\"\"View implementing OIDC and application sign out.\"\"\"\n verifier = session.verifier(request)\n oauth_client = oauth.create_client(verifier.auth_provider.client_name)\n\n if not oauth_client:\n raise Exception(f\"oauth_client not registered: {verifier.auth_provider.client_name}\")\n\n analytics.started_sign_out(request)\n\n # overwrite the oauth session token, the user is signed out of the app\n token = session.oauth_token(request)\n session.logout(request)\n\n route = reverse(ROUTE_POST_LOGOUT)\n redirect_uri = redirects.generate_redirect_uri(request, route)\n\n logger.debug(f\"OAuth end_session_endpoint with redirect_uri: {redirect_uri}\")\n\n # send the user through the end_session_endpoint, redirecting back to\n # the post_logout route\n return redirects.deauthorize_redirect(oauth_client, token, redirect_uri)\n\n\ndef post_logout(request):\n \"\"\"View routes the user to their origin after sign out.\"\"\"\n\n analytics.finished_sign_out(request)\n\n origin = session.origin(request)\n return redirect(origin)\n", "path": "benefits/oauth/views.py"}]}
1,914
379
gh_patches_debug_37702
rasdani/github-patches
git_diff
Textualize__textual-2605
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add a `description` parameter to the work decorator, to use in place of the auto-generated description. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/textual/_work_decorator.py` Content: ``` 1 """ 2 3 A decorator used to create [workers](/guide/workers). 4 """ 5 6 7 from __future__ import annotations 8 9 from functools import partial, wraps 10 from typing import TYPE_CHECKING, Callable, Coroutine, TypeVar, Union, cast, overload 11 12 from typing_extensions import ParamSpec, TypeAlias 13 14 if TYPE_CHECKING: 15 from .worker import Worker 16 17 18 FactoryParamSpec = ParamSpec("FactoryParamSpec") 19 DecoratorParamSpec = ParamSpec("DecoratorParamSpec") 20 ReturnType = TypeVar("ReturnType") 21 22 Decorator: TypeAlias = Callable[ 23 [ 24 Union[ 25 Callable[DecoratorParamSpec, ReturnType], 26 Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]], 27 ] 28 ], 29 Callable[DecoratorParamSpec, "Worker[ReturnType]"], 30 ] 31 32 33 @overload 34 def work( 35 method: Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]] 36 ) -> Callable[FactoryParamSpec, "Worker[ReturnType]"]: 37 ... 38 39 40 @overload 41 def work( 42 method: Callable[FactoryParamSpec, ReturnType] 43 ) -> Callable[FactoryParamSpec, "Worker[ReturnType]"]: 44 ... 45 46 47 @overload 48 def work(*, exclusive: bool = False) -> Decorator[..., ReturnType]: 49 ... 50 51 52 def work( 53 method: Callable[FactoryParamSpec, ReturnType] 54 | Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]] 55 | None = None, 56 *, 57 name: str = "", 58 group: str = "default", 59 exit_on_error: bool = True, 60 exclusive: bool = False, 61 ) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator: 62 """A decorator used to create [workers](/guide/workers). 63 64 Args: 65 method: A function or coroutine. 66 name: A short string to identify the worker (in logs and debugging). 67 group: A short string to identify a group of workers. 68 exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions. 69 exclusive: Cancel all workers in the same group. 
70 """ 71 72 def decorator( 73 method: ( 74 Callable[DecoratorParamSpec, ReturnType] 75 | Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]] 76 ) 77 ) -> Callable[DecoratorParamSpec, Worker[ReturnType]]: 78 """The decorator.""" 79 80 @wraps(method) 81 def decorated( 82 *args: DecoratorParamSpec.args, **kwargs: DecoratorParamSpec.kwargs 83 ) -> Worker[ReturnType]: 84 """The replaced callable.""" 85 from .dom import DOMNode 86 87 self = args[0] 88 assert isinstance(self, DOMNode) 89 90 try: 91 positional_arguments = ", ".join(repr(arg) for arg in args[1:]) 92 keyword_arguments = ", ".join( 93 f"{name}={value!r}" for name, value in kwargs.items() 94 ) 95 tokens = [positional_arguments, keyword_arguments] 96 worker_description = f"{method.__name__}({', '.join(token for token in tokens if token)})" 97 except Exception: 98 worker_description = "<worker>" 99 worker = cast( 100 "Worker[ReturnType]", 101 self.run_worker( 102 partial(method, *args, **kwargs), 103 name=name or method.__name__, 104 group=group, 105 description=worker_description, 106 exclusive=exclusive, 107 exit_on_error=exit_on_error, 108 ), 109 ) 110 return worker 111 112 return decorated 113 114 if method is None: 115 return decorator 116 else: 117 return decorator(method) 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/textual/_work_decorator.py b/src/textual/_work_decorator.py --- a/src/textual/_work_decorator.py +++ b/src/textual/_work_decorator.py @@ -58,6 +58,7 @@ group: str = "default", exit_on_error: bool = True, exclusive: bool = False, + description: str | None = None, ) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator: """A decorator used to create [workers](/guide/workers). @@ -67,6 +68,9 @@ group: A short string to identify a group of workers. exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions. exclusive: Cancel all workers in the same group. + description: Readable description of the worker for debugging purposes. + By default, it uses a string representation of the decorated method + and its arguments. """ def decorator( @@ -87,22 +91,25 @@ self = args[0] assert isinstance(self, DOMNode) - try: - positional_arguments = ", ".join(repr(arg) for arg in args[1:]) - keyword_arguments = ", ".join( - f"{name}={value!r}" for name, value in kwargs.items() - ) - tokens = [positional_arguments, keyword_arguments] - worker_description = f"{method.__name__}({', '.join(token for token in tokens if token)})" - except Exception: - worker_description = "<worker>" + if description is not None: + debug_description = description + else: + try: + positional_arguments = ", ".join(repr(arg) for arg in args[1:]) + keyword_arguments = ", ".join( + f"{name}={value!r}" for name, value in kwargs.items() + ) + tokens = [positional_arguments, keyword_arguments] + debug_description = f"{method.__name__}({', '.join(token for token in tokens if token)})" + except Exception: + debug_description = "<worker>" worker = cast( "Worker[ReturnType]", self.run_worker( partial(method, *args, **kwargs), name=name or method.__name__, group=group, - description=worker_description, + description=debug_description, exclusive=exclusive, exit_on_error=exit_on_error, ),
{"golden_diff": "diff --git a/src/textual/_work_decorator.py b/src/textual/_work_decorator.py\n--- a/src/textual/_work_decorator.py\n+++ b/src/textual/_work_decorator.py\n@@ -58,6 +58,7 @@\n group: str = \"default\",\n exit_on_error: bool = True,\n exclusive: bool = False,\n+ description: str | None = None,\n ) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:\n \"\"\"A decorator used to create [workers](/guide/workers).\n \n@@ -67,6 +68,9 @@\n group: A short string to identify a group of workers.\n exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.\n exclusive: Cancel all workers in the same group.\n+ description: Readable description of the worker for debugging purposes.\n+ By default, it uses a string representation of the decorated method\n+ and its arguments.\n \"\"\"\n \n def decorator(\n@@ -87,22 +91,25 @@\n self = args[0]\n assert isinstance(self, DOMNode)\n \n- try:\n- positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n- keyword_arguments = \", \".join(\n- f\"{name}={value!r}\" for name, value in kwargs.items()\n- )\n- tokens = [positional_arguments, keyword_arguments]\n- worker_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n- except Exception:\n- worker_description = \"<worker>\"\n+ if description is not None:\n+ debug_description = description\n+ else:\n+ try:\n+ positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n+ keyword_arguments = \", \".join(\n+ f\"{name}={value!r}\" for name, value in kwargs.items()\n+ )\n+ tokens = [positional_arguments, keyword_arguments]\n+ debug_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n+ except Exception:\n+ debug_description = \"<worker>\"\n worker = cast(\n \"Worker[ReturnType]\",\n self.run_worker(\n partial(method, *args, **kwargs),\n name=name or method.__name__,\n group=group,\n- description=worker_description,\n+ description=debug_description,\n exclusive=exclusive,\n exit_on_error=exit_on_error,\n ),\n", "issue": "Add a `description` parameter to the work decorator, to use in place of the auto-generated description.\n\n", "before_files": [{"content": "\"\"\"\n\nA decorator used to create [workers](/guide/workers).\n\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom functools import partial, wraps\nfrom typing import TYPE_CHECKING, Callable, Coroutine, TypeVar, Union, cast, overload\n\nfrom typing_extensions import ParamSpec, TypeAlias\n\nif TYPE_CHECKING:\n from .worker import Worker\n\n\nFactoryParamSpec = ParamSpec(\"FactoryParamSpec\")\nDecoratorParamSpec = ParamSpec(\"DecoratorParamSpec\")\nReturnType = TypeVar(\"ReturnType\")\n\nDecorator: TypeAlias = Callable[\n [\n Union[\n Callable[DecoratorParamSpec, ReturnType],\n Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]],\n ]\n ],\n Callable[DecoratorParamSpec, \"Worker[ReturnType]\"],\n]\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n) -> Callable[FactoryParamSpec, \"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n) -> Callable[FactoryParamSpec, \"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(*, exclusive: bool = False) -> Decorator[..., ReturnType]:\n ...\n\n\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n | Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n | None = None,\n *,\n name: str = \"\",\n group: str = \"default\",\n exit_on_error: bool = True,\n 
exclusive: bool = False,\n) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:\n \"\"\"A decorator used to create [workers](/guide/workers).\n\n Args:\n method: A function or coroutine.\n name: A short string to identify the worker (in logs and debugging).\n group: A short string to identify a group of workers.\n exit_on_error: Exit the app if the worker raises an error. Set to `False` to suppress exceptions.\n exclusive: Cancel all workers in the same group.\n \"\"\"\n\n def decorator(\n method: (\n Callable[DecoratorParamSpec, ReturnType]\n | Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]]\n )\n ) -> Callable[DecoratorParamSpec, Worker[ReturnType]]:\n \"\"\"The decorator.\"\"\"\n\n @wraps(method)\n def decorated(\n *args: DecoratorParamSpec.args, **kwargs: DecoratorParamSpec.kwargs\n ) -> Worker[ReturnType]:\n \"\"\"The replaced callable.\"\"\"\n from .dom import DOMNode\n\n self = args[0]\n assert isinstance(self, DOMNode)\n\n try:\n positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n keyword_arguments = \", \".join(\n f\"{name}={value!r}\" for name, value in kwargs.items()\n )\n tokens = [positional_arguments, keyword_arguments]\n worker_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n except Exception:\n worker_description = \"<worker>\"\n worker = cast(\n \"Worker[ReturnType]\",\n self.run_worker(\n partial(method, *args, **kwargs),\n name=name or method.__name__,\n group=group,\n description=worker_description,\n exclusive=exclusive,\n exit_on_error=exit_on_error,\n ),\n )\n return worker\n\n return decorated\n\n if method is None:\n return decorator\n else:\n return decorator(method)\n", "path": "src/textual/_work_decorator.py"}], "after_files": [{"content": "\"\"\"\n\nA decorator used to create [workers](/guide/workers).\n\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom functools import partial, wraps\nfrom typing import TYPE_CHECKING, Callable, Coroutine, TypeVar, Union, cast, overload\n\nfrom typing_extensions import ParamSpec, TypeAlias\n\nif TYPE_CHECKING:\n from .worker import Worker\n\n\nFactoryParamSpec = ParamSpec(\"FactoryParamSpec\")\nDecoratorParamSpec = ParamSpec(\"DecoratorParamSpec\")\nReturnType = TypeVar(\"ReturnType\")\n\nDecorator: TypeAlias = Callable[\n [\n Union[\n Callable[DecoratorParamSpec, ReturnType],\n Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]],\n ]\n ],\n Callable[DecoratorParamSpec, \"Worker[ReturnType]\"],\n]\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n) -> Callable[FactoryParamSpec, \"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n) -> Callable[FactoryParamSpec, \"Worker[ReturnType]\"]:\n ...\n\n\n@overload\ndef work(*, exclusive: bool = False) -> Decorator[..., ReturnType]:\n ...\n\n\ndef work(\n method: Callable[FactoryParamSpec, ReturnType]\n | Callable[FactoryParamSpec, Coroutine[None, None, ReturnType]]\n | None = None,\n *,\n name: str = \"\",\n group: str = \"default\",\n exit_on_error: bool = True,\n exclusive: bool = False,\n description: str | None = None,\n) -> Callable[FactoryParamSpec, Worker[ReturnType]] | Decorator:\n \"\"\"A decorator used to create [workers](/guide/workers).\n\n Args:\n method: A function or coroutine.\n name: A short string to identify the worker (in logs and debugging).\n group: A short string to identify a group of workers.\n exit_on_error: Exit the app if the worker raises an error. 
Set to `False` to suppress exceptions.\n exclusive: Cancel all workers in the same group.\n description: Readable description of the worker for debugging purposes.\n By default, it uses a string representation of the decorated method\n and its arguments.\n \"\"\"\n\n def decorator(\n method: (\n Callable[DecoratorParamSpec, ReturnType]\n | Callable[DecoratorParamSpec, Coroutine[None, None, ReturnType]]\n )\n ) -> Callable[DecoratorParamSpec, Worker[ReturnType]]:\n \"\"\"The decorator.\"\"\"\n\n @wraps(method)\n def decorated(\n *args: DecoratorParamSpec.args, **kwargs: DecoratorParamSpec.kwargs\n ) -> Worker[ReturnType]:\n \"\"\"The replaced callable.\"\"\"\n from .dom import DOMNode\n\n self = args[0]\n assert isinstance(self, DOMNode)\n\n if description is not None:\n debug_description = description\n else:\n try:\n positional_arguments = \", \".join(repr(arg) for arg in args[1:])\n keyword_arguments = \", \".join(\n f\"{name}={value!r}\" for name, value in kwargs.items()\n )\n tokens = [positional_arguments, keyword_arguments]\n debug_description = f\"{method.__name__}({', '.join(token for token in tokens if token)})\"\n except Exception:\n debug_description = \"<worker>\"\n worker = cast(\n \"Worker[ReturnType]\",\n self.run_worker(\n partial(method, *args, **kwargs),\n name=name or method.__name__,\n group=group,\n description=debug_description,\n exclusive=exclusive,\n exit_on_error=exit_on_error,\n ),\n )\n return worker\n\n return decorated\n\n if method is None:\n return decorator\n else:\n return decorator(method)\n", "path": "src/textual/_work_decorator.py"}]}
1,282
546
gh_patches_debug_4548
rasdani/github-patches
git_diff
capitalone__DataProfiler-739
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Windows Install error - ValueError: path 'resources/' cannot end with '/ https://github.com/capitalone/DataProfiler/blob/5b04b7fe5ee3556235c397efb69b32cd5d364a3b/setup.py#L33 Ran into an install isue ValueError: path 'resources/' cannot end with '/ As per https://stackoverflow.com/questions/20356482/valueerror-path-conf-cannot-end-with resource_dir = "resources/" needs to change to resource_dir = "resources" Thank you. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """A setuptools for the Data Profiler Application and Python Libraries.""" 2 3 import os 4 5 # To use a consistent encoding 6 from codecs import open 7 from os import path 8 9 # Always prefer setuptools over distutils 10 from setuptools import find_packages, setup 11 12 # Load package version 13 from dataprofiler.version import __version__ 14 15 here = path.abspath(path.dirname(__file__)) 16 17 # Get the long description from the README file 18 with open(path.join(here, "README.md"), encoding="utf-8") as f: 19 LONG_DESCRIPTION = f.read() 20 21 # Get the install_requirements from requirements.txt 22 with open(path.join(here, "requirements.txt"), encoding="utf-8") as f: 23 required_packages = f.read().splitlines() 24 25 # Get the install_requirements from requirements-ml.txt 26 with open(path.join(here, "requirements-ml.txt"), encoding="utf-8") as f: 27 ml_packages = f.read().splitlines() 28 29 # Get the install_requirements from requirements-reports.txt 30 with open(path.join(here, "requirements-reports.txt"), encoding="utf-8") as f: 31 reports_packages = f.read().splitlines() 32 33 resource_dir = "resources/" 34 default_labeler_files = [ 35 (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir) 36 ] 37 38 39 DESCRIPTION = ( 40 "What is in your data? Detect schema, statistics and entities in almost any file." 41 ) 42 43 setup( 44 name="DataProfiler", 45 version=__version__, 46 python_requires=">=3.8", 47 description=DESCRIPTION, 48 long_description=LONG_DESCRIPTION, 49 long_description_content_type="text/markdown", 50 # The project's main homepage. 51 url="https://github.com/capitalone/data-profiler", 52 # Author details 53 author="Jeremy Goodsitt, Taylor Turner, Michael Davis, Kenny Bean, Tyler Farnan", 54 # Choose your license 55 license="Apache License, Version 2.0", 56 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 57 classifiers=[ 58 # How mature is this project? Common values are 59 # 3 - Alpha 60 # 4 - Beta 61 # 5 - Production/Stable 62 "Development Status :: 5 - Production/Stable", 63 # Indicate who your project is intended for 64 "Intended Audience :: Developers", 65 "Intended Audience :: Education", 66 "Intended Audience :: Information Technology", 67 "Intended Audience :: Science/Research", 68 "Intended Audience :: System Administrators", 69 "Topic :: Education", 70 "Topic :: Scientific/Engineering", 71 "Topic :: Scientific/Engineering :: Information Analysis", 72 "Topic :: Security", 73 "Topic :: Software Development :: Build Tools", 74 # Pick your license as you wish (should match "license" above) 75 "License :: OSI Approved :: Apache Software License", 76 # Specify the Python versions you support here. In particular, ensure 77 # that you indicate whether you support Python 3 or both. 
78 "Programming Language :: Python :: 3", 79 ], 80 # What does your project relate to? 81 keywords="Data Investigation", 82 # You can just specify the packages manually here if your project is 83 # simple. Or you can use find_packages(). 84 # packages=find_packages(exclude=['src/test', 'src/sample']), 85 packages=find_packages(exclude=["tests", "examples"]), 86 # List run-time dependencies here. These will be installed by pip when 87 # your project is installed. For an analysis of "install_requires" vs pip's 88 # requirements files see: 89 # https://packaging.python.org/en/latest/requirements.html 90 install_requires=required_packages, 91 # List of run-time dependencies for the labeler. These will be installed 92 # by pip when someone installs the project[<label>]. 93 extras_require={ 94 "ml": ml_packages, 95 "reports": reports_packages, 96 "full": ml_packages + reports_packages, 97 }, 98 # # If there are data files included in your packages that need to be 99 # # installed, specify them here. If using Python 2.6 or less, then these 100 # # have to be included in MANIFEST.in as well. 101 # package_data={ 102 # 'data': [], 103 # }, 104 # 105 # # Although 'package_data' is the preferred approach, in some case you may 106 # # need to place data files outside of your packages. See: 107 # # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa 108 # # In this case, 'data_file' will be installed into '<sys.prefix>/my_data' 109 data_files=default_labeler_files, 110 include_package_data=True, 111 ) 112 113 print("find_packages():", find_packages()) 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ with open(path.join(here, "requirements-reports.txt"), encoding="utf-8") as f: reports_packages = f.read().splitlines() -resource_dir = "resources/" +resource_dir = "resources" default_labeler_files = [ (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir) ]
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -30,7 +30,7 @@\n with open(path.join(here, \"requirements-reports.txt\"), encoding=\"utf-8\") as f:\n reports_packages = f.read().splitlines()\n \n-resource_dir = \"resources/\"\n+resource_dir = \"resources\"\n default_labeler_files = [\n (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)\n ]\n", "issue": "Windows Install error - ValueError: path 'resources/' cannot end with '/\nhttps://github.com/capitalone/DataProfiler/blob/5b04b7fe5ee3556235c397efb69b32cd5d364a3b/setup.py#L33\r\n\r\nRan into an install isue \r\nValueError: path 'resources/' cannot end with '/\r\n\r\nAs per \r\nhttps://stackoverflow.com/questions/20356482/valueerror-path-conf-cannot-end-with\r\n\r\nresource_dir = \"resources/\"\r\nneeds to change to \r\nresource_dir = \"resources\"\r\n\r\nThank you. \n", "before_files": [{"content": "\"\"\"A setuptools for the Data Profiler Application and Python Libraries.\"\"\"\n\nimport os\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n# Always prefer setuptools over distutils\nfrom setuptools import find_packages, setup\n\n# Load package version\nfrom dataprofiler.version import __version__\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Get the install_requirements from requirements.txt\nwith open(path.join(here, \"requirements.txt\"), encoding=\"utf-8\") as f:\n required_packages = f.read().splitlines()\n\n# Get the install_requirements from requirements-ml.txt\nwith open(path.join(here, \"requirements-ml.txt\"), encoding=\"utf-8\") as f:\n ml_packages = f.read().splitlines()\n\n# Get the install_requirements from requirements-reports.txt\nwith open(path.join(here, \"requirements-reports.txt\"), encoding=\"utf-8\") as f:\n reports_packages = f.read().splitlines()\n\nresource_dir = \"resources/\"\ndefault_labeler_files = [\n (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)\n]\n\n\nDESCRIPTION = (\n \"What is in your data? Detect schema, statistics and entities in almost any file.\"\n)\n\nsetup(\n name=\"DataProfiler\",\n version=__version__,\n python_requires=\">=3.8\",\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n # The project's main homepage.\n url=\"https://github.com/capitalone/data-profiler\",\n # Author details\n author=\"Jeremy Goodsitt, Taylor Turner, Michael Davis, Kenny Bean, Tyler Farnan\",\n # Choose your license\n license=\"Apache License, Version 2.0\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? 
Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n \"Development Status :: 5 - Production/Stable\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: System Administrators\",\n \"Topic :: Education\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n # Pick your license as you wish (should match \"license\" above)\n \"License :: OSI Approved :: Apache Software License\",\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 3 or both.\n \"Programming Language :: Python :: 3\",\n ],\n # What does your project relate to?\n keywords=\"Data Investigation\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n # packages=find_packages(exclude=['src/test', 'src/sample']),\n packages=find_packages(exclude=[\"tests\", \"examples\"]),\n # List run-time dependencies here. These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=required_packages,\n # List of run-time dependencies for the labeler. These will be installed\n # by pip when someone installs the project[<label>].\n extras_require={\n \"ml\": ml_packages,\n \"reports\": reports_packages,\n \"full\": ml_packages + reports_packages,\n },\n # # If there are data files included in your packages that need to be\n # # installed, specify them here. If using Python 2.6 or less, then these\n # # have to be included in MANIFEST.in as well.\n # package_data={\n # 'data': [],\n # },\n #\n # # Although 'package_data' is the preferred approach, in some case you may\n # # need to place data files outside of your packages. 
See:\n # # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa\n # # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'\n data_files=default_labeler_files,\n include_package_data=True,\n)\n\nprint(\"find_packages():\", find_packages())\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"A setuptools for the Data Profiler Application and Python Libraries.\"\"\"\n\nimport os\n\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\n# Always prefer setuptools over distutils\nfrom setuptools import find_packages, setup\n\n# Load package version\nfrom dataprofiler.version import __version__\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, \"README.md\"), encoding=\"utf-8\") as f:\n LONG_DESCRIPTION = f.read()\n\n# Get the install_requirements from requirements.txt\nwith open(path.join(here, \"requirements.txt\"), encoding=\"utf-8\") as f:\n required_packages = f.read().splitlines()\n\n# Get the install_requirements from requirements-ml.txt\nwith open(path.join(here, \"requirements-ml.txt\"), encoding=\"utf-8\") as f:\n ml_packages = f.read().splitlines()\n\n# Get the install_requirements from requirements-reports.txt\nwith open(path.join(here, \"requirements-reports.txt\"), encoding=\"utf-8\") as f:\n reports_packages = f.read().splitlines()\n\nresource_dir = \"resources\"\ndefault_labeler_files = [\n (d, [os.path.join(d, f) for f in files]) for d, _, files in os.walk(resource_dir)\n]\n\n\nDESCRIPTION = (\n \"What is in your data? Detect schema, statistics and entities in almost any file.\"\n)\n\nsetup(\n name=\"DataProfiler\",\n version=__version__,\n python_requires=\">=3.8\",\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n # The project's main homepage.\n url=\"https://github.com/capitalone/data-profiler\",\n # Author details\n author=\"Jeremy Goodsitt, Taylor Turner, Michael Davis, Kenny Bean, Tyler Farnan\",\n # Choose your license\n license=\"Apache License, Version 2.0\",\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n classifiers=[\n # How mature is this project? Common values are\n # 3 - Alpha\n # 4 - Beta\n # 5 - Production/Stable\n \"Development Status :: 5 - Production/Stable\",\n # Indicate who your project is intended for\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Education\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: System Administrators\",\n \"Topic :: Education\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Information Analysis\",\n \"Topic :: Security\",\n \"Topic :: Software Development :: Build Tools\",\n # Pick your license as you wish (should match \"license\" above)\n \"License :: OSI Approved :: Apache Software License\",\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 3 or both.\n \"Programming Language :: Python :: 3\",\n ],\n # What does your project relate to?\n keywords=\"Data Investigation\",\n # You can just specify the packages manually here if your project is\n # simple. Or you can use find_packages().\n # packages=find_packages(exclude=['src/test', 'src/sample']),\n packages=find_packages(exclude=[\"tests\", \"examples\"]),\n # List run-time dependencies here. 
These will be installed by pip when\n # your project is installed. For an analysis of \"install_requires\" vs pip's\n # requirements files see:\n # https://packaging.python.org/en/latest/requirements.html\n install_requires=required_packages,\n # List of run-time dependencies for the labeler. These will be installed\n # by pip when someone installs the project[<label>].\n extras_require={\n \"ml\": ml_packages,\n \"reports\": reports_packages,\n \"full\": ml_packages + reports_packages,\n },\n # # If there are data files included in your packages that need to be\n # # installed, specify them here. If using Python 2.6 or less, then these\n # # have to be included in MANIFEST.in as well.\n # package_data={\n # 'data': [],\n # },\n #\n # # Although 'package_data' is the preferred approach, in some case you may\n # # need to place data files outside of your packages. See:\n # # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa\n # # In this case, 'data_file' will be installed into '<sys.prefix>/my_data'\n data_files=default_labeler_files,\n include_package_data=True,\n)\n\nprint(\"find_packages():\", find_packages())\n", "path": "setup.py"}]}
1,658
113
gh_patches_debug_4972
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3342
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider superonefoods is broken During the global build at 2021-09-22-14-42-27, spider **superonefoods** failed with **0 features** and **1 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/logs/superonefoods.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/superonefoods.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import json 3 import scrapy 4 5 from locations.items import GeojsonPointItem 6 7 8 class SuperonefoodsSpider(scrapy.Spider): 9 name = "superonefoods" 10 item_attributes = { 'brand': "Super One Foods" } 11 allowed_domains = ["www.superonefoods.com"] 12 start_urls = ( 13 'https://www.superonefoods.com/store-finder', 14 ) 15 16 def parse(self, response): 17 # retrieve js data variable from script tag 18 items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n") 19 20 # convert data variable from unicode to string 21 items = [str(x) for x in items] 22 23 # convert type string representation of list to type list 24 data = [items[0]] 25 26 # load list into json object for parsing 27 jsondata = json.loads(data[0]) 28 29 # loop through json data object and retrieve values; yield the values to GeojsonPointItem 30 for item in jsondata: 31 yield GeojsonPointItem( 32 ref=item.get('_id'), 33 lat=float(item.get('latitude')), 34 lon=float(item.get('longitude')), 35 addr_full=item.get('address'), 36 city=item.get('city'), 37 state=item.get('state'), 38 postcode=item.get('zip'), 39 website='https://www.superonefoods.com/store-details/'+item.get('url'), 40 ) 41 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/superonefoods.py b/locations/spiders/superonefoods.py --- a/locations/spiders/superonefoods.py +++ b/locations/spiders/superonefoods.py @@ -15,7 +15,7 @@ def parse(self, response): # retrieve js data variable from script tag - items = response.xpath('//script/text()')[3].re("var stores =(.+?);\n") + items = response.xpath('//script/text()')[4].re("var stores =(.+?);\n") # convert data variable from unicode to string items = [str(x) for x in items]
{"golden_diff": "diff --git a/locations/spiders/superonefoods.py b/locations/spiders/superonefoods.py\n--- a/locations/spiders/superonefoods.py\n+++ b/locations/spiders/superonefoods.py\n@@ -15,7 +15,7 @@\n \n def parse(self, response):\n # retrieve js data variable from script tag\n- items = response.xpath('//script/text()')[3].re(\"var stores =(.+?);\\n\")\n+ items = response.xpath('//script/text()')[4].re(\"var stores =(.+?);\\n\")\n \n # convert data variable from unicode to string\n items = [str(x) for x in items]\n", "issue": "Spider superonefoods is broken\nDuring the global build at 2021-09-22-14-42-27, spider **superonefoods** failed with **0 features** and **1 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/logs/superonefoods.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-09-22-14-42-27/output/superonefoods.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass SuperonefoodsSpider(scrapy.Spider):\n name = \"superonefoods\"\n item_attributes = { 'brand': \"Super One Foods\" }\n allowed_domains = [\"www.superonefoods.com\"]\n start_urls = (\n 'https://www.superonefoods.com/store-finder',\n )\n\n def parse(self, response):\n # retrieve js data variable from script tag\n items = response.xpath('//script/text()')[3].re(\"var stores =(.+?);\\n\")\n\n # convert data variable from unicode to string\n items = [str(x) for x in items]\n\n # convert type string representation of list to type list\n data = [items[0]]\n\n # load list into json object for parsing\n jsondata = json.loads(data[0])\n\n # loop through json data object and retrieve values; yield the values to GeojsonPointItem\n for item in jsondata:\n yield GeojsonPointItem(\n ref=item.get('_id'),\n lat=float(item.get('latitude')),\n lon=float(item.get('longitude')),\n addr_full=item.get('address'),\n city=item.get('city'),\n state=item.get('state'),\n postcode=item.get('zip'),\n website='https://www.superonefoods.com/store-details/'+item.get('url'),\n )\n", "path": "locations/spiders/superonefoods.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass SuperonefoodsSpider(scrapy.Spider):\n name = \"superonefoods\"\n item_attributes = { 'brand': \"Super One Foods\" }\n allowed_domains = [\"www.superonefoods.com\"]\n start_urls = (\n 'https://www.superonefoods.com/store-finder',\n )\n\n def parse(self, response):\n # retrieve js data variable from script tag\n items = response.xpath('//script/text()')[4].re(\"var stores =(.+?);\\n\")\n\n # convert data variable from unicode to string\n items = [str(x) for x in items]\n\n # convert type string representation of list to type list\n data = [items[0]]\n\n # load list into json object for parsing\n jsondata = json.loads(data[0])\n\n # loop through json data object and retrieve values; yield the values to GeojsonPointItem\n for item in jsondata:\n yield GeojsonPointItem(\n ref=item.get('_id'),\n lat=float(item.get('latitude')),\n lon=float(item.get('longitude')),\n addr_full=item.get('address'),\n city=item.get('city'),\n state=item.get('state'),\n postcode=item.get('zip'),\n website='https://www.superonefoods.com/store-details/'+item.get('url'),\n )\n", "path": "locations/spiders/superonefoods.py"}]}
825
149
gh_patches_debug_31326
rasdani/github-patches
git_diff
apluslms__a-plus-575
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Compress HTML pages in the cache Exercise and chapter pages are stored in the cache and only update if the backend reports that there is a change. Some pages might be large (e.g. 1M), but do compress relatively well. Memcached API supports compression on the fly, but that is not usable over django API. Thus, we should at least compress HTML content manually. Alternatively, we can specialize `CachedAbstract` for memcached, which would also allow us to use `cas` operation. Relevant files: * `lib/cache/cached.py` * `exercise/cache/exercise.py` (`content` in `_generate_data(...)` and `content()`) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exercise/cache/exercise.py` Content: ``` 1 import time 2 from django.conf import settings 3 from django.db.models.signals import post_save, post_delete 4 5 from lib.cache import CachedAbstract 6 from lib.remote_page import RemotePageNotModified 7 from ..protocol.aplus import load_exercise_page 8 9 10 class ExerciseCache(CachedAbstract): 11 """ Exercise HTML content """ 12 KEY_PREFIX = "exercise" 13 14 def __init__(self, exercise, language, request, students, url_name): 15 self.exercise = exercise 16 self.load_args = [language, request, students, url_name] 17 super().__init__(exercise, modifiers=[language]) 18 19 def _needs_generation(self, data): 20 expires = data['expires'] if data else None 21 return not expires or time.time() > expires 22 23 def _generate_data(self, exercise, data=None): 24 try: 25 page = exercise.load_page( 26 *self.load_args, 27 last_modified=data['last_modified'] if data else None 28 ) 29 return { 30 'head': page.head, 31 'content': page.content, 32 'last_modified': page.last_modified, 33 'expires': page.expires if page.is_loaded else 0, 34 } 35 except RemotePageNotModified as e: 36 if e.expires: 37 data['expires'] = e.expires 38 return data 39 40 def head(self): 41 return self.data['head'] 42 43 def content(self): 44 return self.data['content'] 45 46 47 def invalidate_instance(instance): 48 for module in instance.course_modules.all(): 49 for exercise in module.learning_objects.all(): 50 for language,_ in settings.LANGUAGES: 51 ExerciseCache.invalidate(exercise, modifiers=[language]) 52 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exercise/cache/exercise.py b/exercise/cache/exercise.py --- a/exercise/cache/exercise.py +++ b/exercise/cache/exercise.py @@ -1,4 +1,6 @@ +import logging import time + from django.conf import settings from django.db.models.signals import post_save, post_delete @@ -6,6 +8,18 @@ from lib.remote_page import RemotePageNotModified from ..protocol.aplus import load_exercise_page +logger = logging.getLogger('aplus.cached') + +try: + from lz4.block import compress as _compress, decompress + def compress(data): + return _compress(data, compression=1) +except ImportError: + logger.warning("Unable to import lz4, using a slower zlib instead") + from zlib import compress as _compress, decompress + def compress(data): + return _compress(data, level=1) + class ExerciseCache(CachedAbstract): """ Exercise HTML content """ @@ -26,9 +40,12 @@ *self.load_args, last_modified=data['last_modified'] if data else None ) + + content = compress(page.content.encode('utf-8')) + return { 'head': page.head, - 'content': page.content, + 'content': content, 'last_modified': page.last_modified, 'expires': page.expires if page.is_loaded else 0, } @@ -41,7 +58,8 @@ return self.data['head'] def content(self): - return self.data['content'] + content = decompress(self.data['content']).decode('utf-8') + return content def invalidate_instance(instance):
{"golden_diff": "diff --git a/exercise/cache/exercise.py b/exercise/cache/exercise.py\n--- a/exercise/cache/exercise.py\n+++ b/exercise/cache/exercise.py\n@@ -1,4 +1,6 @@\n+import logging\n import time\n+\n from django.conf import settings\n from django.db.models.signals import post_save, post_delete\n \n@@ -6,6 +8,18 @@\n from lib.remote_page import RemotePageNotModified\n from ..protocol.aplus import load_exercise_page\n \n+logger = logging.getLogger('aplus.cached')\n+\n+try:\n+ from lz4.block import compress as _compress, decompress\n+ def compress(data):\n+ return _compress(data, compression=1)\n+except ImportError:\n+ logger.warning(\"Unable to import lz4, using a slower zlib instead\")\n+ from zlib import compress as _compress, decompress\n+ def compress(data):\n+ return _compress(data, level=1)\n+\n \n class ExerciseCache(CachedAbstract):\n \"\"\" Exercise HTML content \"\"\"\n@@ -26,9 +40,12 @@\n *self.load_args,\n last_modified=data['last_modified'] if data else None\n )\n+\n+ content = compress(page.content.encode('utf-8'))\n+\n return {\n 'head': page.head,\n- 'content': page.content,\n+ 'content': content,\n 'last_modified': page.last_modified,\n 'expires': page.expires if page.is_loaded else 0,\n }\n@@ -41,7 +58,8 @@\n return self.data['head']\n \n def content(self):\n- return self.data['content']\n+ content = decompress(self.data['content']).decode('utf-8')\n+ return content\n \n \n def invalidate_instance(instance):\n", "issue": "Compress HTML pages in the cache\nExercise and chapter pages are stored in the cache and only update if the backend reports that there is a change. Some pages might be large (e.g. 1M), but do compress relatively well. Memcached API supports compression on the fly, but that is not usable over django API.\r\n\r\nThus, we should at least compress HTML content manually. 
Alternatively, we can specialize `CachedAbstract` for memcached, which would also allow us to use `cas` operation.\r\n\r\nRelevant files:\r\n* `lib/cache/cached.py`\r\n* `exercise/cache/exercise.py` (`content` in `_generate_data(...)` and `content()`)\n", "before_files": [{"content": "import time\nfrom django.conf import settings\nfrom django.db.models.signals import post_save, post_delete\n\nfrom lib.cache import CachedAbstract\nfrom lib.remote_page import RemotePageNotModified\nfrom ..protocol.aplus import load_exercise_page\n\n\nclass ExerciseCache(CachedAbstract):\n \"\"\" Exercise HTML content \"\"\"\n KEY_PREFIX = \"exercise\"\n\n def __init__(self, exercise, language, request, students, url_name):\n self.exercise = exercise\n self.load_args = [language, request, students, url_name]\n super().__init__(exercise, modifiers=[language])\n\n def _needs_generation(self, data):\n expires = data['expires'] if data else None\n return not expires or time.time() > expires\n\n def _generate_data(self, exercise, data=None):\n try:\n page = exercise.load_page(\n *self.load_args,\n last_modified=data['last_modified'] if data else None\n )\n return {\n 'head': page.head,\n 'content': page.content,\n 'last_modified': page.last_modified,\n 'expires': page.expires if page.is_loaded else 0,\n }\n except RemotePageNotModified as e:\n if e.expires:\n data['expires'] = e.expires\n return data\n\n def head(self):\n return self.data['head']\n\n def content(self):\n return self.data['content']\n\n\ndef invalidate_instance(instance):\n for module in instance.course_modules.all():\n for exercise in module.learning_objects.all():\n for language,_ in settings.LANGUAGES:\n ExerciseCache.invalidate(exercise, modifiers=[language])\n", "path": "exercise/cache/exercise.py"}], "after_files": [{"content": "import logging\nimport time\n\nfrom django.conf import settings\nfrom django.db.models.signals import post_save, post_delete\n\nfrom lib.cache import CachedAbstract\nfrom lib.remote_page import RemotePageNotModified\nfrom ..protocol.aplus import load_exercise_page\n\nlogger = logging.getLogger('aplus.cached')\n\ntry:\n from lz4.block import compress as _compress, decompress\n def compress(data):\n return _compress(data, compression=1)\nexcept ImportError:\n logger.warning(\"Unable to import lz4, using a slower zlib instead\")\n from zlib import compress as _compress, decompress\n def compress(data):\n return _compress(data, level=1)\n\n\nclass ExerciseCache(CachedAbstract):\n \"\"\" Exercise HTML content \"\"\"\n KEY_PREFIX = \"exercise\"\n\n def __init__(self, exercise, language, request, students, url_name):\n self.exercise = exercise\n self.load_args = [language, request, students, url_name]\n super().__init__(exercise, modifiers=[language])\n\n def _needs_generation(self, data):\n expires = data['expires'] if data else None\n return not expires or time.time() > expires\n\n def _generate_data(self, exercise, data=None):\n try:\n page = exercise.load_page(\n *self.load_args,\n last_modified=data['last_modified'] if data else None\n )\n\n content = compress(page.content.encode('utf-8'))\n\n return {\n 'head': page.head,\n 'content': content,\n 'last_modified': page.last_modified,\n 'expires': page.expires if page.is_loaded else 0,\n }\n except RemotePageNotModified as e:\n if e.expires:\n data['expires'] = e.expires\n return data\n\n def head(self):\n return self.data['head']\n\n def content(self):\n content = decompress(self.data['content']).decode('utf-8')\n return content\n\n\ndef invalidate_instance(instance):\n 
for module in instance.course_modules.all():\n for exercise in module.learning_objects.all():\n for language,_ in settings.LANGUAGES:\n ExerciseCache.invalidate(exercise, modifiers=[language])\n", "path": "exercise/cache/exercise.py"}]}
834
377
gh_patches_debug_8197
rasdani/github-patches
git_diff
sanic-org__sanic-2438
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Easier websocket interface annotation Right now, to properly annotate a websocket endpoint you need to do this: ```python from sanic.server.websockets.impl import WebsocketImplProtocol from sanic import Request @app.websocket("") async def handler(request: Request, ws: WebsocketImplProtocol): ... ``` That is not easy or intuitive. This would be much nicer: ```python from sanic import Request, Websocket @app.websocket("") async def handler(request: Request, ws: Websocket): ... ``` We should just alias and put it inside `__init__.py` for convenience. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sanic/__init__.py` Content: ``` 1 from sanic.__version__ import __version__ 2 from sanic.app import Sanic 3 from sanic.blueprints import Blueprint 4 from sanic.constants import HTTPMethod 5 from sanic.request import Request 6 from sanic.response import HTTPResponse, html, json, text 7 8 9 __all__ = ( 10 "__version__", 11 "Sanic", 12 "Blueprint", 13 "HTTPMethod", 14 "HTTPResponse", 15 "Request", 16 "html", 17 "json", 18 "text", 19 ) 20 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sanic/__init__.py b/sanic/__init__.py --- a/sanic/__init__.py +++ b/sanic/__init__.py @@ -4,6 +4,7 @@ from sanic.constants import HTTPMethod from sanic.request import Request from sanic.response import HTTPResponse, html, json, text +from sanic.server.websockets.impl import WebsocketImplProtocol as Websocket __all__ = ( @@ -13,6 +14,7 @@ "HTTPMethod", "HTTPResponse", "Request", + "Websocket", "html", "json", "text",
{"golden_diff": "diff --git a/sanic/__init__.py b/sanic/__init__.py\n--- a/sanic/__init__.py\n+++ b/sanic/__init__.py\n@@ -4,6 +4,7 @@\n from sanic.constants import HTTPMethod\n from sanic.request import Request\n from sanic.response import HTTPResponse, html, json, text\n+from sanic.server.websockets.impl import WebsocketImplProtocol as Websocket\n \n \n __all__ = (\n@@ -13,6 +14,7 @@\n \"HTTPMethod\",\n \"HTTPResponse\",\n \"Request\",\n+ \"Websocket\",\n \"html\",\n \"json\",\n \"text\",\n", "issue": "Easier websocket interface annotation\nRight now, to properly annotate a websocket endpoint you need to do this:\r\n\r\n```python\r\nfrom sanic.server.websockets.impl import WebsocketImplProtocol\r\nfrom sanic import Request\r\n\r\[email protected](\"\")\r\nasync def handler(request: Request, ws: WebsocketImplProtocol):\r\n ...\r\n```\r\n\r\nThat is not easy or intuitive.\r\n\r\nThis would be much nicer:\r\n\r\n```python\r\nfrom sanic import Request, Websocket\r\n\r\[email protected](\"\")\r\nasync def handler(request: Request, ws: Websocket):\r\n ...\r\n```\r\n\r\nWe should just alias and put it inside `__init__.py` for convenience.\n", "before_files": [{"content": "from sanic.__version__ import __version__\nfrom sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\nfrom sanic.constants import HTTPMethod\nfrom sanic.request import Request\nfrom sanic.response import HTTPResponse, html, json, text\n\n\n__all__ = (\n \"__version__\",\n \"Sanic\",\n \"Blueprint\",\n \"HTTPMethod\",\n \"HTTPResponse\",\n \"Request\",\n \"html\",\n \"json\",\n \"text\",\n)\n", "path": "sanic/__init__.py"}], "after_files": [{"content": "from sanic.__version__ import __version__\nfrom sanic.app import Sanic\nfrom sanic.blueprints import Blueprint\nfrom sanic.constants import HTTPMethod\nfrom sanic.request import Request\nfrom sanic.response import HTTPResponse, html, json, text\nfrom sanic.server.websockets.impl import WebsocketImplProtocol as Websocket\n\n\n__all__ = (\n \"__version__\",\n \"Sanic\",\n \"Blueprint\",\n \"HTTPMethod\",\n \"HTTPResponse\",\n \"Request\",\n \"Websocket\",\n \"html\",\n \"json\",\n \"text\",\n)\n", "path": "sanic/__init__.py"}]}
521
142
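As a quick illustration of the interface the patch above enables, here is a minimal, hypothetical handler sketch. It assumes the `Websocket` alias exported from `sanic` by the diff and uses the websocket protocol's `send`/`recv` coroutines; the app name and route are invented for the example.

```python
from sanic import Request, Sanic, Websocket  # `Websocket` is the alias added by the patch

app = Sanic("ws-demo")  # hypothetical app name


@app.websocket("/feed")  # hypothetical route
async def feed(request: Request, ws: Websocket):
    # Echo every received frame back to the client until it disconnects.
    while True:
        data = await ws.recv()
        if data is None:
            break
        await ws.send(data)


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8000)
```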
gh_patches_debug_1060
rasdani/github-patches
git_diff
wagtail__wagtail-1791
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cachebusting query parameter (e.g. _=1441835249458) not ignored by api From the [documentation for jQuery.ajax, under "cache"](http://api.jquery.com/jquery.ajax/): > Setting cache to false will only work correctly with HEAD and GET requests. It works by appending "_={timestamp}" to the GET parameters. The parameter is not needed for other types of requests, except in IE8 when a POST is made to a URL that has already been requested by a GET. It seems like it's standard practice to ignore the underscore keyword. Unless I'm mistaken this is an oversight and not a disagreement on the principle of the thing. Reproduce: make an Ajax call to any wagtail API endpoint with the cache flag set to false. Or just navigate to something like `http://localhost:8000/api/v1/pages/?type=home.HomePage&_=1441835249458` You'll get this message: ``` { "message": "query parameter is not an operation or a recognised field: _" } ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/contrib/wagtailapi/endpoints.py` Content: ``` 1 from __future__ import absolute_import 2 3 from collections import OrderedDict 4 5 from django.conf.urls import url 6 from django.http import Http404 7 8 from rest_framework import status 9 from rest_framework.response import Response 10 from rest_framework.viewsets import GenericViewSet 11 12 from wagtail.wagtailcore.models import Page 13 from wagtail.wagtailimages.models import get_image_model 14 from wagtail.wagtaildocs.models import Document 15 from wagtail.wagtailcore.utils import resolve_model_string 16 17 from .filters import ( 18 FieldsFilter, OrderingFilter, SearchFilter, 19 ChildOfFilter, DescendantOfFilter 20 ) 21 from .renderers import WagtailJSONRenderer 22 from .pagination import WagtailPagination 23 from .serializers import BaseSerializer, PageSerializer, DocumentSerializer, ImageSerializer, get_serializer_class 24 from .utils import BadRequestError 25 26 27 class BaseAPIEndpoint(GenericViewSet): 28 renderer_classes = [WagtailJSONRenderer] 29 pagination_class = WagtailPagination 30 base_serializer_class = BaseSerializer 31 filter_classes = [] 32 queryset = None # Set on subclasses or implement `get_queryset()`. 33 34 known_query_parameters = frozenset([ 35 'limit', 36 'offset', 37 'fields', 38 'order', 39 'search', 40 ]) 41 extra_api_fields = [] 42 name = None # Set on subclass. 43 44 def listing_view(self, request): 45 queryset = self.get_queryset() 46 self.check_query_parameters(queryset) 47 queryset = self.filter_queryset(queryset) 48 queryset = self.paginate_queryset(queryset) 49 serializer = self.get_serializer(queryset, many=True) 50 return self.get_paginated_response(serializer.data) 51 52 def detail_view(self, request, pk): 53 instance = self.get_object() 54 serializer = self.get_serializer(instance) 55 return Response(serializer.data) 56 57 def handle_exception(self, exc): 58 if isinstance(exc, Http404): 59 data = {'message': str(exc)} 60 return Response(data, status=status.HTTP_404_NOT_FOUND) 61 elif isinstance(exc, BadRequestError): 62 data = {'message': str(exc)} 63 return Response(data, status=status.HTTP_400_BAD_REQUEST) 64 return super(BaseAPIEndpoint, self).handle_exception(exc) 65 66 def get_api_fields(self, model): 67 """ 68 This returns a list of field names that are allowed to 69 be used in the API (excluding the id field). 
70 """ 71 api_fields = self.extra_api_fields[:] 72 73 if hasattr(model, 'api_fields'): 74 api_fields.extend(model.api_fields) 75 76 return api_fields 77 78 def check_query_parameters(self, queryset): 79 """ 80 Ensure that only valid query paramters are included in the URL. 81 """ 82 query_parameters = set(self.request.GET.keys()) 83 84 # All query paramters must be either a field or an operation 85 allowed_query_parameters = set(self.get_api_fields(queryset.model)).union(self.known_query_parameters).union({'id'}) 86 unknown_parameters = query_parameters - allowed_query_parameters 87 if unknown_parameters: 88 raise BadRequestError("query parameter is not an operation or a recognised field: %s" % ', '.join(sorted(unknown_parameters))) 89 90 def get_serializer_class(self): 91 request = self.request 92 93 # Get model 94 if self.action == 'listing_view': 95 model = self.get_queryset().model 96 else: 97 model = type(self.get_object()) 98 99 # Get all available fields 100 all_fields = self.get_api_fields(model) 101 all_fields = list(OrderedDict.fromkeys(all_fields)) # Removes any duplicates in case the developer put "title" in api_fields 102 103 if self.action == 'listing_view': 104 # Listing views just show the title field and any other allowed field the user specified 105 if 'fields' in request.GET: 106 fields = set(request.GET['fields'].split(',')) 107 else: 108 fields = {'title'} 109 110 unknown_fields = fields - set(all_fields) 111 112 if unknown_fields: 113 raise BadRequestError("unknown fields: %s" % ', '.join(sorted(unknown_fields))) 114 115 # Reorder fields so it matches the order of all_fields 116 fields = [field for field in all_fields if field in fields] 117 else: 118 # Detail views show all fields all the time 119 fields = all_fields 120 121 # Always show id and meta first 122 fields = ['id', 'meta'] + fields 123 124 # If showing details, add the parent field 125 if isinstance(self, PagesAPIEndpoint) and self.get_serializer_context().get('show_details', False): 126 fields.insert(2, 'parent') 127 128 return get_serializer_class(model, fields, base=self.base_serializer_class) 129 130 def get_serializer_context(self): 131 """ 132 The serialization context differs between listing and detail views. 
133 """ 134 request = self.request 135 136 if self.action == 'listing_view': 137 return { 138 'request': request, 139 'view': self, 140 } 141 142 return { 143 'request': request, 144 'view': self, 145 'show_details': True 146 } 147 148 def get_renderer_context(self): 149 context = super(BaseAPIEndpoint, self).get_renderer_context() 150 context['endpoints'] = [ 151 PagesAPIEndpoint, 152 ImagesAPIEndpoint, 153 DocumentsAPIEndpoint 154 ] 155 return context 156 157 @classmethod 158 def get_urlpatterns(cls): 159 """ 160 This returns a list of URL patterns for the endpoint 161 """ 162 return [ 163 url(r'^$', cls.as_view({'get': 'listing_view'}), name='listing'), 164 url(r'^(?P<pk>\d+)/$', cls.as_view({'get': 'detail_view'}), name='detail'), 165 ] 166 167 @classmethod 168 def has_model(cls, model): 169 return NotImplemented 170 171 172 class PagesAPIEndpoint(BaseAPIEndpoint): 173 base_serializer_class = PageSerializer 174 filter_backends = [ 175 FieldsFilter, 176 ChildOfFilter, 177 DescendantOfFilter, 178 OrderingFilter, 179 SearchFilter 180 ] 181 known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([ 182 'type', 183 'child_of', 184 'descendant_of', 185 ]) 186 extra_api_fields = ['title'] 187 name = 'pages' 188 189 def get_queryset(self): 190 request = self.request 191 192 # Allow pages to be filtered to a specific type 193 if 'type' not in request.GET: 194 model = Page 195 else: 196 model_name = request.GET['type'] 197 try: 198 model = resolve_model_string(model_name) 199 except LookupError: 200 raise BadRequestError("type doesn't exist") 201 if not issubclass(model, Page): 202 raise BadRequestError("type doesn't exist") 203 204 # Get live pages that are not in a private section 205 queryset = model.objects.public().live() 206 207 # Filter by site 208 queryset = queryset.descendant_of(request.site.root_page, inclusive=True) 209 210 return queryset 211 212 def get_object(self): 213 base = super(PagesAPIEndpoint, self).get_object() 214 return base.specific 215 216 @classmethod 217 def has_model(cls, model): 218 return issubclass(model, Page) 219 220 221 class ImagesAPIEndpoint(BaseAPIEndpoint): 222 queryset = get_image_model().objects.all().order_by('id') 223 base_serializer_class = ImageSerializer 224 filter_backends = [FieldsFilter, OrderingFilter, SearchFilter] 225 extra_api_fields = ['title', 'tags', 'width', 'height'] 226 name = 'images' 227 228 @classmethod 229 def has_model(cls, model): 230 return model == get_image_model() 231 232 233 class DocumentsAPIEndpoint(BaseAPIEndpoint): 234 queryset = Document.objects.all().order_by('id') 235 base_serializer_class = DocumentSerializer 236 filter_backends = [FieldsFilter, OrderingFilter, SearchFilter] 237 extra_api_fields = ['title', 'tags'] 238 name = 'documents' 239 240 @classmethod 241 def has_model(cls, model): 242 return model == Document 243 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/contrib/wagtailapi/endpoints.py b/wagtail/contrib/wagtailapi/endpoints.py --- a/wagtail/contrib/wagtailapi/endpoints.py +++ b/wagtail/contrib/wagtailapi/endpoints.py @@ -37,6 +37,9 @@ 'fields', 'order', 'search', + + # Used by jQuery for cache-busting. See #1671 + '_', ]) extra_api_fields = [] name = None # Set on subclass.
{"golden_diff": "diff --git a/wagtail/contrib/wagtailapi/endpoints.py b/wagtail/contrib/wagtailapi/endpoints.py\n--- a/wagtail/contrib/wagtailapi/endpoints.py\n+++ b/wagtail/contrib/wagtailapi/endpoints.py\n@@ -37,6 +37,9 @@\n 'fields',\n 'order',\n 'search',\n+\n+ # Used by jQuery for cache-busting. See #1671\n+ '_',\n ])\n extra_api_fields = []\n name = None # Set on subclass.\n", "issue": "Cachebusting query parameter (e.g. _=1441835249458) not ignored by api\nFrom the [documentation for jQuery.ajax, under \"cache\"](http://api.jquery.com/jquery.ajax/):\n\n> Setting cache to false will only work correctly with HEAD and GET requests. It works by appending \"_={timestamp}\" to the GET parameters. The parameter is not needed for other types of requests, except in IE8 when a POST is made to a URL that has already been requested by a GET.\n\nIt seems like it's standard practice to ignore the underscore keyword. Unless I'm mistaken this is an oversight and not a disagreement on the principle of the thing.\n\nReproduce: make an Ajax call to any wagtail API endpoint with the cache flag set to false. Or just navigate to something like `http://localhost:8000/api/v1/pages/?type=home.HomePage&_=1441835249458`\n\nYou'll get this message: \n\n```\n{\n \"message\": \"query parameter is not an operation or a recognised field: _\"\n}\n```\n\n", "before_files": [{"content": "from __future__ import absolute_import\n\nfrom collections import OrderedDict\n\nfrom django.conf.urls import url\nfrom django.http import Http404\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailimages.models import get_image_model\nfrom wagtail.wagtaildocs.models import Document\nfrom wagtail.wagtailcore.utils import resolve_model_string\n\nfrom .filters import (\n FieldsFilter, OrderingFilter, SearchFilter,\n ChildOfFilter, DescendantOfFilter\n)\nfrom .renderers import WagtailJSONRenderer\nfrom .pagination import WagtailPagination\nfrom .serializers import BaseSerializer, PageSerializer, DocumentSerializer, ImageSerializer, get_serializer_class\nfrom .utils import BadRequestError\n\n\nclass BaseAPIEndpoint(GenericViewSet):\n renderer_classes = [WagtailJSONRenderer]\n pagination_class = WagtailPagination\n base_serializer_class = BaseSerializer\n filter_classes = []\n queryset = None # Set on subclasses or implement `get_queryset()`.\n\n known_query_parameters = frozenset([\n 'limit',\n 'offset',\n 'fields',\n 'order',\n 'search',\n ])\n extra_api_fields = []\n name = None # Set on subclass.\n\n def listing_view(self, request):\n queryset = self.get_queryset()\n self.check_query_parameters(queryset)\n queryset = self.filter_queryset(queryset)\n queryset = self.paginate_queryset(queryset)\n serializer = self.get_serializer(queryset, many=True)\n return self.get_paginated_response(serializer.data)\n\n def detail_view(self, request, pk):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n def handle_exception(self, exc):\n if isinstance(exc, Http404):\n data = {'message': str(exc)}\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n elif isinstance(exc, BadRequestError):\n data = {'message': str(exc)}\n return Response(data, status=status.HTTP_400_BAD_REQUEST)\n return super(BaseAPIEndpoint, self).handle_exception(exc)\n\n def get_api_fields(self, model):\n \"\"\"\n This returns a list of field names that are 
allowed to\n be used in the API (excluding the id field).\n \"\"\"\n api_fields = self.extra_api_fields[:]\n\n if hasattr(model, 'api_fields'):\n api_fields.extend(model.api_fields)\n\n return api_fields\n\n def check_query_parameters(self, queryset):\n \"\"\"\n Ensure that only valid query paramters are included in the URL.\n \"\"\"\n query_parameters = set(self.request.GET.keys())\n\n # All query paramters must be either a field or an operation\n allowed_query_parameters = set(self.get_api_fields(queryset.model)).union(self.known_query_parameters).union({'id'})\n unknown_parameters = query_parameters - allowed_query_parameters\n if unknown_parameters:\n raise BadRequestError(\"query parameter is not an operation or a recognised field: %s\" % ', '.join(sorted(unknown_parameters)))\n\n def get_serializer_class(self):\n request = self.request\n\n # Get model\n if self.action == 'listing_view':\n model = self.get_queryset().model\n else:\n model = type(self.get_object())\n\n # Get all available fields\n all_fields = self.get_api_fields(model)\n all_fields = list(OrderedDict.fromkeys(all_fields)) # Removes any duplicates in case the developer put \"title\" in api_fields\n\n if self.action == 'listing_view':\n # Listing views just show the title field and any other allowed field the user specified\n if 'fields' in request.GET:\n fields = set(request.GET['fields'].split(','))\n else:\n fields = {'title'}\n\n unknown_fields = fields - set(all_fields)\n\n if unknown_fields:\n raise BadRequestError(\"unknown fields: %s\" % ', '.join(sorted(unknown_fields)))\n\n # Reorder fields so it matches the order of all_fields\n fields = [field for field in all_fields if field in fields]\n else:\n # Detail views show all fields all the time\n fields = all_fields\n\n # Always show id and meta first\n fields = ['id', 'meta'] + fields\n\n # If showing details, add the parent field\n if isinstance(self, PagesAPIEndpoint) and self.get_serializer_context().get('show_details', False):\n fields.insert(2, 'parent')\n\n return get_serializer_class(model, fields, base=self.base_serializer_class)\n\n def get_serializer_context(self):\n \"\"\"\n The serialization context differs between listing and detail views.\n \"\"\"\n request = self.request\n\n if self.action == 'listing_view':\n return {\n 'request': request,\n 'view': self,\n }\n\n return {\n 'request': request,\n 'view': self,\n 'show_details': True\n }\n\n def get_renderer_context(self):\n context = super(BaseAPIEndpoint, self).get_renderer_context()\n context['endpoints'] = [\n PagesAPIEndpoint,\n ImagesAPIEndpoint,\n DocumentsAPIEndpoint\n ]\n return context\n\n @classmethod\n def get_urlpatterns(cls):\n \"\"\"\n This returns a list of URL patterns for the endpoint\n \"\"\"\n return [\n url(r'^$', cls.as_view({'get': 'listing_view'}), name='listing'),\n url(r'^(?P<pk>\\d+)/$', cls.as_view({'get': 'detail_view'}), name='detail'),\n ]\n\n @classmethod\n def has_model(cls, model):\n return NotImplemented\n\n\nclass PagesAPIEndpoint(BaseAPIEndpoint):\n base_serializer_class = PageSerializer\n filter_backends = [\n FieldsFilter,\n ChildOfFilter,\n DescendantOfFilter,\n OrderingFilter,\n SearchFilter\n ]\n known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([\n 'type',\n 'child_of',\n 'descendant_of',\n ])\n extra_api_fields = ['title']\n name = 'pages'\n\n def get_queryset(self):\n request = self.request\n\n # Allow pages to be filtered to a specific type\n if 'type' not in request.GET:\n model = Page\n else:\n model_name = request.GET['type']\n 
try:\n model = resolve_model_string(model_name)\n except LookupError:\n raise BadRequestError(\"type doesn't exist\")\n if not issubclass(model, Page):\n raise BadRequestError(\"type doesn't exist\")\n\n # Get live pages that are not in a private section\n queryset = model.objects.public().live()\n\n # Filter by site\n queryset = queryset.descendant_of(request.site.root_page, inclusive=True)\n\n return queryset\n\n def get_object(self):\n base = super(PagesAPIEndpoint, self).get_object()\n return base.specific\n\n @classmethod\n def has_model(cls, model):\n return issubclass(model, Page)\n\n\nclass ImagesAPIEndpoint(BaseAPIEndpoint):\n queryset = get_image_model().objects.all().order_by('id')\n base_serializer_class = ImageSerializer\n filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]\n extra_api_fields = ['title', 'tags', 'width', 'height']\n name = 'images'\n\n @classmethod\n def has_model(cls, model):\n return model == get_image_model()\n\n\nclass DocumentsAPIEndpoint(BaseAPIEndpoint):\n queryset = Document.objects.all().order_by('id')\n base_serializer_class = DocumentSerializer\n filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]\n extra_api_fields = ['title', 'tags']\n name = 'documents'\n\n @classmethod\n def has_model(cls, model):\n return model == Document\n", "path": "wagtail/contrib/wagtailapi/endpoints.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nfrom collections import OrderedDict\n\nfrom django.conf.urls import url\nfrom django.http import Http404\n\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import GenericViewSet\n\nfrom wagtail.wagtailcore.models import Page\nfrom wagtail.wagtailimages.models import get_image_model\nfrom wagtail.wagtaildocs.models import Document\nfrom wagtail.wagtailcore.utils import resolve_model_string\n\nfrom .filters import (\n FieldsFilter, OrderingFilter, SearchFilter,\n ChildOfFilter, DescendantOfFilter\n)\nfrom .renderers import WagtailJSONRenderer\nfrom .pagination import WagtailPagination\nfrom .serializers import BaseSerializer, PageSerializer, DocumentSerializer, ImageSerializer, get_serializer_class\nfrom .utils import BadRequestError\n\n\nclass BaseAPIEndpoint(GenericViewSet):\n renderer_classes = [WagtailJSONRenderer]\n pagination_class = WagtailPagination\n base_serializer_class = BaseSerializer\n filter_classes = []\n queryset = None # Set on subclasses or implement `get_queryset()`.\n\n known_query_parameters = frozenset([\n 'limit',\n 'offset',\n 'fields',\n 'order',\n 'search',\n\n # Used by jQuery for cache-busting. 
See #1671\n '_',\n ])\n extra_api_fields = []\n name = None # Set on subclass.\n\n def listing_view(self, request):\n queryset = self.get_queryset()\n self.check_query_parameters(queryset)\n queryset = self.filter_queryset(queryset)\n queryset = self.paginate_queryset(queryset)\n serializer = self.get_serializer(queryset, many=True)\n return self.get_paginated_response(serializer.data)\n\n def detail_view(self, request, pk):\n instance = self.get_object()\n serializer = self.get_serializer(instance)\n return Response(serializer.data)\n\n def handle_exception(self, exc):\n if isinstance(exc, Http404):\n data = {'message': str(exc)}\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n elif isinstance(exc, BadRequestError):\n data = {'message': str(exc)}\n return Response(data, status=status.HTTP_400_BAD_REQUEST)\n return super(BaseAPIEndpoint, self).handle_exception(exc)\n\n def get_api_fields(self, model):\n \"\"\"\n This returns a list of field names that are allowed to\n be used in the API (excluding the id field).\n \"\"\"\n api_fields = self.extra_api_fields[:]\n\n if hasattr(model, 'api_fields'):\n api_fields.extend(model.api_fields)\n\n return api_fields\n\n def check_query_parameters(self, queryset):\n \"\"\"\n Ensure that only valid query paramters are included in the URL.\n \"\"\"\n query_parameters = set(self.request.GET.keys())\n\n # All query paramters must be either a field or an operation\n allowed_query_parameters = set(self.get_api_fields(queryset.model)).union(self.known_query_parameters).union({'id'})\n unknown_parameters = query_parameters - allowed_query_parameters\n if unknown_parameters:\n raise BadRequestError(\"query parameter is not an operation or a recognised field: %s\" % ', '.join(sorted(unknown_parameters)))\n\n def get_serializer_class(self):\n request = self.request\n\n # Get model\n if self.action == 'listing_view':\n model = self.get_queryset().model\n else:\n model = type(self.get_object())\n\n # Get all available fields\n all_fields = self.get_api_fields(model)\n all_fields = list(OrderedDict.fromkeys(all_fields)) # Removes any duplicates in case the developer put \"title\" in api_fields\n\n if self.action == 'listing_view':\n # Listing views just show the title field and any other allowed field the user specified\n if 'fields' in request.GET:\n fields = set(request.GET['fields'].split(','))\n else:\n fields = {'title'}\n\n unknown_fields = fields - set(all_fields)\n\n if unknown_fields:\n raise BadRequestError(\"unknown fields: %s\" % ', '.join(sorted(unknown_fields)))\n\n # Reorder fields so it matches the order of all_fields\n fields = [field for field in all_fields if field in fields]\n else:\n # Detail views show all fields all the time\n fields = all_fields\n\n # Always show id and meta first\n fields = ['id', 'meta'] + fields\n\n # If showing details, add the parent field\n if isinstance(self, PagesAPIEndpoint) and self.get_serializer_context().get('show_details', False):\n fields.insert(2, 'parent')\n\n return get_serializer_class(model, fields, base=self.base_serializer_class)\n\n def get_serializer_context(self):\n \"\"\"\n The serialization context differs between listing and detail views.\n \"\"\"\n request = self.request\n\n if self.action == 'listing_view':\n return {\n 'request': request,\n 'view': self,\n }\n\n return {\n 'request': request,\n 'view': self,\n 'show_details': True\n }\n\n def get_renderer_context(self):\n context = super(BaseAPIEndpoint, self).get_renderer_context()\n context['endpoints'] = [\n PagesAPIEndpoint,\n 
ImagesAPIEndpoint,\n DocumentsAPIEndpoint\n ]\n return context\n\n @classmethod\n def get_urlpatterns(cls):\n \"\"\"\n This returns a list of URL patterns for the endpoint\n \"\"\"\n return [\n url(r'^$', cls.as_view({'get': 'listing_view'}), name='listing'),\n url(r'^(?P<pk>\\d+)/$', cls.as_view({'get': 'detail_view'}), name='detail'),\n ]\n\n @classmethod\n def has_model(cls, model):\n return NotImplemented\n\n\nclass PagesAPIEndpoint(BaseAPIEndpoint):\n base_serializer_class = PageSerializer\n filter_backends = [\n FieldsFilter,\n ChildOfFilter,\n DescendantOfFilter,\n OrderingFilter,\n SearchFilter\n ]\n known_query_parameters = BaseAPIEndpoint.known_query_parameters.union([\n 'type',\n 'child_of',\n 'descendant_of',\n ])\n extra_api_fields = ['title']\n name = 'pages'\n\n def get_queryset(self):\n request = self.request\n\n # Allow pages to be filtered to a specific type\n if 'type' not in request.GET:\n model = Page\n else:\n model_name = request.GET['type']\n try:\n model = resolve_model_string(model_name)\n except LookupError:\n raise BadRequestError(\"type doesn't exist\")\n if not issubclass(model, Page):\n raise BadRequestError(\"type doesn't exist\")\n\n # Get live pages that are not in a private section\n queryset = model.objects.public().live()\n\n # Filter by site\n queryset = queryset.descendant_of(request.site.root_page, inclusive=True)\n\n return queryset\n\n def get_object(self):\n base = super(PagesAPIEndpoint, self).get_object()\n return base.specific\n\n @classmethod\n def has_model(cls, model):\n return issubclass(model, Page)\n\n\nclass ImagesAPIEndpoint(BaseAPIEndpoint):\n queryset = get_image_model().objects.all().order_by('id')\n base_serializer_class = ImageSerializer\n filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]\n extra_api_fields = ['title', 'tags', 'width', 'height']\n name = 'images'\n\n @classmethod\n def has_model(cls, model):\n return model == get_image_model()\n\n\nclass DocumentsAPIEndpoint(BaseAPIEndpoint):\n queryset = Document.objects.all().order_by('id')\n base_serializer_class = DocumentSerializer\n filter_backends = [FieldsFilter, OrderingFilter, SearchFilter]\n extra_api_fields = ['title', 'tags']\n name = 'documents'\n\n @classmethod\n def has_model(cls, model):\n return model == Document\n", "path": "wagtail/contrib/wagtailapi/endpoints.py"}]}
2800
125
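For context, a standalone sketch of the parameter check that the patch relaxes. The names mirror the endpoint code above, but this is an illustration only, not Wagtail's actual class; the example query strings are invented.

```python
known_query_parameters = frozenset([
    'limit', 'offset', 'fields', 'order', 'search',
    '_',  # jQuery cache-busting timestamp, tolerated after the patch
])
api_fields = ['title', 'tags']  # stand-in for get_api_fields(model)


def check_query_parameters(query_params):
    # Every query parameter must be either an operation or a recognised field.
    allowed = set(api_fields) | known_query_parameters | {'id'}
    unknown = set(query_params) - allowed
    if unknown:
        raise ValueError(
            "query parameter is not an operation or a recognised field: %s"
            % ', '.join(sorted(unknown)))


# Previously this raised because of the "_" key; with "_" whitelisted it passes.
check_query_parameters({'search': 'home', '_': '1441835249458'})
```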
gh_patches_debug_28345
rasdani/github-patches
git_diff
webkom__lego-1292
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make contact form send to mail list for HS, instead of individual members The form currently fetches all members of the HS group, and sends the message to each individual address. This makes it cumbersome to discuss the message for HS, it would be better to send it to their mailing list instead. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lego/apps/users/fixtures/initial_abakus_groups.py` Content: ``` 1 from lego.apps.users.constants import GROUP_COMMITTEE, GROUP_GRADE 2 from lego.apps.users.models import AbakusGroup 3 from lego.utils.functions import insert_abakus_groups 4 5 # isort:skip 6 """ 7 The structure of the tree is key and a list of two dicts. 8 The first dict is the parameters of the current group 9 and the second dict are the children of the current group. 10 11 E.g. Abakus: [ 12 { 13 description: 'ABAKUSGRUPPE', 14 permissions: ['/sudo/...'] 15 ... 16 }, 17 { 18 'Webkom': [{ 19 description: 'WEBKOMGRUPPE', 20 permissions: ['/sudo/'] 21 ... 22 }, {}] 23 } 24 ] 25 """ 26 27 initial_tree = { 28 'Users': [{ 29 'description': 'Brukere på Abakus.no' 30 }, {}], 31 'Abakus': [ 32 { 33 'description': 34 'Medlemmer av Abakus', 35 'permissions': [ 36 '/sudo/admin/meetings/create', '/sudo/admin/meetinginvitations/create', 37 '/sudo/admin/registrations/create/', '/sudo/admin/events/payment/', 38 '/sudo/admin/comments/create' 39 ] 40 }, 41 { 42 'Abakom': [ 43 { 44 'description': 45 'Medlemmer av Abakom', 46 'permissions': [ 47 '/sudo/admin/events/', 48 '/sudo/admin/pools/', 49 '/sudo/admin/registrations/', 50 '/sudo/admin/companies/', 51 '/sudo/admin/joblistings/', 52 ] 53 }, 54 { 55 'Arrkom': [{ 56 'type': GROUP_COMMITTEE, 57 'logo_id': 'abakus_arrkom.png' 58 }, {}], 59 'backup': [{ 60 'type': GROUP_COMMITTEE, 61 'logo_id': 'abakus_backup.png' 62 }, {}], 63 'Bedkom': [ 64 { 65 'type': 66 GROUP_COMMITTEE, 67 'logo_id': 68 'abakus_bedkom.png', 69 'permissions': [ 70 '/sudo/admin/companyinterest/', '/sudo/admin/surveys/', 71 '/sudo/admin/submissions/' 72 ] 73 }, {} 74 ], 75 'Fagkom': [ 76 { 77 'type': 78 GROUP_COMMITTEE, 79 'logo_id': 80 'abakus_fagkom.png', 81 'permissions': [ 82 '/sudo/admin/companyinterest/', '/sudo/admin/surveys/', 83 '/sudo/admin/submissions/' 84 ] 85 }, {} 86 ], 87 'Koskom': [{ 88 'type': GROUP_COMMITTEE, 89 'logo_id': 'abakus_koskom.png' 90 }, {}], 91 'LaBamba': [{ 92 'type': GROUP_COMMITTEE, 93 'logo_id': 'abakus_labamba.png' 94 }, {}], 95 'PR': [{ 96 'type': GROUP_COMMITTEE, 97 'logo_id': 'abakus_pr.png' 98 }, {}], 99 'readme': [{ 100 'type': GROUP_COMMITTEE, 101 'logo_id': 'abakus_readme.png' 102 }, {}], 103 'Webkom': [ 104 { 105 'type': GROUP_COMMITTEE, 106 'logo_id': 'abakus_webkom.png', 107 'permissions': ['/sudo/'], 108 'text': 'hei' 109 }, {} 110 ], 111 'Hovedstyret': 112 [{ 113 'logo_id': 'abakus_hs.png', 114 'permissions': ['/sudo/admin/'], 115 }, {}] 116 } 117 ], 118 'Interessegrupper': 119 [{ 120 'description': 'Super-gruppe for alle interessegrupper i Abakus' 121 }, {}] 122 } 123 ], 124 'Students': [ 125 {}, 126 { 127 'Datateknologi': [ 128 {}, 129 { 130 '1. klasse Datateknologi': [{ 131 'type': GROUP_GRADE 132 }, {}], 133 '2. klasse Datateknologi': [{ 134 'type': GROUP_GRADE 135 }, {}], 136 '3. klasse Datateknologi': [{ 137 'type': GROUP_GRADE 138 }, {}], 139 '4. klasse Datateknologi': [{ 140 'type': GROUP_GRADE 141 }, {}], 142 '5. 
klasse Datateknologi': [{ 143 'type': GROUP_GRADE 144 }, {}], 145 } 146 ], 147 'Kommunikasjonsteknologi': [ 148 {}, 149 { 150 '1. klasse Kommunikasjonsteknologi': [{ 151 'type': GROUP_GRADE 152 }, {}], 153 '2. klasse Kommunikasjonsteknologi': [{ 154 'type': GROUP_GRADE 155 }, {}], 156 '3. klasse Kommunikasjonsteknologi': [{ 157 'type': GROUP_GRADE 158 }, {}], 159 '4. klasse Kommunikasjonsteknologi': [{ 160 'type': GROUP_GRADE 161 }, {}], 162 '5. klasse Kommunikasjonsteknologi': [{ 163 'type': GROUP_GRADE 164 }, {}], 165 } 166 ] 167 } 168 ] 169 } 170 171 172 def load_abakus_groups(): 173 insert_abakus_groups(initial_tree) 174 AbakusGroup.objects.rebuild() 175 ``` Path: `lego/apps/contact/send.py` Content: ``` 1 from lego.apps.users.models import AbakusGroup 2 from lego.utils.tasks import send_email 3 4 5 def send_message(title, message, user, anonymous): 6 """ 7 Send a message to HS when users posts to the contact form. 8 Don't catch AbakusGroup.DoesNotExist, this notifies us when the group doesn't exist. 9 """ 10 anonymous = anonymous if user.is_authenticated else True 11 abakus_group = AbakusGroup.objects.get(name='Hovedstyret') 12 users = [membership.user for membership in abakus_group.memberships.select_related('user')] 13 emails = [user.email_address for user in users] 14 15 from_name = 'Anonymous' if anonymous else user.full_name 16 from_email = 'Unknown' if anonymous else user.email_address 17 18 send_email.delay( 19 to_email=emails, context={ 20 'title': title, 21 'message': message, 22 'from_name': from_name, 23 'from_email': from_email 24 }, subject='Ny henvendelse fra kontaktskjemaet', 25 plain_template='contact/email/contact_form.txt', 26 html_template='contact/email/contact_form.html', from_email=None 27 ) 28 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lego/apps/contact/send.py b/lego/apps/contact/send.py --- a/lego/apps/contact/send.py +++ b/lego/apps/contact/send.py @@ -9,14 +9,12 @@ """ anonymous = anonymous if user.is_authenticated else True abakus_group = AbakusGroup.objects.get(name='Hovedstyret') - users = [membership.user for membership in abakus_group.memberships.select_related('user')] - emails = [user.email_address for user in users] from_name = 'Anonymous' if anonymous else user.full_name from_email = 'Unknown' if anonymous else user.email_address send_email.delay( - to_email=emails, context={ + to_email=abakus_group.contact_email, context={ 'title': title, 'message': message, 'from_name': from_name, diff --git a/lego/apps/users/fixtures/initial_abakus_groups.py b/lego/apps/users/fixtures/initial_abakus_groups.py --- a/lego/apps/users/fixtures/initial_abakus_groups.py +++ b/lego/apps/users/fixtures/initial_abakus_groups.py @@ -108,11 +108,13 @@ 'text': 'hei' }, {} ], - 'Hovedstyret': - [{ - 'logo_id': 'abakus_hs.png', - 'permissions': ['/sudo/admin/'], - }, {}] + 'Hovedstyret': [ + { + 'logo_id': 'abakus_hs.png', + 'permissions': ['/sudo/admin/'], + 'contact_email': "[email protected]" + }, {} + ] } ], 'Interessegrupper':
{"golden_diff": "diff --git a/lego/apps/contact/send.py b/lego/apps/contact/send.py\n--- a/lego/apps/contact/send.py\n+++ b/lego/apps/contact/send.py\n@@ -9,14 +9,12 @@\n \"\"\"\n anonymous = anonymous if user.is_authenticated else True\n abakus_group = AbakusGroup.objects.get(name='Hovedstyret')\n- users = [membership.user for membership in abakus_group.memberships.select_related('user')]\n- emails = [user.email_address for user in users]\n \n from_name = 'Anonymous' if anonymous else user.full_name\n from_email = 'Unknown' if anonymous else user.email_address\n \n send_email.delay(\n- to_email=emails, context={\n+ to_email=abakus_group.contact_email, context={\n 'title': title,\n 'message': message,\n 'from_name': from_name,\ndiff --git a/lego/apps/users/fixtures/initial_abakus_groups.py b/lego/apps/users/fixtures/initial_abakus_groups.py\n--- a/lego/apps/users/fixtures/initial_abakus_groups.py\n+++ b/lego/apps/users/fixtures/initial_abakus_groups.py\n@@ -108,11 +108,13 @@\n 'text': 'hei'\n }, {}\n ],\n- 'Hovedstyret':\n- [{\n- 'logo_id': 'abakus_hs.png',\n- 'permissions': ['/sudo/admin/'],\n- }, {}]\n+ 'Hovedstyret': [\n+ {\n+ 'logo_id': 'abakus_hs.png',\n+ 'permissions': ['/sudo/admin/'],\n+ 'contact_email': \"[email protected]\"\n+ }, {}\n+ ]\n }\n ],\n 'Interessegrupper':\n", "issue": "Make contact form send to mail list for HS, instead of individual members\nThe form currently fetches all members of the HS group, and sends the message to each individual address. This makes it cumbersome to discuss the message for HS, it would be better to send it to their mailing list instead.\n", "before_files": [{"content": "from lego.apps.users.constants import GROUP_COMMITTEE, GROUP_GRADE\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.utils.functions import insert_abakus_groups\n\n# isort:skip\n\"\"\"\nThe structure of the tree is key and a list of two dicts.\nThe first dict is the parameters of the current group\nand the second dict are the children of the current group.\n\nE.g. 
Abakus: [\n {\n description: 'ABAKUSGRUPPE',\n permissions: ['/sudo/...']\n ...\n },\n {\n 'Webkom': [{\n description: 'WEBKOMGRUPPE',\n permissions: ['/sudo/']\n ...\n }, {}]\n }\n]\n\"\"\"\n\ninitial_tree = {\n 'Users': [{\n 'description': 'Brukere p\u00e5 Abakus.no'\n }, {}],\n 'Abakus': [\n {\n 'description':\n 'Medlemmer av Abakus',\n 'permissions': [\n '/sudo/admin/meetings/create', '/sudo/admin/meetinginvitations/create',\n '/sudo/admin/registrations/create/', '/sudo/admin/events/payment/',\n '/sudo/admin/comments/create'\n ]\n },\n {\n 'Abakom': [\n {\n 'description':\n 'Medlemmer av Abakom',\n 'permissions': [\n '/sudo/admin/events/',\n '/sudo/admin/pools/',\n '/sudo/admin/registrations/',\n '/sudo/admin/companies/',\n '/sudo/admin/joblistings/',\n ]\n },\n {\n 'Arrkom': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_arrkom.png'\n }, {}],\n 'backup': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_backup.png'\n }, {}],\n 'Bedkom': [\n {\n 'type':\n GROUP_COMMITTEE,\n 'logo_id':\n 'abakus_bedkom.png',\n 'permissions': [\n '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',\n '/sudo/admin/submissions/'\n ]\n }, {}\n ],\n 'Fagkom': [\n {\n 'type':\n GROUP_COMMITTEE,\n 'logo_id':\n 'abakus_fagkom.png',\n 'permissions': [\n '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',\n '/sudo/admin/submissions/'\n ]\n }, {}\n ],\n 'Koskom': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_koskom.png'\n }, {}],\n 'LaBamba': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_labamba.png'\n }, {}],\n 'PR': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_pr.png'\n }, {}],\n 'readme': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_readme.png'\n }, {}],\n 'Webkom': [\n {\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_webkom.png',\n 'permissions': ['/sudo/'],\n 'text': 'hei'\n }, {}\n ],\n 'Hovedstyret':\n [{\n 'logo_id': 'abakus_hs.png',\n 'permissions': ['/sudo/admin/'],\n }, {}]\n }\n ],\n 'Interessegrupper':\n [{\n 'description': 'Super-gruppe for alle interessegrupper i Abakus'\n }, {}]\n }\n ],\n 'Students': [\n {},\n {\n 'Datateknologi': [\n {},\n {\n '1. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '2. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '3. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '4. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '5. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n }\n ],\n 'Kommunikasjonsteknologi': [\n {},\n {\n '1. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '2. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '3. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '4. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '5. 
klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n }\n ]\n }\n ]\n}\n\n\ndef load_abakus_groups():\n insert_abakus_groups(initial_tree)\n AbakusGroup.objects.rebuild()\n", "path": "lego/apps/users/fixtures/initial_abakus_groups.py"}, {"content": "from lego.apps.users.models import AbakusGroup\nfrom lego.utils.tasks import send_email\n\n\ndef send_message(title, message, user, anonymous):\n \"\"\"\n Send a message to HS when users posts to the contact form.\n Don't catch AbakusGroup.DoesNotExist, this notifies us when the group doesn't exist.\n \"\"\"\n anonymous = anonymous if user.is_authenticated else True\n abakus_group = AbakusGroup.objects.get(name='Hovedstyret')\n users = [membership.user for membership in abakus_group.memberships.select_related('user')]\n emails = [user.email_address for user in users]\n\n from_name = 'Anonymous' if anonymous else user.full_name\n from_email = 'Unknown' if anonymous else user.email_address\n\n send_email.delay(\n to_email=emails, context={\n 'title': title,\n 'message': message,\n 'from_name': from_name,\n 'from_email': from_email\n }, subject='Ny henvendelse fra kontaktskjemaet',\n plain_template='contact/email/contact_form.txt',\n html_template='contact/email/contact_form.html', from_email=None\n )\n", "path": "lego/apps/contact/send.py"}], "after_files": [{"content": "from lego.apps.users.constants import GROUP_COMMITTEE, GROUP_GRADE\nfrom lego.apps.users.models import AbakusGroup\nfrom lego.utils.functions import insert_abakus_groups\n\n# isort:skip\n\"\"\"\nThe structure of the tree is key and a list of two dicts.\nThe first dict is the parameters of the current group\nand the second dict are the children of the current group.\n\nE.g. Abakus: [\n {\n description: 'ABAKUSGRUPPE',\n permissions: ['/sudo/...']\n ...\n },\n {\n 'Webkom': [{\n description: 'WEBKOMGRUPPE',\n permissions: ['/sudo/']\n ...\n }, {}]\n }\n]\n\"\"\"\n\ninitial_tree = {\n 'Users': [{\n 'description': 'Brukere p\u00e5 Abakus.no'\n }, {}],\n 'Abakus': [\n {\n 'description':\n 'Medlemmer av Abakus',\n 'permissions': [\n '/sudo/admin/meetings/create', '/sudo/admin/meetinginvitations/create',\n '/sudo/admin/registrations/create/', '/sudo/admin/events/payment/',\n '/sudo/admin/comments/create'\n ]\n },\n {\n 'Abakom': [\n {\n 'description':\n 'Medlemmer av Abakom',\n 'permissions': [\n '/sudo/admin/events/',\n '/sudo/admin/pools/',\n '/sudo/admin/registrations/',\n '/sudo/admin/companies/',\n '/sudo/admin/joblistings/',\n ]\n },\n {\n 'Arrkom': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_arrkom.png'\n }, {}],\n 'backup': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_backup.png'\n }, {}],\n 'Bedkom': [\n {\n 'type':\n GROUP_COMMITTEE,\n 'logo_id':\n 'abakus_bedkom.png',\n 'permissions': [\n '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',\n '/sudo/admin/submissions/'\n ]\n }, {}\n ],\n 'Fagkom': [\n {\n 'type':\n GROUP_COMMITTEE,\n 'logo_id':\n 'abakus_fagkom.png',\n 'permissions': [\n '/sudo/admin/companyinterest/', '/sudo/admin/surveys/',\n '/sudo/admin/submissions/'\n ]\n }, {}\n ],\n 'Koskom': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_koskom.png'\n }, {}],\n 'LaBamba': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_labamba.png'\n }, {}],\n 'PR': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_pr.png'\n }, {}],\n 'readme': [{\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_readme.png'\n }, {}],\n 'Webkom': [\n {\n 'type': GROUP_COMMITTEE,\n 'logo_id': 'abakus_webkom.png',\n 'permissions': ['/sudo/'],\n 'text': 'hei'\n }, 
{}\n ],\n 'Hovedstyret': [\n {\n 'logo_id': 'abakus_hs.png',\n 'permissions': ['/sudo/admin/'],\n 'contact_email': \"[email protected]\"\n }, {}\n ]\n }\n ],\n 'Interessegrupper':\n [{\n 'description': 'Super-gruppe for alle interessegrupper i Abakus'\n }, {}]\n }\n ],\n 'Students': [\n {},\n {\n 'Datateknologi': [\n {},\n {\n '1. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '2. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '3. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '4. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '5. klasse Datateknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n }\n ],\n 'Kommunikasjonsteknologi': [\n {},\n {\n '1. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '2. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '3. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '4. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n '5. klasse Kommunikasjonsteknologi': [{\n 'type': GROUP_GRADE\n }, {}],\n }\n ]\n }\n ]\n}\n\n\ndef load_abakus_groups():\n insert_abakus_groups(initial_tree)\n AbakusGroup.objects.rebuild()\n", "path": "lego/apps/users/fixtures/initial_abakus_groups.py"}, {"content": "from lego.apps.users.models import AbakusGroup\nfrom lego.utils.tasks import send_email\n\n\ndef send_message(title, message, user, anonymous):\n \"\"\"\n Send a message to HS when users posts to the contact form.\n Don't catch AbakusGroup.DoesNotExist, this notifies us when the group doesn't exist.\n \"\"\"\n anonymous = anonymous if user.is_authenticated else True\n abakus_group = AbakusGroup.objects.get(name='Hovedstyret')\n\n from_name = 'Anonymous' if anonymous else user.full_name\n from_email = 'Unknown' if anonymous else user.email_address\n\n send_email.delay(\n to_email=abakus_group.contact_email, context={\n 'title': title,\n 'message': message,\n 'from_name': from_name,\n 'from_email': from_email\n }, subject='Ny henvendelse fra kontaktskjemaet',\n plain_template='contact/email/contact_form.txt',\n html_template='contact/email/contact_form.html', from_email=None\n )\n", "path": "lego/apps/contact/send.py"}]}
2151
393
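To make the behaviour change above concrete, a self-contained toy sketch follows. `AbakusGroup` and `send_email` here are simplified stand-ins for the project's own model and Celery task, and the mailing-list address is a made-up placeholder, not the real one.

```python
class AbakusGroup:
    """Toy stand-in for the real model, now carrying a contact_email field."""

    def __init__(self, name, contact_email):
        self.name = name
        self.contact_email = contact_email


def send_email(to_email, subject, body):
    """Toy stand-in for the project's send_email task."""
    print("To:", to_email)
    print("Subject:", subject)
    print(body)


hs = AbakusGroup("Hovedstyret", "[email protected]")  # placeholder address

# After the patch, the contact form mails the group's list, not each member.
send_email(
    to_email=hs.contact_email,
    subject="Ny henvendelse fra kontaktskjemaet",
    body="Hei, dette er en testhenvendelse.",
)
```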
gh_patches_debug_13864
rasdani/github-patches
git_diff
OpenNMT__OpenNMT-py-303
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- small bug with truncated decoding and tgt feature concat. I'm running https://github.com/Henry-E/opennmt-gen and getting an error with matrix multiplication. ` File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/_functions/blas.py", line 26, in forward matrix1, matrix2, out=output)` The full stack trace is below The problem is in a multiplication inside the decoder RNN. It's trying to multiply matrix 1 by matrix 2. At a guess it's something to do with the extra dimensions of the features not being accounted for by the decoder. I'm not super familiar with the equations being used for the features though so it's hard to tell where the model is being initialised incorrectly. ``` ipdb> matrix1.size() torch.Size([5, 2400]) ipdb> matrix2.size() torch.Size([600, 2400]) ``` The model ``` NMTModel ( (encoder): MeanEncoder ( (embeddings): Embeddings ( (make_embedding): Sequential ( (emb_luts): Elementwise ( (0): Embedding(10818, 600, padding_idx=1) (1): Embedding(704, 600, padding_idx=1) (2): Embedding(40, 600, padding_idx=1) (3): Embedding(4, 600, padding_idx=1) ) ) ) ) (decoder): InputFeedRNNDecoder ( (embeddings): Embeddings ( (make_embedding): Sequential ( (emb_luts): Elementwise ( (0): Embedding(10818, 600, padding_idx=1) ) ) ) (dropout): Dropout (p = 0.3) (rnn): StackedLSTM ( (dropout): Dropout (p = 0.3) (layers): ModuleList ( (0): LSTMCell(1200, 600) (1): LSTMCell(600, 600) ) ) (attn): GlobalAttention ( (linear_out): Linear (1200 -> 600) (sm): Softmax () (tanh): Tanh () ) (copy_attn): GlobalAttention ( (linear_out): Linear (1200 -> 600) (sm): Softmax () (tanh): Tanh () ) ) (generator): CopyGenerator ( (linear): Linear (600 -> 10818) (linear_copy): Linear (600 -> 1) ) ) ``` Stack trace ``` Traceback (most recent call last): File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/ipdb/__main__.py", line 198, in main pdb._runscript(mainpyfile) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/pdb.py", line 1548, in _runscript self.run(statement) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/bdb.py", line 431, in run exec(cmd, globals, locals) File "<string>", line 1, in <module> File "/home/henrye/downloads/OpenNMT-py/train.py", line 1, in <module> from __future__ import division File "/home/henrye/downloads/OpenNMT-py/train.py", line 284, in main train_model(model, train, valid, fields, optim) File "/home/henrye/downloads/OpenNMT-py/train.py", line 144, in train_model train_stats = trainer.train(epoch, report_func) File "/home/henrye/downloads/OpenNMT-py/onmt/Trainer.py", line 119, in train self.model(src, tgt, src_lengths, dec_state) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py", line 224, in __call__ result = self.forward(*input, **kwargs) File "/home/henrye/downloads/OpenNMT-py/onmt/Models.py", line 421, in forward else dec_state) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py", line 224, in __call__ result = self.forward(*input, **kwargs) File "/home/henrye/downloads/OpenNMT-py/onmt/Models.py", line 175, in forward self._run_forward_pass(input, context, state) File "/home/henrye/downloads/OpenNMT-py/onmt/Models.py", line 337, in _run_forward_pass rnn_output, hidden = self.rnn(emb_t, hidden) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py", line 224, in __call__ result = 
self.forward(*input, **kwargs) File "/home/henrye/downloads/OpenNMT-py/onmt/modules/StackedRNN.py", line 24, in forward h_1_i, c_1_i = layer(input, (h_0[i], c_0[i])) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py", line 224, in __call__ result = self.forward(*input, **kwargs) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/rnn.py", line 608, in forward self.bias_ih, self.bias_hh, File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/_functions/rnn.py", line 26, in LSTMCell hgates = F.linear(hidden[0], w_hh) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/functional.py", line 555, in linear output = input.matmul(weight.t()) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/variable.py", line 560, in matmul return torch.matmul(self, other) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/functional.py", line 173, in matmul return torch.mm(tensor1, tensor2) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/variable.py", line 579, in mm return Addmm.apply(output, self, matrix, 0, 1, True) File "/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/_functions/blas.py", line 26, in forward matrix1, matrix2, out=output) RuntimeError: size mismatch at /opt/conda/conda-bld/pytorch_1503970438496/work/torch/lib/THC/generic/THCTensorMathBlas.cu:243 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `onmt/Trainer.py` Content: ``` 1 from __future__ import division 2 """ 3 This is the loadable seq2seq trainer library that is 4 in charge of training details, loss compute, and statistics. 5 See train.py for a use case of this library. 6 7 Note!!! To make this a general library, we implement *only* 8 mechanism things here(i.e. what to do), and leave the strategy 9 things to users(i.e. how to do it). Also see train.py(one of the 10 users of this library) for the strategy things we do. 11 """ 12 import time 13 import sys 14 import math 15 import torch 16 import torch.nn as nn 17 18 import onmt 19 import onmt.modules 20 21 22 class Statistics(object): 23 """ 24 Train/validate loss statistics. 
25 """ 26 def __init__(self, loss=0, n_words=0, n_correct=0): 27 self.loss = loss 28 self.n_words = n_words 29 self.n_correct = n_correct 30 self.n_src_words = 0 31 self.start_time = time.time() 32 33 def update(self, stat): 34 self.loss += stat.loss 35 self.n_words += stat.n_words 36 self.n_correct += stat.n_correct 37 38 def accuracy(self): 39 return 100 * (self.n_correct / self.n_words) 40 41 def ppl(self): 42 return math.exp(min(self.loss / self.n_words, 100)) 43 44 def elapsed_time(self): 45 return time.time() - self.start_time 46 47 def output(self, epoch, batch, n_batches, start): 48 t = self.elapsed_time() 49 print(("Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; " + 50 "%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed") % 51 (epoch, batch, n_batches, 52 self.accuracy(), 53 self.ppl(), 54 self.n_src_words / (t + 1e-5), 55 self.n_words / (t + 1e-5), 56 time.time() - start)) 57 sys.stdout.flush() 58 59 def log(self, prefix, experiment, optim): 60 t = self.elapsed_time() 61 experiment.add_scalar_value(prefix + "_ppl", self.ppl()) 62 experiment.add_scalar_value(prefix + "_accuracy", self.accuracy()) 63 experiment.add_scalar_value(prefix + "_tgtper", self.n_words / t) 64 experiment.add_scalar_value(prefix + "_lr", optim.lr) 65 66 67 class Trainer(object): 68 def __init__(self, model, train_iter, valid_iter, 69 train_loss, valid_loss, optim, 70 trunc_size, shard_size): 71 """ 72 Args: 73 model: the seq2seq model. 74 train_iter: the train data iterator. 75 valid_iter: the validate data iterator. 76 train_loss: the train side LossCompute object for computing loss. 77 valid_loss: the valid side LossCompute object for computing loss. 78 optim: the optimizer responsible for lr update. 79 trunc_size: a batch is divided by several truncs of this size. 80 shard_size: compute loss in shards of this size for efficiency. 81 """ 82 # Basic attributes. 83 self.model = model 84 self.train_iter = train_iter 85 self.valid_iter = valid_iter 86 self.train_loss = train_loss 87 self.valid_loss = valid_loss 88 self.optim = optim 89 self.trunc_size = trunc_size 90 self.shard_size = shard_size 91 92 # Set model in training mode. 93 self.model.train() 94 95 def train(self, epoch, report_func=None): 96 """ Called for each epoch to train. """ 97 total_stats = Statistics() 98 report_stats = Statistics() 99 100 for i, batch in enumerate(self.train_iter): 101 target_size = batch.tgt.size(0) 102 # Truncated BPTT 103 trunc_size = self.trunc_size if self.trunc_size else target_size 104 105 dec_state = None 106 _, src_lengths = batch.src 107 108 src = onmt.IO.make_features(batch, 'src') 109 tgt = onmt.IO.make_features(batch, 'tgt') 110 report_stats.n_src_words += src_lengths.sum() 111 112 for j in range(0, target_size-1, trunc_size): 113 # 1. Create truncated target. 114 tgt = tgt[j: j + trunc_size] 115 116 # 2. F-prop all but generator. 117 self.model.zero_grad() 118 outputs, attns, dec_state = \ 119 self.model(src, tgt, src_lengths, dec_state) 120 121 # 3. Compute loss in shards for memory efficiency. 122 batch_stats = self.train_loss.sharded_compute_loss( 123 batch, outputs, attns, j, 124 trunc_size, self.shard_size) 125 126 # 4. Update the parameters and statistics. 127 self.optim.step() 128 total_stats.update(batch_stats) 129 report_stats.update(batch_stats) 130 131 # If truncated, don't backprop fully. 
132 if dec_state is not None: 133 dec_state.detach() 134 135 if report_func is not None: 136 report_func(epoch, i, len(self.train_iter), 137 total_stats.start_time, self.optim.lr, 138 report_stats) 139 report_stats = Statistics() 140 141 return total_stats 142 143 def validate(self): 144 """ Called for each epoch to validate. """ 145 # Set model in validating mode. 146 self.model.eval() 147 148 stats = Statistics() 149 150 for batch in self.valid_iter: 151 _, src_lengths = batch.src 152 src = onmt.IO.make_features(batch, 'src') 153 tgt = onmt.IO.make_features(batch, 'tgt') 154 155 # F-prop through the model. 156 outputs, attns, _ = self.model(src, tgt, src_lengths) 157 158 # Compute loss. 159 gen_state = onmt.Loss.make_gen_state( 160 outputs, batch, attns, (0, batch.tgt.size(0))) 161 _, batch_stats = self.valid_loss(batch, **gen_state) 162 163 # Update statistics. 164 stats.update(batch_stats) 165 166 # Set model back to training mode. 167 self.model.train() 168 169 return stats 170 171 def epoch_step(self, ppl, epoch): 172 """ Called for each epoch to update learning rate. """ 173 return self.optim.updateLearningRate(ppl, epoch) 174 175 def drop_checkpoint(self, opt, epoch, fields, valid_stats): 176 """ Called conditionally each epoch to save a snapshot. """ 177 real_model = (self.model.module 178 if isinstance(self.model, nn.DataParallel) 179 else self.model) 180 real_generator = (real_model.generator.module 181 if isinstance(real_model.generator, nn.DataParallel) 182 else real_model.generator) 183 184 model_state_dict = real_model.state_dict() 185 model_state_dict = {k: v for k, v in model_state_dict.items() 186 if 'generator' not in k} 187 generator_state_dict = real_generator.state_dict() 188 checkpoint = { 189 'model': model_state_dict, 190 'generator': generator_state_dict, 191 'vocab': onmt.IO.ONMTDataset.save_vocab(fields), 192 'opt': opt, 193 'epoch': epoch, 194 'optim': self.optim 195 } 196 torch.save(checkpoint, 197 '%s_acc_%.2f_ppl_%.2f_e%d.pt' 198 % (opt.save_model, valid_stats.accuracy(), 199 valid_stats.ppl(), epoch)) 200 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/onmt/Trainer.py b/onmt/Trainer.py --- a/onmt/Trainer.py +++ b/onmt/Trainer.py @@ -106,12 +106,12 @@ _, src_lengths = batch.src src = onmt.IO.make_features(batch, 'src') - tgt = onmt.IO.make_features(batch, 'tgt') + tgt_outer = onmt.IO.make_features(batch, 'tgt') report_stats.n_src_words += src_lengths.sum() for j in range(0, target_size-1, trunc_size): # 1. Create truncated target. - tgt = tgt[j: j + trunc_size] + tgt = tgt_outer[j: j + trunc_size] # 2. F-prop all but generator. self.model.zero_grad()
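A toy, framework-free illustration of why the diff introduces `tgt_outer`: re-slicing `tgt` from itself shifts and empties the later truncation windows, while slicing from an untouched outer sequence keeps every window aligned. The plain list below stands in for the real target tensor along the time dimension.

```python
target = list(range(10))  # stand-in for batch.tgt along the time dimension
trunc_size = 4

# Buggy pattern from the original loop: the slice source is overwritten each step.
tgt = target
buggy = []
for j in range(0, len(target) - 1, trunc_size):
    tgt = tgt[j:j + trunc_size]
    buggy.append(tgt)

# Patched pattern: always slice from the untouched outer sequence.
tgt_outer = target
fixed = [tgt_outer[j:j + trunc_size] for j in range(0, len(target) - 1, trunc_size)]

print(buggy)  # [[0, 1, 2, 3], [], []] -- later windows are lost
print(fixed)  # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
```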
{"golden_diff": "diff --git a/onmt/Trainer.py b/onmt/Trainer.py\n--- a/onmt/Trainer.py\n+++ b/onmt/Trainer.py\n@@ -106,12 +106,12 @@\n _, src_lengths = batch.src\n \n src = onmt.IO.make_features(batch, 'src')\n- tgt = onmt.IO.make_features(batch, 'tgt')\n+ tgt_outer = onmt.IO.make_features(batch, 'tgt')\n report_stats.n_src_words += src_lengths.sum()\n \n for j in range(0, target_size-1, trunc_size):\n # 1. Create truncated target.\n- tgt = tgt[j: j + trunc_size]\n+ tgt = tgt_outer[j: j + trunc_size]\n \n # 2. F-prop all but generator.\n self.model.zero_grad()\n", "issue": "small bug with truncated decoding and tgt feature concat.\nI'm running https://github.com/Henry-E/opennmt-gen and getting an error with matrix multiplication.\r\n` File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/_functions/blas.py\", line 26, in forward\r\n matrix1, matrix2, out=output)` \r\nThe full stack trace is below\r\n\r\nThe problem is in a multiplication inside the decoder RNN. It's trying to multiply matrix 1 by matrix 2. At a guess it's something to do with the extra dimensions of the features not being accounted for by the decoder. I'm not super familiar with the equations being used for the features though so it's hard to tell where the model is being initialised incorrectly. \r\n```\r\nipdb> matrix1.size()\r\ntorch.Size([5, 2400])\r\nipdb> matrix2.size()\r\ntorch.Size([600, 2400])\r\n```\r\n\r\nThe model\r\n```\r\nNMTModel (\r\n (encoder): MeanEncoder (\r\n (embeddings): Embeddings (\r\n (make_embedding): Sequential (\r\n (emb_luts): Elementwise (\r\n (0): Embedding(10818, 600, padding_idx=1)\r\n (1): Embedding(704, 600, padding_idx=1)\r\n (2): Embedding(40, 600, padding_idx=1)\r\n (3): Embedding(4, 600, padding_idx=1)\r\n )\r\n )\r\n )\r\n )\r\n (decoder): InputFeedRNNDecoder (\r\n (embeddings): Embeddings (\r\n (make_embedding): Sequential (\r\n (emb_luts): Elementwise (\r\n (0): Embedding(10818, 600, padding_idx=1)\r\n )\r\n )\r\n )\r\n (dropout): Dropout (p = 0.3)\r\n (rnn): StackedLSTM (\r\n (dropout): Dropout (p = 0.3)\r\n (layers): ModuleList (\r\n (0): LSTMCell(1200, 600)\r\n (1): LSTMCell(600, 600)\r\n )\r\n )\r\n (attn): GlobalAttention (\r\n (linear_out): Linear (1200 -> 600)\r\n (sm): Softmax ()\r\n (tanh): Tanh ()\r\n )\r\n (copy_attn): GlobalAttention (\r\n (linear_out): Linear (1200 -> 600)\r\n (sm): Softmax ()\r\n (tanh): Tanh ()\r\n )\r\n )\r\n (generator): CopyGenerator (\r\n (linear): Linear (600 -> 10818)\r\n (linear_copy): Linear (600 -> 1)\r\n )\r\n)\r\n```\r\n\r\n\r\n\r\nStack trace\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/ipdb/__main__.py\", line 198, in main\r\n pdb._runscript(mainpyfile)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/pdb.py\", line 1548, in _runscript\r\n self.run(statement)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/bdb.py\", line 431, in run\r\n exec(cmd, globals, locals)\r\n File \"<string>\", line 1, in <module>\r\n File \"/home/henrye/downloads/OpenNMT-py/train.py\", line 1, in <module>\r\n from __future__ import division\r\n File \"/home/henrye/downloads/OpenNMT-py/train.py\", line 284, in main\r\n train_model(model, train, valid, fields, optim)\r\n File \"/home/henrye/downloads/OpenNMT-py/train.py\", line 144, in train_model\r\n train_stats = trainer.train(epoch, report_func)\r\n File \"/home/henrye/downloads/OpenNMT-py/onmt/Trainer.py\", line 119, in train\r\n self.model(src, tgt, src_lengths, 
dec_state)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 224, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/henrye/downloads/OpenNMT-py/onmt/Models.py\", line 421, in forward\r\n else dec_state)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 224, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/henrye/downloads/OpenNMT-py/onmt/Models.py\", line 175, in forward\r\n self._run_forward_pass(input, context, state)\r\n File \"/home/henrye/downloads/OpenNMT-py/onmt/Models.py\", line 337, in _run_forward_pass\r\n rnn_output, hidden = self.rnn(emb_t, hidden)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 224, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/henrye/downloads/OpenNMT-py/onmt/modules/StackedRNN.py\", line 24, in forward\r\n h_1_i, c_1_i = layer(input, (h_0[i], c_0[i]))\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/module.py\", line 224, in __call__\r\n result = self.forward(*input, **kwargs)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/modules/rnn.py\", line 608, in forward\r\n self.bias_ih, self.bias_hh,\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/_functions/rnn.py\", line 26, in LSTMCell\r\n hgates = F.linear(hidden[0], w_hh)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/nn/functional.py\", line 555, in linear\r\n output = input.matmul(weight.t())\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/variable.py\", line 560, in matmul\r\n return torch.matmul(self, other)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/functional.py\", line 173, in matmul\r\n return torch.mm(tensor1, tensor2)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/variable.py\", line 579, in mm\r\n return Addmm.apply(output, self, matrix, 0, 1, True)\r\n File \"/home/henrye/anaconda3/envs/pytorch/lib/python3.6/site-packages/torch/autograd/_functions/blas.py\", line 26, in forward\r\n matrix1, matrix2, out=output)\r\nRuntimeError: size mismatch at /opt/conda/conda-bld/pytorch_1503970438496/work/torch/lib/THC/generic/THCTensorMathBlas.cu:243\r\n```\n", "before_files": [{"content": "from __future__ import division\n\"\"\"\nThis is the loadable seq2seq trainer library that is\nin charge of training details, loss compute, and statistics.\nSee train.py for a use case of this library.\n\nNote!!! To make this a general library, we implement *only*\nmechanism things here(i.e. what to do), and leave the strategy\nthings to users(i.e. how to do it). 
Also see train.py(one of the\nusers of this library) for the strategy things we do.\n\"\"\"\nimport time\nimport sys\nimport math\nimport torch\nimport torch.nn as nn\n\nimport onmt\nimport onmt.modules\n\n\nclass Statistics(object):\n \"\"\"\n Train/validate loss statistics.\n \"\"\"\n def __init__(self, loss=0, n_words=0, n_correct=0):\n self.loss = loss\n self.n_words = n_words\n self.n_correct = n_correct\n self.n_src_words = 0\n self.start_time = time.time()\n\n def update(self, stat):\n self.loss += stat.loss\n self.n_words += stat.n_words\n self.n_correct += stat.n_correct\n\n def accuracy(self):\n return 100 * (self.n_correct / self.n_words)\n\n def ppl(self):\n return math.exp(min(self.loss / self.n_words, 100))\n\n def elapsed_time(self):\n return time.time() - self.start_time\n\n def output(self, epoch, batch, n_batches, start):\n t = self.elapsed_time()\n print((\"Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; \" +\n \"%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed\") %\n (epoch, batch, n_batches,\n self.accuracy(),\n self.ppl(),\n self.n_src_words / (t + 1e-5),\n self.n_words / (t + 1e-5),\n time.time() - start))\n sys.stdout.flush()\n\n def log(self, prefix, experiment, optim):\n t = self.elapsed_time()\n experiment.add_scalar_value(prefix + \"_ppl\", self.ppl())\n experiment.add_scalar_value(prefix + \"_accuracy\", self.accuracy())\n experiment.add_scalar_value(prefix + \"_tgtper\", self.n_words / t)\n experiment.add_scalar_value(prefix + \"_lr\", optim.lr)\n\n\nclass Trainer(object):\n def __init__(self, model, train_iter, valid_iter,\n train_loss, valid_loss, optim,\n trunc_size, shard_size):\n \"\"\"\n Args:\n model: the seq2seq model.\n train_iter: the train data iterator.\n valid_iter: the validate data iterator.\n train_loss: the train side LossCompute object for computing loss.\n valid_loss: the valid side LossCompute object for computing loss.\n optim: the optimizer responsible for lr update.\n trunc_size: a batch is divided by several truncs of this size.\n shard_size: compute loss in shards of this size for efficiency.\n \"\"\"\n # Basic attributes.\n self.model = model\n self.train_iter = train_iter\n self.valid_iter = valid_iter\n self.train_loss = train_loss\n self.valid_loss = valid_loss\n self.optim = optim\n self.trunc_size = trunc_size\n self.shard_size = shard_size\n\n # Set model in training mode.\n self.model.train()\n\n def train(self, epoch, report_func=None):\n \"\"\" Called for each epoch to train. \"\"\"\n total_stats = Statistics()\n report_stats = Statistics()\n\n for i, batch in enumerate(self.train_iter):\n target_size = batch.tgt.size(0)\n # Truncated BPTT\n trunc_size = self.trunc_size if self.trunc_size else target_size\n\n dec_state = None\n _, src_lengths = batch.src\n\n src = onmt.IO.make_features(batch, 'src')\n tgt = onmt.IO.make_features(batch, 'tgt')\n report_stats.n_src_words += src_lengths.sum()\n\n for j in range(0, target_size-1, trunc_size):\n # 1. Create truncated target.\n tgt = tgt[j: j + trunc_size]\n\n # 2. F-prop all but generator.\n self.model.zero_grad()\n outputs, attns, dec_state = \\\n self.model(src, tgt, src_lengths, dec_state)\n\n # 3. Compute loss in shards for memory efficiency.\n batch_stats = self.train_loss.sharded_compute_loss(\n batch, outputs, attns, j,\n trunc_size, self.shard_size)\n\n # 4. 
Update the parameters and statistics.\n self.optim.step()\n total_stats.update(batch_stats)\n report_stats.update(batch_stats)\n\n # If truncated, don't backprop fully.\n if dec_state is not None:\n dec_state.detach()\n\n if report_func is not None:\n report_func(epoch, i, len(self.train_iter),\n total_stats.start_time, self.optim.lr,\n report_stats)\n report_stats = Statistics()\n\n return total_stats\n\n def validate(self):\n \"\"\" Called for each epoch to validate. \"\"\"\n # Set model in validating mode.\n self.model.eval()\n\n stats = Statistics()\n\n for batch in self.valid_iter:\n _, src_lengths = batch.src\n src = onmt.IO.make_features(batch, 'src')\n tgt = onmt.IO.make_features(batch, 'tgt')\n\n # F-prop through the model.\n outputs, attns, _ = self.model(src, tgt, src_lengths)\n\n # Compute loss.\n gen_state = onmt.Loss.make_gen_state(\n outputs, batch, attns, (0, batch.tgt.size(0)))\n _, batch_stats = self.valid_loss(batch, **gen_state)\n\n # Update statistics.\n stats.update(batch_stats)\n\n # Set model back to training mode.\n self.model.train()\n\n return stats\n\n def epoch_step(self, ppl, epoch):\n \"\"\" Called for each epoch to update learning rate. \"\"\"\n return self.optim.updateLearningRate(ppl, epoch)\n\n def drop_checkpoint(self, opt, epoch, fields, valid_stats):\n \"\"\" Called conditionally each epoch to save a snapshot. \"\"\"\n real_model = (self.model.module\n if isinstance(self.model, nn.DataParallel)\n else self.model)\n real_generator = (real_model.generator.module\n if isinstance(real_model.generator, nn.DataParallel)\n else real_model.generator)\n\n model_state_dict = real_model.state_dict()\n model_state_dict = {k: v for k, v in model_state_dict.items()\n if 'generator' not in k}\n generator_state_dict = real_generator.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n 'generator': generator_state_dict,\n 'vocab': onmt.IO.ONMTDataset.save_vocab(fields),\n 'opt': opt,\n 'epoch': epoch,\n 'optim': self.optim\n }\n torch.save(checkpoint,\n '%s_acc_%.2f_ppl_%.2f_e%d.pt'\n % (opt.save_model, valid_stats.accuracy(),\n valid_stats.ppl(), epoch))\n", "path": "onmt/Trainer.py"}], "after_files": [{"content": "from __future__ import division\n\"\"\"\nThis is the loadable seq2seq trainer library that is\nin charge of training details, loss compute, and statistics.\nSee train.py for a use case of this library.\n\nNote!!! To make this a general library, we implement *only*\nmechanism things here(i.e. what to do), and leave the strategy\nthings to users(i.e. how to do it). 
Also see train.py(one of the\nusers of this library) for the strategy things we do.\n\"\"\"\nimport time\nimport sys\nimport math\nimport torch\nimport torch.nn as nn\n\nimport onmt\nimport onmt.modules\n\n\nclass Statistics(object):\n \"\"\"\n Train/validate loss statistics.\n \"\"\"\n def __init__(self, loss=0, n_words=0, n_correct=0):\n self.loss = loss\n self.n_words = n_words\n self.n_correct = n_correct\n self.n_src_words = 0\n self.start_time = time.time()\n\n def update(self, stat):\n self.loss += stat.loss\n self.n_words += stat.n_words\n self.n_correct += stat.n_correct\n\n def accuracy(self):\n return 100 * (self.n_correct / self.n_words)\n\n def ppl(self):\n return math.exp(min(self.loss / self.n_words, 100))\n\n def elapsed_time(self):\n return time.time() - self.start_time\n\n def output(self, epoch, batch, n_batches, start):\n t = self.elapsed_time()\n print((\"Epoch %2d, %5d/%5d; acc: %6.2f; ppl: %6.2f; \" +\n \"%3.0f src tok/s; %3.0f tgt tok/s; %6.0f s elapsed\") %\n (epoch, batch, n_batches,\n self.accuracy(),\n self.ppl(),\n self.n_src_words / (t + 1e-5),\n self.n_words / (t + 1e-5),\n time.time() - start))\n sys.stdout.flush()\n\n def log(self, prefix, experiment, optim):\n t = self.elapsed_time()\n experiment.add_scalar_value(prefix + \"_ppl\", self.ppl())\n experiment.add_scalar_value(prefix + \"_accuracy\", self.accuracy())\n experiment.add_scalar_value(prefix + \"_tgtper\", self.n_words / t)\n experiment.add_scalar_value(prefix + \"_lr\", optim.lr)\n\n\nclass Trainer(object):\n def __init__(self, model, train_iter, valid_iter,\n train_loss, valid_loss, optim,\n trunc_size, shard_size):\n \"\"\"\n Args:\n model: the seq2seq model.\n train_iter: the train data iterator.\n valid_iter: the validate data iterator.\n train_loss: the train side LossCompute object for computing loss.\n valid_loss: the valid side LossCompute object for computing loss.\n optim: the optimizer responsible for lr update.\n trunc_size: a batch is divided by several truncs of this size.\n shard_size: compute loss in shards of this size for efficiency.\n \"\"\"\n # Basic attributes.\n self.model = model\n self.train_iter = train_iter\n self.valid_iter = valid_iter\n self.train_loss = train_loss\n self.valid_loss = valid_loss\n self.optim = optim\n self.trunc_size = trunc_size\n self.shard_size = shard_size\n\n # Set model in training mode.\n self.model.train()\n\n def train(self, epoch, report_func=None):\n \"\"\" Called for each epoch to train. \"\"\"\n total_stats = Statistics()\n report_stats = Statistics()\n\n for i, batch in enumerate(self.train_iter):\n target_size = batch.tgt.size(0)\n # Truncated BPTT\n trunc_size = self.trunc_size if self.trunc_size else target_size\n\n dec_state = None\n _, src_lengths = batch.src\n\n src = onmt.IO.make_features(batch, 'src')\n tgt_outer = onmt.IO.make_features(batch, 'tgt')\n report_stats.n_src_words += src_lengths.sum()\n\n for j in range(0, target_size-1, trunc_size):\n # 1. Create truncated target.\n tgt = tgt_outer[j: j + trunc_size]\n\n # 2. F-prop all but generator.\n self.model.zero_grad()\n outputs, attns, dec_state = \\\n self.model(src, tgt, src_lengths, dec_state)\n\n # 3. Compute loss in shards for memory efficiency.\n batch_stats = self.train_loss.sharded_compute_loss(\n batch, outputs, attns, j,\n trunc_size, self.shard_size)\n\n # 4. 
Update the parameters and statistics.\n self.optim.step()\n total_stats.update(batch_stats)\n report_stats.update(batch_stats)\n\n # If truncated, don't backprop fully.\n if dec_state is not None:\n dec_state.detach()\n\n if report_func is not None:\n report_func(epoch, i, len(self.train_iter),\n total_stats.start_time, self.optim.lr,\n report_stats)\n report_stats = Statistics()\n\n return total_stats\n\n def validate(self):\n \"\"\" Called for each epoch to validate. \"\"\"\n # Set model in validating mode.\n self.model.eval()\n\n stats = Statistics()\n\n for batch in self.valid_iter:\n _, src_lengths = batch.src\n src = onmt.IO.make_features(batch, 'src')\n tgt = onmt.IO.make_features(batch, 'tgt')\n\n # F-prop through the model.\n outputs, attns, _ = self.model(src, tgt, src_lengths)\n\n # Compute loss.\n gen_state = onmt.Loss.make_gen_state(\n outputs, batch, attns, (0, batch.tgt.size(0)))\n _, batch_stats = self.valid_loss(batch, **gen_state)\n\n # Update statistics.\n stats.update(batch_stats)\n\n # Set model back to training mode.\n self.model.train()\n\n return stats\n\n def epoch_step(self, ppl, epoch):\n \"\"\" Called for each epoch to update learning rate. \"\"\"\n return self.optim.updateLearningRate(ppl, epoch)\n\n def drop_checkpoint(self, opt, epoch, fields, valid_stats):\n \"\"\" Called conditionally each epoch to save a snapshot. \"\"\"\n real_model = (self.model.module\n if isinstance(self.model, nn.DataParallel)\n else self.model)\n real_generator = (real_model.generator.module\n if isinstance(real_model.generator, nn.DataParallel)\n else real_model.generator)\n\n model_state_dict = real_model.state_dict()\n model_state_dict = {k: v for k, v in model_state_dict.items()\n if 'generator' not in k}\n generator_state_dict = real_generator.state_dict()\n checkpoint = {\n 'model': model_state_dict,\n 'generator': generator_state_dict,\n 'vocab': onmt.IO.ONMTDataset.save_vocab(fields),\n 'opt': opt,\n 'epoch': epoch,\n 'optim': self.optim\n }\n torch.save(checkpoint,\n '%s_acc_%.2f_ppl_%.2f_e%d.pt'\n % (opt.save_model, valid_stats.accuracy(),\n valid_stats.ppl(), epoch))\n", "path": "onmt/Trainer.py"}]}
4,043
182
gh_patches_debug_17230
rasdani/github-patches
git_diff
DataDog__dd-trace-py-2988
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Parsing and usage of boolean environment variables doesn't work as expected We cannot specify "falsy" values via environment variables, as they will be overridden by `default` in `get_env`. Also some integrations (e.g. `botocore` for setting `distributed_tracing`) don't parse the result of `get_env` correctly, and thus cannot be changed via environment variables. I would like to set `config.botocore["distributing_tracing"]` to `False` via the `DD_BOTOCORE_DISTRIBUTED_TRACING` environment variable, but because the default is `True`, there is no way to change the semantics of the setting to disable the behaviour. There are two issues, and if either of them was fixed, the problem would be fixed. I would be happy to provide a fix for both of them, or be directed to a way to fix it, without having to add extra code to my project. ## Empty environment variables are assumed to be missing If you do `DD_BOTOCORE_DISTRIBUTED_TRACING=` (an empty string), [the relevant code](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/utils/formats.py#L47-L58) will replace it with `default`, in which case it would be `True`: ```python def get_env(*parts, **kwargs): ... # env = "DD_BOTOCORE_DISTRIBUTED_TRACING" value = os.getenv(env) # value = "" ... # legacy = None value = value or legacy # value = None # default = True return value if value else default # return True ``` The fix would be to check for `None`, instead for "falsy": ```python if value is None: if legacy is not None: value = legacy else: value = default ``` ## Usage of `get_env` for booleans should convert text values to boolean When the `botocore` integration [uses](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/contrib/botocore/patch.py#L39) `get_env` to define a boolean option, it should convert common boolean text values to boolean, and reject others: ```python config._add( "botocore", { "distributed_tracing": get_bool_env("botocore", "distributed_tracing", default=True), ..., }, ) ``` And ```python TRUE_STRINGS = [True, "true", "yes", "y", "enable", "enabled", "1"] FALSE_STRINGS = [False, "false", "no", "n", "disable", "disabled", "0"] def to_boolean(value: str) -> bool: if value.lower() in TRUE_STRINGS: return True if value.lower() in FALSE_STRINGS: return False raise ValueError(f"Unknown boolean value '{value}'") def get_bool_env(*parts, **kwargs): return to_boolean(get_env(*parts, **kwargs)) ``` ### Which version of dd-trace-py are you using? `ddtrace==0.55.3` ### Which version of pip are you using? `pip 21.2.4 (python 3.8)` ### Which version of the libraries are you using? Not relevant ### How can we reproduce your problem? First issue: Before running the script: ```shell DD_BOTOCORE_DISTRIBUTED_TRACING= ``` Run the script: ```python3 from ddtrace import config from ddtrace.contrib import botocore print(config.botocore["distributed_tracing"]) print(bool(config.botocore["distributed_tracing"])) ``` Outputs: ``` True True ``` Second issue: Before running the script: ```shell DD_BOTOCORE_DISTRIBUTED_TRACING=False ``` Run the script: ```python3 from ddtrace import config from ddtrace.contrib import botocore print(config.botocore["distributed_tracing"]) print(bool(config.botocore["distributed_tracing"])) ``` Outputs: ``` False True ``` ### What is the result that you get? 
I cannot meaningfully change the value of `config.botocore["distributed_tracing"]` ### What is the result that you expected? I want to be able to meaningfully change `config.botocore["distributed_tracing"]` via the environment variable (i.e. `DD_BOTOCORE_DISTRIBUTED_TRACING`) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/botocore/patch.py` Content: ``` 1 """ 2 Trace queries to aws api done via botocore client 3 """ 4 # 3p 5 import base64 6 import json 7 8 import botocore.client 9 10 from ddtrace import config 11 from ddtrace.vendor import wrapt 12 13 # project 14 from ...constants import ANALYTICS_SAMPLE_RATE_KEY 15 from ...constants import SPAN_MEASURED_KEY 16 from ...ext import SpanTypes 17 from ...ext import aws 18 from ...ext import http 19 from ...internal.logger import get_logger 20 from ...pin import Pin 21 from ...propagation.http import HTTPPropagator 22 from ...utils import get_argument_value 23 from ...utils.formats import deep_getattr 24 from ...utils.formats import get_env 25 from ...utils.wrappers import unwrap 26 27 28 # Original botocore client class 29 _Botocore_client = botocore.client.BaseClient 30 31 ARGS_NAME = ("action", "params", "path", "verb") 32 TRACED_ARGS = {"params", "path", "verb"} 33 34 log = get_logger(__name__) 35 36 # Botocore default settings 37 config._add( 38 "botocore", 39 { 40 "distributed_tracing": get_env("botocore", "distributed_tracing", default=True), 41 "invoke_with_legacy_context": get_env("botocore", "invoke_with_legacy_context", default=False), 42 }, 43 ) 44 45 46 def inject_trace_data_to_message_attributes(trace_data, entry): 47 if "MessageAttributes" not in entry: 48 entry["MessageAttributes"] = {} 49 # An Amazon SQS message can contain up to 10 metadata attributes. 
50 if len(entry["MessageAttributes"]) < 10: 51 entry["MessageAttributes"]["_datadog"] = {"DataType": "String", "StringValue": json.dumps(trace_data)} 52 else: 53 log.debug("skipping trace injection, max number (10) of MessageAttributes exceeded") 54 55 56 def inject_trace_to_sqs_batch_message(args, span): 57 trace_data = {} 58 HTTPPropagator.inject(span.context, trace_data) 59 params = args[1] 60 61 for entry in params["Entries"]: 62 inject_trace_data_to_message_attributes(trace_data, entry) 63 64 65 def inject_trace_to_sqs_message(args, span): 66 trace_data = {} 67 HTTPPropagator.inject(span.context, trace_data) 68 params = args[1] 69 70 inject_trace_data_to_message_attributes(trace_data, params) 71 72 73 def modify_client_context(client_context_object, trace_headers): 74 if config.botocore["invoke_with_legacy_context"]: 75 trace_headers = {"_datadog": trace_headers} 76 77 if "custom" in client_context_object: 78 client_context_object["custom"].update(trace_headers) 79 else: 80 client_context_object["custom"] = trace_headers 81 82 83 def inject_trace_to_client_context(args, span): 84 trace_headers = {} 85 HTTPPropagator.inject(span.context, trace_headers) 86 client_context_object = {} 87 params = args[1] 88 if "ClientContext" in params: 89 try: 90 client_context_json = base64.b64decode(params["ClientContext"]).decode("utf-8") 91 client_context_object = json.loads(client_context_json) 92 except Exception: 93 log.warning("malformed client_context=%s", params["ClientContext"], exc_info=True) 94 return 95 modify_client_context(client_context_object, trace_headers) 96 try: 97 json_context = json.dumps(client_context_object).encode("utf-8") 98 except Exception: 99 log.warning("unable to encode modified client context as json: %s", client_context_object, exc_info=True) 100 return 101 params["ClientContext"] = base64.b64encode(json_context).decode("utf-8") 102 103 104 def patch(): 105 if getattr(botocore.client, "_datadog_patch", False): 106 return 107 setattr(botocore.client, "_datadog_patch", True) 108 109 wrapt.wrap_function_wrapper("botocore.client", "BaseClient._make_api_call", patched_api_call) 110 Pin(service="aws", app="aws").onto(botocore.client.BaseClient) 111 112 113 def unpatch(): 114 if getattr(botocore.client, "_datadog_patch", False): 115 setattr(botocore.client, "_datadog_patch", False) 116 unwrap(botocore.client.BaseClient, "_make_api_call") 117 118 119 def patched_api_call(original_func, instance, args, kwargs): 120 121 pin = Pin.get_from(instance) 122 if not pin or not pin.enabled(): 123 return original_func(*args, **kwargs) 124 125 endpoint_name = deep_getattr(instance, "_endpoint._endpoint_prefix") 126 127 with pin.tracer.trace( 128 "{}.command".format(endpoint_name), service="{}.{}".format(pin.service, endpoint_name), span_type=SpanTypes.HTTP 129 ) as span: 130 span.set_tag(SPAN_MEASURED_KEY) 131 operation = None 132 if args: 133 operation = get_argument_value(args, kwargs, 0, "operation_name") 134 # DEV: join is the fastest way of concatenating strings that is compatible 135 # across Python versions (see 136 # https://stackoverflow.com/questions/1316887/what-is-the-most-efficient-string-concatenation-method-in-python) 137 span.resource = ".".join((endpoint_name, operation.lower())) 138 139 if config.botocore["distributed_tracing"]: 140 if endpoint_name == "lambda" and operation == "Invoke": 141 inject_trace_to_client_context(args, span) 142 if endpoint_name == "sqs" and operation == "SendMessage": 143 inject_trace_to_sqs_message(args, span) 144 if endpoint_name == "sqs" and 
operation == "SendMessageBatch": 145 inject_trace_to_sqs_batch_message(args, span) 146 147 else: 148 span.resource = endpoint_name 149 150 aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS) 151 152 region_name = deep_getattr(instance, "meta.region_name") 153 154 span._set_str_tag("aws.agent", "botocore") 155 if operation is not None: 156 span._set_str_tag("aws.operation", operation) 157 if region_name is not None: 158 span._set_str_tag("aws.region", region_name) 159 160 result = original_func(*args, **kwargs) 161 162 response_meta = result.get("ResponseMetadata") 163 if response_meta: 164 if "HTTPStatusCode" in response_meta: 165 span.set_tag(http.STATUS_CODE, response_meta["HTTPStatusCode"]) 166 167 if "RetryAttempts" in response_meta: 168 span.set_tag("retry_attempts", response_meta["RetryAttempts"]) 169 170 if "RequestId" in response_meta: 171 span.set_tag("aws.requestid", response_meta["RequestId"]) 172 173 # set analytics sample rate 174 span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.botocore.get_analytics_sample_rate()) 175 176 return result 177 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/botocore/patch.py b/ddtrace/contrib/botocore/patch.py --- a/ddtrace/contrib/botocore/patch.py +++ b/ddtrace/contrib/botocore/patch.py @@ -20,6 +20,7 @@ from ...pin import Pin from ...propagation.http import HTTPPropagator from ...utils import get_argument_value +from ...utils.formats import asbool from ...utils.formats import deep_getattr from ...utils.formats import get_env from ...utils.wrappers import unwrap @@ -37,8 +38,8 @@ config._add( "botocore", { - "distributed_tracing": get_env("botocore", "distributed_tracing", default=True), - "invoke_with_legacy_context": get_env("botocore", "invoke_with_legacy_context", default=False), + "distributed_tracing": asbool(get_env("botocore", "distributed_tracing", default=True)), + "invoke_with_legacy_context": asbool(get_env("botocore", "invoke_with_legacy_context", default=False)), }, )
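The diff above wraps the `get_env` results in `asbool` so that string values such as `"False"` coming from `DD_BOTOCORE_DISTRIBUTED_TRACING` are coerced to real booleans instead of being treated as truthy non-empty strings. A rough sketch of such a coercion helper, along the lines the issue itself proposes (an assumption for illustration, not necessarily ddtrace's exact `asbool` implementation):

```python
# Hedged sketch of a string-to-bool coercion helper in the spirit of the fix above;
# the real ddtrace `asbool` may accept a different set of values or handle errors differently.
def asbool_sketch(value):
    if isinstance(value, bool) or value is None:
        return bool(value)
    lowered = str(value).strip().lower()
    if lowered in ("true", "yes", "y", "enable", "enabled", "1"):
        return True
    if lowered in ("false", "no", "n", "disable", "disabled", "0"):
        return False
    raise ValueError("Unknown boolean value %r" % value)

# With such a helper, DD_BOTOCORE_DISTRIBUTED_TRACING=False yields a real False:
print(asbool_sketch("False"))  # False
print(asbool_sketch(True))     # True (defaults passed as real booleans pass through unchanged)
```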
{"golden_diff": "diff --git a/ddtrace/contrib/botocore/patch.py b/ddtrace/contrib/botocore/patch.py\n--- a/ddtrace/contrib/botocore/patch.py\n+++ b/ddtrace/contrib/botocore/patch.py\n@@ -20,6 +20,7 @@\n from ...pin import Pin\n from ...propagation.http import HTTPPropagator\n from ...utils import get_argument_value\n+from ...utils.formats import asbool\n from ...utils.formats import deep_getattr\n from ...utils.formats import get_env\n from ...utils.wrappers import unwrap\n@@ -37,8 +38,8 @@\n config._add(\n \"botocore\",\n {\n- \"distributed_tracing\": get_env(\"botocore\", \"distributed_tracing\", default=True),\n- \"invoke_with_legacy_context\": get_env(\"botocore\", \"invoke_with_legacy_context\", default=False),\n+ \"distributed_tracing\": asbool(get_env(\"botocore\", \"distributed_tracing\", default=True)),\n+ \"invoke_with_legacy_context\": asbool(get_env(\"botocore\", \"invoke_with_legacy_context\", default=False)),\n },\n )\n", "issue": "Parsing and usage of boolean environment variables doesn't work as expected\nWe cannot specify \"falsy\" values via environment variables, as they will be overridden by `default` in `get_env`.\r\n\r\nAlso some integrations (e.g. `botocore` for setting `distributed_tracing`) don't parse the result of `get_env` correctly, and thus cannot be changed via environment variables.\r\n\r\nI would like to set `config.botocore[\"distributing_tracing\"]` to `False` via the `DD_BOTOCORE_DISTRIBUTED_TRACING` environment variable, but because the default is `True`, there is no way to change the semantics of the setting to disable the behaviour.\r\n\r\nThere are two issues, and if either of them was fixed, the problem would be fixed.\r\n\r\nI would be happy to provide a fix for both of them, or be directed to a way to fix it, without having to add extra code to my project.\r\n\r\n## Empty environment variables are assumed to be missing\r\n\r\nIf you do `DD_BOTOCORE_DISTRIBUTED_TRACING=` (an empty string), [the relevant code](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/utils/formats.py#L47-L58) will replace it with `default`, in which case it would be `True`:\r\n\r\n```python\r\ndef get_env(*parts, **kwargs):\r\n ...\r\n\r\n # env = \"DD_BOTOCORE_DISTRIBUTED_TRACING\"\r\n value = os.getenv(env) # value = \"\"\r\n ...\r\n\r\n # legacy = None\r\n value = value or legacy # value = None\r\n # default = True\r\n return value if value else default # return True\r\n```\r\n\r\nThe fix would be to check for `None`, instead for \"falsy\":\r\n\r\n```python\r\nif value is None:\r\n if legacy is not None:\r\n value = legacy\r\n else:\r\n value = default\r\n```\r\n\r\n## Usage of `get_env` for booleans should convert text values to boolean\r\n\r\nWhen the `botocore` integration [uses](https://github.com/DataDog/dd-trace-py/blob/e7a73336ab7c4c1479d504f86730c4dfaeb2e5f7/ddtrace/contrib/botocore/patch.py#L39) `get_env` to define a boolean option, it should convert common boolean text values to boolean, and reject others:\r\n\r\n```python\r\nconfig._add(\r\n \"botocore\",\r\n {\r\n \"distributed_tracing\": get_bool_env(\"botocore\", \"distributed_tracing\", default=True),\r\n ...,\r\n },\r\n)\r\n```\r\nAnd\r\n```python\r\nTRUE_STRINGS = [True, \"true\", \"yes\", \"y\", \"enable\", \"enabled\", \"1\"]\r\nFALSE_STRINGS = [False, \"false\", \"no\", \"n\", \"disable\", \"disabled\", \"0\"]\r\n\r\n\r\ndef to_boolean(value: str) -> bool:\r\n if value.lower() in TRUE_STRINGS:\r\n return True\r\n if value.lower() in FALSE_STRINGS:\r\n return 
False\r\n raise ValueError(f\"Unknown boolean value '{value}'\")\r\n\r\n\r\n\r\ndef get_bool_env(*parts, **kwargs):\r\n return to_boolean(get_env(*parts, **kwargs))\r\n```\r\n\r\n### Which version of dd-trace-py are you using?\r\n\r\n`ddtrace==0.55.3`\r\n\r\n### Which version of pip are you using?\r\n\r\n`pip 21.2.4 (python 3.8)`\r\n\r\n### Which version of the libraries are you using?\r\n\r\nNot relevant\r\n\r\n### How can we reproduce your problem?\r\n\r\nFirst issue:\r\n\r\nBefore running the script:\r\n\r\n```shell\r\nDD_BOTOCORE_DISTRIBUTED_TRACING=\r\n```\r\n\r\nRun the script:\r\n\r\n```python3\r\nfrom ddtrace import config\r\nfrom ddtrace.contrib import botocore\r\nprint(config.botocore[\"distributed_tracing\"])\r\nprint(bool(config.botocore[\"distributed_tracing\"]))\r\n```\r\n\r\nOutputs:\r\n\r\n```\r\nTrue\r\nTrue\r\n```\r\n\r\nSecond issue:\r\n\r\nBefore running the script:\r\n\r\n```shell\r\nDD_BOTOCORE_DISTRIBUTED_TRACING=False\r\n```\r\n\r\nRun the script:\r\n\r\n```python3\r\nfrom ddtrace import config\r\nfrom ddtrace.contrib import botocore\r\nprint(config.botocore[\"distributed_tracing\"])\r\nprint(bool(config.botocore[\"distributed_tracing\"]))\r\n```\r\n\r\nOutputs:\r\n\r\n```\r\nFalse\r\nTrue\r\n```\r\n\r\n\r\n### What is the result that you get?\r\n\r\nI cannot meaningfully change the value of `config.botocore[\"distributed_tracing\"]`\r\n\r\n### What is the result that you expected?\r\n\r\nI want to be able to meaningfully change `config.botocore[\"distributed_tracing\"]` via the environment variable (i.e. `DD_BOTOCORE_DISTRIBUTED_TRACING`)\r\n\n", "before_files": [{"content": "\"\"\"\nTrace queries to aws api done via botocore client\n\"\"\"\n# 3p\nimport base64\nimport json\n\nimport botocore.client\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\n# project\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import aws\nfrom ...ext import http\nfrom ...internal.logger import get_logger\nfrom ...pin import Pin\nfrom ...propagation.http import HTTPPropagator\nfrom ...utils import get_argument_value\nfrom ...utils.formats import deep_getattr\nfrom ...utils.formats import get_env\nfrom ...utils.wrappers import unwrap\n\n\n# Original botocore client class\n_Botocore_client = botocore.client.BaseClient\n\nARGS_NAME = (\"action\", \"params\", \"path\", \"verb\")\nTRACED_ARGS = {\"params\", \"path\", \"verb\"}\n\nlog = get_logger(__name__)\n\n# Botocore default settings\nconfig._add(\n \"botocore\",\n {\n \"distributed_tracing\": get_env(\"botocore\", \"distributed_tracing\", default=True),\n \"invoke_with_legacy_context\": get_env(\"botocore\", \"invoke_with_legacy_context\", default=False),\n },\n)\n\n\ndef inject_trace_data_to_message_attributes(trace_data, entry):\n if \"MessageAttributes\" not in entry:\n entry[\"MessageAttributes\"] = {}\n # An Amazon SQS message can contain up to 10 metadata attributes.\n if len(entry[\"MessageAttributes\"]) < 10:\n entry[\"MessageAttributes\"][\"_datadog\"] = {\"DataType\": \"String\", \"StringValue\": json.dumps(trace_data)}\n else:\n log.debug(\"skipping trace injection, max number (10) of MessageAttributes exceeded\")\n\n\ndef inject_trace_to_sqs_batch_message(args, span):\n trace_data = {}\n HTTPPropagator.inject(span.context, trace_data)\n params = args[1]\n\n for entry in params[\"Entries\"]:\n inject_trace_data_to_message_attributes(trace_data, entry)\n\n\ndef inject_trace_to_sqs_message(args, span):\n trace_data = {}\n 
HTTPPropagator.inject(span.context, trace_data)\n params = args[1]\n\n inject_trace_data_to_message_attributes(trace_data, params)\n\n\ndef modify_client_context(client_context_object, trace_headers):\n if config.botocore[\"invoke_with_legacy_context\"]:\n trace_headers = {\"_datadog\": trace_headers}\n\n if \"custom\" in client_context_object:\n client_context_object[\"custom\"].update(trace_headers)\n else:\n client_context_object[\"custom\"] = trace_headers\n\n\ndef inject_trace_to_client_context(args, span):\n trace_headers = {}\n HTTPPropagator.inject(span.context, trace_headers)\n client_context_object = {}\n params = args[1]\n if \"ClientContext\" in params:\n try:\n client_context_json = base64.b64decode(params[\"ClientContext\"]).decode(\"utf-8\")\n client_context_object = json.loads(client_context_json)\n except Exception:\n log.warning(\"malformed client_context=%s\", params[\"ClientContext\"], exc_info=True)\n return\n modify_client_context(client_context_object, trace_headers)\n try:\n json_context = json.dumps(client_context_object).encode(\"utf-8\")\n except Exception:\n log.warning(\"unable to encode modified client context as json: %s\", client_context_object, exc_info=True)\n return\n params[\"ClientContext\"] = base64.b64encode(json_context).decode(\"utf-8\")\n\n\ndef patch():\n if getattr(botocore.client, \"_datadog_patch\", False):\n return\n setattr(botocore.client, \"_datadog_patch\", True)\n\n wrapt.wrap_function_wrapper(\"botocore.client\", \"BaseClient._make_api_call\", patched_api_call)\n Pin(service=\"aws\", app=\"aws\").onto(botocore.client.BaseClient)\n\n\ndef unpatch():\n if getattr(botocore.client, \"_datadog_patch\", False):\n setattr(botocore.client, \"_datadog_patch\", False)\n unwrap(botocore.client.BaseClient, \"_make_api_call\")\n\n\ndef patched_api_call(original_func, instance, args, kwargs):\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return original_func(*args, **kwargs)\n\n endpoint_name = deep_getattr(instance, \"_endpoint._endpoint_prefix\")\n\n with pin.tracer.trace(\n \"{}.command\".format(endpoint_name), service=\"{}.{}\".format(pin.service, endpoint_name), span_type=SpanTypes.HTTP\n ) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n operation = None\n if args:\n operation = get_argument_value(args, kwargs, 0, \"operation_name\")\n # DEV: join is the fastest way of concatenating strings that is compatible\n # across Python versions (see\n # https://stackoverflow.com/questions/1316887/what-is-the-most-efficient-string-concatenation-method-in-python)\n span.resource = \".\".join((endpoint_name, operation.lower()))\n\n if config.botocore[\"distributed_tracing\"]:\n if endpoint_name == \"lambda\" and operation == \"Invoke\":\n inject_trace_to_client_context(args, span)\n if endpoint_name == \"sqs\" and operation == \"SendMessage\":\n inject_trace_to_sqs_message(args, span)\n if endpoint_name == \"sqs\" and operation == \"SendMessageBatch\":\n inject_trace_to_sqs_batch_message(args, span)\n\n else:\n span.resource = endpoint_name\n\n aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)\n\n region_name = deep_getattr(instance, \"meta.region_name\")\n\n span._set_str_tag(\"aws.agent\", \"botocore\")\n if operation is not None:\n span._set_str_tag(\"aws.operation\", operation)\n if region_name is not None:\n span._set_str_tag(\"aws.region\", region_name)\n\n result = original_func(*args, **kwargs)\n\n response_meta = result.get(\"ResponseMetadata\")\n if response_meta:\n if \"HTTPStatusCode\" in 
response_meta:\n span.set_tag(http.STATUS_CODE, response_meta[\"HTTPStatusCode\"])\n\n if \"RetryAttempts\" in response_meta:\n span.set_tag(\"retry_attempts\", response_meta[\"RetryAttempts\"])\n\n if \"RequestId\" in response_meta:\n span.set_tag(\"aws.requestid\", response_meta[\"RequestId\"])\n\n # set analytics sample rate\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.botocore.get_analytics_sample_rate())\n\n return result\n", "path": "ddtrace/contrib/botocore/patch.py"}], "after_files": [{"content": "\"\"\"\nTrace queries to aws api done via botocore client\n\"\"\"\n# 3p\nimport base64\nimport json\n\nimport botocore.client\n\nfrom ddtrace import config\nfrom ddtrace.vendor import wrapt\n\n# project\nfrom ...constants import ANALYTICS_SAMPLE_RATE_KEY\nfrom ...constants import SPAN_MEASURED_KEY\nfrom ...ext import SpanTypes\nfrom ...ext import aws\nfrom ...ext import http\nfrom ...internal.logger import get_logger\nfrom ...pin import Pin\nfrom ...propagation.http import HTTPPropagator\nfrom ...utils import get_argument_value\nfrom ...utils.formats import asbool\nfrom ...utils.formats import deep_getattr\nfrom ...utils.formats import get_env\nfrom ...utils.wrappers import unwrap\n\n\n# Original botocore client class\n_Botocore_client = botocore.client.BaseClient\n\nARGS_NAME = (\"action\", \"params\", \"path\", \"verb\")\nTRACED_ARGS = {\"params\", \"path\", \"verb\"}\n\nlog = get_logger(__name__)\n\n# Botocore default settings\nconfig._add(\n \"botocore\",\n {\n \"distributed_tracing\": asbool(get_env(\"botocore\", \"distributed_tracing\", default=True)),\n \"invoke_with_legacy_context\": asbool(get_env(\"botocore\", \"invoke_with_legacy_context\", default=False)),\n },\n)\n\n\ndef inject_trace_data_to_message_attributes(trace_data, entry):\n if \"MessageAttributes\" not in entry:\n entry[\"MessageAttributes\"] = {}\n # An Amazon SQS message can contain up to 10 metadata attributes.\n if len(entry[\"MessageAttributes\"]) < 10:\n entry[\"MessageAttributes\"][\"_datadog\"] = {\"DataType\": \"String\", \"StringValue\": json.dumps(trace_data)}\n else:\n log.debug(\"skipping trace injection, max number (10) of MessageAttributes exceeded\")\n\n\ndef inject_trace_to_sqs_batch_message(args, span):\n trace_data = {}\n HTTPPropagator.inject(span.context, trace_data)\n params = args[1]\n\n for entry in params[\"Entries\"]:\n inject_trace_data_to_message_attributes(trace_data, entry)\n\n\ndef inject_trace_to_sqs_message(args, span):\n trace_data = {}\n HTTPPropagator.inject(span.context, trace_data)\n params = args[1]\n\n inject_trace_data_to_message_attributes(trace_data, params)\n\n\ndef modify_client_context(client_context_object, trace_headers):\n if config.botocore[\"invoke_with_legacy_context\"]:\n trace_headers = {\"_datadog\": trace_headers}\n\n if \"custom\" in client_context_object:\n client_context_object[\"custom\"].update(trace_headers)\n else:\n client_context_object[\"custom\"] = trace_headers\n\n\ndef inject_trace_to_client_context(args, span):\n trace_headers = {}\n HTTPPropagator.inject(span.context, trace_headers)\n client_context_object = {}\n params = args[1]\n if \"ClientContext\" in params:\n try:\n client_context_json = base64.b64decode(params[\"ClientContext\"]).decode(\"utf-8\")\n client_context_object = json.loads(client_context_json)\n except Exception:\n log.warning(\"malformed client_context=%s\", params[\"ClientContext\"], exc_info=True)\n return\n modify_client_context(client_context_object, trace_headers)\n try:\n json_context = 
json.dumps(client_context_object).encode(\"utf-8\")\n except Exception:\n log.warning(\"unable to encode modified client context as json: %s\", client_context_object, exc_info=True)\n return\n params[\"ClientContext\"] = base64.b64encode(json_context).decode(\"utf-8\")\n\n\ndef patch():\n if getattr(botocore.client, \"_datadog_patch\", False):\n return\n setattr(botocore.client, \"_datadog_patch\", True)\n\n wrapt.wrap_function_wrapper(\"botocore.client\", \"BaseClient._make_api_call\", patched_api_call)\n Pin(service=\"aws\", app=\"aws\").onto(botocore.client.BaseClient)\n\n\ndef unpatch():\n if getattr(botocore.client, \"_datadog_patch\", False):\n setattr(botocore.client, \"_datadog_patch\", False)\n unwrap(botocore.client.BaseClient, \"_make_api_call\")\n\n\ndef patched_api_call(original_func, instance, args, kwargs):\n\n pin = Pin.get_from(instance)\n if not pin or not pin.enabled():\n return original_func(*args, **kwargs)\n\n endpoint_name = deep_getattr(instance, \"_endpoint._endpoint_prefix\")\n\n with pin.tracer.trace(\n \"{}.command\".format(endpoint_name), service=\"{}.{}\".format(pin.service, endpoint_name), span_type=SpanTypes.HTTP\n ) as span:\n span.set_tag(SPAN_MEASURED_KEY)\n operation = None\n if args:\n operation = get_argument_value(args, kwargs, 0, \"operation_name\")\n # DEV: join is the fastest way of concatenating strings that is compatible\n # across Python versions (see\n # https://stackoverflow.com/questions/1316887/what-is-the-most-efficient-string-concatenation-method-in-python)\n span.resource = \".\".join((endpoint_name, operation.lower()))\n\n if config.botocore[\"distributed_tracing\"]:\n if endpoint_name == \"lambda\" and operation == \"Invoke\":\n inject_trace_to_client_context(args, span)\n if endpoint_name == \"sqs\" and operation == \"SendMessage\":\n inject_trace_to_sqs_message(args, span)\n if endpoint_name == \"sqs\" and operation == \"SendMessageBatch\":\n inject_trace_to_sqs_batch_message(args, span)\n\n else:\n span.resource = endpoint_name\n\n aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)\n\n region_name = deep_getattr(instance, \"meta.region_name\")\n\n span._set_str_tag(\"aws.agent\", \"botocore\")\n if operation is not None:\n span._set_str_tag(\"aws.operation\", operation)\n if region_name is not None:\n span._set_str_tag(\"aws.region\", region_name)\n\n result = original_func(*args, **kwargs)\n\n response_meta = result.get(\"ResponseMetadata\")\n if response_meta:\n if \"HTTPStatusCode\" in response_meta:\n span.set_tag(http.STATUS_CODE, response_meta[\"HTTPStatusCode\"])\n\n if \"RetryAttempts\" in response_meta:\n span.set_tag(\"retry_attempts\", response_meta[\"RetryAttempts\"])\n\n if \"RequestId\" in response_meta:\n span.set_tag(\"aws.requestid\", response_meta[\"RequestId\"])\n\n # set analytics sample rate\n span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.botocore.get_analytics_sample_rate())\n\n return result\n", "path": "ddtrace/contrib/botocore/patch.py"}]}
3,184
253